From af7a01a9b1d72d169cc216ca1637dffc18e8f474 Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Fri, 19 Jan 2024 14:41:12 -0800 Subject: [PATCH 1/2] config: add support for additional properties in resource This allows the configuration of other attributes than service.name. Signed-off-by: Alex Boten --- config/generated_config.go | 250 ++++++++++++++++++++++++++++++++----- config/resource.go | 50 +++++++- config/resource_test.go | 52 ++++++++ config/trace_test.go | 2 +- 4 files changed, 320 insertions(+), 34 deletions(-) diff --git a/config/generated_config.go b/config/generated_config.go index 68f8a3f9e07..154fb209978 100644 --- a/config/generated_config.go +++ b/config/generated_config.go @@ -14,11 +14,15 @@ type AttributeLimits struct { // AttributeValueLengthLimit corresponds to the JSON schema field // "attribute_value_length_limit". AttributeValueLengthLimit *int `mapstructure:"attribute_value_length_limit,omitempty"` + + AdditionalProperties interface{} } type Attributes struct { // ServiceName corresponds to the JSON schema field "service.name". ServiceName *string `mapstructure:"service.name,omitempty"` + + AdditionalProperties interface{} } type BatchLogRecordProcessor struct { @@ -66,6 +70,8 @@ type Headers map[string]string type LogRecordExporter struct { // OTLP corresponds to the JSON schema field "otlp". OTLP *OTLP `mapstructure:"otlp,omitempty"` + + AdditionalProperties interface{} } type LogRecordLimits struct { @@ -84,6 +90,8 @@ type LogRecordProcessor struct { // Simple corresponds to the JSON schema field "simple". Simple *SimpleLogRecordProcessor `mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} } type LoggerProvider struct { @@ -111,6 +119,8 @@ type MetricExporter struct { // Prometheus corresponds to the JSON schema field "prometheus". Prometheus *Prometheus `mapstructure:"prometheus,omitempty"` + + AdditionalProperties interface{} } type MetricReader struct { @@ -210,6 +220,8 @@ type OpenTelemetryConfiguration struct { // TracerProvider corresponds to the JSON schema field "tracer_provider". TracerProvider *TracerProvider `mapstructure:"tracer_provider,omitempty"` + + AdditionalProperties interface{} } type PeriodicMetricReader struct { @@ -243,6 +255,8 @@ type Prometheus struct { type Propagator struct { // Composite corresponds to the JSON schema field "composite". Composite []string `mapstructure:"composite,omitempty"` + + AdditionalProperties interface{} } type PullMetricReader struct { @@ -273,6 +287,8 @@ type Sampler struct { // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". TraceIDRatioBased *SamplerTraceIDRatioBased `mapstructure:"trace_id_ratio_based,omitempty"` + + AdditionalProperties interface{} } type SamplerAlwaysOff map[string]interface{} @@ -334,6 +350,8 @@ type SpanExporter struct { // Zipkin corresponds to the JSON schema field "zipkin". Zipkin *Zipkin `mapstructure:"zipkin,omitempty"` + + AdditionalProperties interface{} } type SpanLimits struct { @@ -366,6 +384,8 @@ type SpanProcessor struct { // Simple corresponds to the JSON schema field "simple". 
Simple *SimpleSpanProcessor `mapstructure:"simple,omitempty"` + + AdditionalProperties interface{} } type TracerProvider struct { @@ -491,50 +511,87 @@ var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ "explicit_bucket_histogram", "base2_exponential_bucket_histogram", } -var enumValues_ViewSelectorInstrumentType = []interface{}{ - "counter", - "histogram", - "observable_counter", - "observable_gauge", - "observable_up_down_counter", - "up_down_counter", -} // UnmarshalJSON implements json.Unmarshaler. -func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { +func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_ViewSelectorInstrumentType { + for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) } - *j = ViewSelectorInstrumentType(v) + *j = OTLPMetricDefaultHistogramAggregation(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { +func (j *Attributes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in BatchSpanProcessor: required") + type Plain Attributes + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain BatchSpanProcessor + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = Attributes(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Zipkin) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["endpoint"]; !ok || v == nil { + return fmt.Errorf("field endpoint in Zipkin: required") + } + type Plain Zipkin var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = BatchSpanProcessor(plain) + *j = Zipkin(plain) + return nil +} + +var enumValues_ViewSelectorInstrumentType = []interface{}{ + "counter", + "histogram", + "observable_counter", + "observable_gauge", + "observable_up_down_counter", + "up_down_counter", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpanExporter) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain SpanExporter + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = SpanExporter(plain) return nil } @@ -556,6 +613,24 @@ func (j *PullMetricReader) UnmarshalJSON(b []byte) error { return nil } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in BatchSpanProcessor: required") + } + type Plain BatchSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchSpanProcessor(plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -574,6 +649,24 @@ func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { return nil } +// UnmarshalJSON implements json.Unmarshaler. +func (j *MetricExporter) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain MetricExporter + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = MetricExporter(plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *OTLPMetric) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -596,58 +689,76 @@ func (j *OTLPMetric) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { +func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { + for _, expected := range enumValues_ViewSelectorInstrumentType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) } - *j = OTLPMetricDefaultHistogramAggregation(v) + *j = ViewSelectorInstrumentType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { +func (j *Propagator) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in SimpleSpanProcessor: required") - } - type Plain SimpleSpanProcessor + type Plain Propagator var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SimpleSpanProcessor(plain) + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = Propagator(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *Zipkin) UnmarshalJSON(b []byte) error { +func (j *LogRecordProcessor) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["endpoint"]; !ok || v == nil { - return fmt.Errorf("field endpoint in Zipkin: required") + type Plain LogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain Zipkin + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = LogRecordProcessor(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Sampler) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain Sampler var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = Zipkin(plain) + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = Sampler(plain) return nil } @@ -669,6 +780,24 @@ func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { return nil } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["exporter"]; !ok || v == nil { + return fmt.Errorf("field exporter in SimpleSpanProcessor: required") + } + type Plain SimpleSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleSpanProcessor(plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -687,6 +816,42 @@ func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { return nil } +// UnmarshalJSON implements json.Unmarshaler. +func (j *LogRecordExporter) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain LogRecordExporter + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = LogRecordExporter(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain SpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = SpanProcessor(plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *OTLP) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -708,6 +873,24 @@ func (j *OTLP) UnmarshalJSON(b []byte) error { return nil } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *AttributeLimits) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain AttributeLimits + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } + *j = AttributeLimits(plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -722,6 +905,9 @@ func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { if err := json.Unmarshal(b, &plain); err != nil { return err } + if v, ok := raw[""]; !ok || v == nil { + plain.AdditionalProperties = map[string]interface{}{} + } *j = OpenTelemetryConfiguration(plain) return nil } diff --git a/config/resource.go b/config/resource.go index 6cbc2869866..3caeb89e302 100644 --- a/config/resource.go +++ b/config/resource.go @@ -4,16 +4,64 @@ package config // import "go.opentelemetry.io/contrib/config" import ( + "fmt" + + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.24.0" ) +func keyVal(k string, v any) attribute.KeyValue { + switch val := v.(type) { + case bool: + return attribute.Bool(k, val) + case int64: + return attribute.Int64(k, val) + case uint64: + return attribute.String(k, fmt.Sprintf("%d", val)) + case float64: + return attribute.Float64(k, val) + case int8: + return attribute.Int64(k, int64(val)) + case uint8: + return attribute.Int64(k, int64(val)) + case int16: + return attribute.Int64(k, int64(val)) + case uint16: + return attribute.Int64(k, int64(val)) + case int32: + return attribute.Int64(k, int64(val)) + case uint32: + return attribute.Int64(k, int64(val)) + case float32: + return attribute.Float64(k, float64(val)) + case int: + return attribute.Int(k, val) + case uint: + return attribute.String(k, fmt.Sprintf("%d", val)) + case string: + return attribute.String(k, val) + default: + return attribute.String(k, fmt.Sprint(v)) + } +} + func newResource(res *Resource) (*resource.Resource, error) { if res == nil || res.Attributes == nil { return resource.Default(), nil } + attrs := []attribute.KeyValue{ + semconv.ServiceName(*res.Attributes.ServiceName), + } + + if props, ok := res.Attributes.AdditionalProperties.(map[string]any); ok { + for k, v := range props { + attrs = append(attrs, keyVal(k, v)) + } + } + return resource.Merge(resource.Default(), resource.NewWithAttributes(*res.SchemaUrl, - semconv.ServiceName(*res.Attributes.ServiceName), + attrs..., )) } diff --git a/config/resource_test.go b/config/resource_test.go index 27a76c73f8c..f999c1b847a 100644 --- a/config/resource_test.go +++ b/config/resource_test.go @@ -4,20 +4,45 @@ package config // import "go.opentelemetry.io/contrib/config" import ( + "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.24.0" ) +type mockType struct{} + func TestNewResource(t *testing.T) { res, err := resource.Merge(resource.Default(), resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("service-a"), )) + other := mockType{} + require.NoError(t, err) + resWithAttrs, err := resource.Merge(resource.Default(), + resource.NewWithAttributes(semconv.SchemaURL, + semconv.ServiceName("service-a"), + 
attribute.Bool("attr-bool", true), + attribute.String("attr-uint64", fmt.Sprintf("%d", 164)), + attribute.Int64("attr-int64", int64(-164)), + attribute.Float64("attr-float64", float64(64.0)), + attribute.Int64("attr-int8", int64(-18)), + attribute.Int64("attr-uint8", int64(18)), + attribute.Int64("attr-int16", int64(-116)), + attribute.Int64("attr-uint16", int64(116)), + attribute.Int64("attr-int32", int64(-132)), + attribute.Int64("attr-uint32", int64(132)), + attribute.Float64("attr-float32", float64(32.0)), + attribute.Int64("attr-int", int64(-1)), + attribute.String("attr-uint", fmt.Sprintf("%d", 1)), + attribute.String("attr-string", "string-val"), + attribute.String("attr-default", fmt.Sprintf("%v", other)), + )) require.NoError(t, err) tests := []struct { name string @@ -55,6 +80,33 @@ func TestNewResource(t *testing.T) { }, wantResource: res, }, + { + name: "resource-with-additional-attributes-and-schema", + config: &Resource{ + Attributes: &Attributes{ + ServiceName: ptr("service-a"), + AdditionalProperties: map[string]any{ + "attr-bool": true, + "attr-int64": int64(-164), + "attr-uint64": uint64(164), + "attr-float64": float64(64.0), + "attr-int8": int8(-18), + "attr-uint8": uint8(18), + "attr-int16": int16(-116), + "attr-uint16": uint16(116), + "attr-int32": int32(-132), + "attr-uint32": uint32(132), + "attr-float32": float32(32.0), + "attr-int": int(-1), + "attr-uint": uint(1), + "attr-string": "string-val", + "attr-default": other, + }, + }, + SchemaUrl: ptr(semconv.SchemaURL), + }, + wantResource: resWithAttrs, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/config/trace_test.go b/config/trace_test.go index de1461342ae..3c8c9fa2de6 100644 --- a/config/trace_test.go +++ b/config/trace_test.go @@ -103,7 +103,7 @@ func TestSpanProcessor(t *testing.T) { }{ { name: "no processor", - wantErr: errors.New("unsupported span processor type { }"), + wantErr: errors.New("unsupported span processor type { }"), }, { name: "multiple processor types", From 72ad91bb4a55f94fd55d4cfdf27032a67830f2e9 Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Tue, 23 Apr 2024 14:50:20 -0700 Subject: [PATCH 2/2] update generated code + changelog Signed-off-by: Alex Boten <223565+codeboten@users.noreply.github.com> --- CHANGELOG.md | 1 + config/generated_config.go | 611 ++++++++++++++----------------------- 2 files changed, 224 insertions(+), 388 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e15cbf3c8c7..9ca57f0c714 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Changed - Change the scope name for the prometheus bridge to `go.opentelemetry.io/contrib/bridges/prometheus` to match the package. (#5396) +- Add support for settings additional properties for resource configuration in `go.opentelemetry.io/contrib/config`. (#4832) ### Fixed diff --git a/config/generated_config.go b/config/generated_config.go index 154fb209978..64154ee5437 100644 --- a/config/generated_config.go +++ b/config/generated_config.go @@ -43,6 +43,24 @@ type BatchLogRecordProcessor struct { ScheduleDelay *int `mapstructure:"schedule_delay,omitempty"` } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") + } + type Plain BatchLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchLogRecordProcessor(plain) + return nil +} + type BatchSpanProcessor struct { // ExportTimeout corresponds to the JSON schema field "export_timeout". ExportTimeout *int `mapstructure:"export_timeout,omitempty"` @@ -61,6 +79,24 @@ type BatchSpanProcessor struct { ScheduleDelay *int `mapstructure:"schedule_delay,omitempty"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in BatchSpanProcessor: required") + } + type Plain BatchSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchSpanProcessor(plain) + return nil +} + type Common map[string]interface{} type Console map[string]interface{} @@ -196,6 +232,73 @@ type OTLPMetricDefaultHistogramAggregation string const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram" +var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ + "explicit_bucket_histogram", + "base2_exponential_bucket_histogram", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) + } + *j = OTLPMetricDefaultHistogramAggregation(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPMetric) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in OTLPMetric: required") + } + if _, ok := raw["protocol"]; raw != nil && !ok { + return fmt.Errorf("field protocol in OTLPMetric: required") + } + type Plain OTLPMetric + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OTLPMetric(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *OTLP) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in OTLP: required") + } + if _, ok := raw["protocol"]; raw != nil && !ok { + return fmt.Errorf("field protocol in OTLP: required") + } + type Plain OTLP + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OTLP(plain) + return nil +} + type OpenTelemetryConfiguration struct { // AttributeLimits corresponds to the JSON schema field "attribute_limits". AttributeLimits *AttributeLimits `mapstructure:"attribute_limits,omitempty"` @@ -224,6 +327,24 @@ type OpenTelemetryConfiguration struct { AdditionalProperties interface{} } +// UnmarshalJSON implements json.Unmarshaler. +func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["file_format"]; raw != nil && !ok { + return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") + } + type Plain OpenTelemetryConfiguration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OpenTelemetryConfiguration(plain) + return nil +} + type PeriodicMetricReader struct { // Exporter corresponds to the JSON schema field "exporter". Exporter MetricExporter `mapstructure:"exporter"` @@ -235,6 +356,24 @@ type PeriodicMetricReader struct { Timeout *int `mapstructure:"timeout,omitempty"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in PeriodicMetricReader: required") + } + type Plain PeriodicMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PeriodicMetricReader(plain) + return nil +} + type Prometheus struct { // Host corresponds to the JSON schema field "host". Host *string `mapstructure:"host,omitempty"` @@ -264,6 +403,24 @@ type PullMetricReader struct { Exporter MetricExporter `mapstructure:"exporter"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *PullMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in PullMetricReader: required") + } + type Plain PullMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PullMetricReader(plain) + return nil +} + type Resource struct { // Attributes corresponds to the JSON schema field "attributes". Attributes *Attributes `mapstructure:"attributes,omitempty"` @@ -336,11 +493,47 @@ type SimpleLogRecordProcessor struct { Exporter LogRecordExporter `mapstructure:"exporter"` } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") + } + type Plain SimpleLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleLogRecordProcessor(plain) + return nil +} + type SimpleSpanProcessor struct { // Exporter corresponds to the JSON schema field "exporter". Exporter SpanExporter `mapstructure:"exporter"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in SimpleSpanProcessor: required") + } + type Plain SimpleSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleSpanProcessor(plain) + return nil +} + type SpanExporter struct { // Console corresponds to the JSON schema field "console". Console Console `mapstructure:"console,omitempty"` @@ -436,6 +629,35 @@ const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "ob const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" +var enumValues_ViewSelectorInstrumentType = []interface{}{ + "counter", + "histogram", + "observable_counter", + "observable_gauge", + "observable_up_down_counter", + "up_down_counter", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_ViewSelectorInstrumentType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) + } + *j = ViewSelectorInstrumentType(v) + return nil +} + type ViewStream struct { // Aggregation corresponds to the JSON schema field "aggregation". Aggregation *ViewStreamAggregation `mapstructure:"aggregation,omitempty"` @@ -507,56 +729,13 @@ type Zipkin struct { Timeout *int `mapstructure:"timeout,omitempty"` } -var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ - "explicit_bucket_histogram", - "base2_exponential_bucket_histogram", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) - } - *j = OTLPMetricDefaultHistogramAggregation(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *Attributes) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain Attributes - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = Attributes(plain) - return nil -} - // UnmarshalJSON implements json.Unmarshaler. func (j *Zipkin) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["endpoint"]; !ok || v == nil { + if _, ok := raw["endpoint"]; raw != nil && !ok { return fmt.Errorf("field endpoint in Zipkin: required") } type Plain Zipkin @@ -567,347 +746,3 @@ func (j *Zipkin) UnmarshalJSON(b []byte) error { *j = Zipkin(plain) return nil } - -var enumValues_ViewSelectorInstrumentType = []interface{}{ - "counter", - "histogram", - "observable_counter", - "observable_gauge", - "observable_up_down_counter", - "up_down_counter", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpanExporter) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain SpanExporter - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = SpanExporter(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *PullMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in PullMetricReader: required") - } - type Plain PullMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PullMetricReader(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in BatchSpanProcessor: required") - } - type Plain BatchSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchSpanProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in PeriodicMetricReader: required") - } - type Plain PeriodicMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PeriodicMetricReader(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *MetricExporter) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain MetricExporter - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = MetricExporter(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *OTLPMetric) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["endpoint"]; !ok || v == nil { - return fmt.Errorf("field endpoint in OTLPMetric: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in OTLPMetric: required") - } - type Plain OTLPMetric - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLPMetric(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_ViewSelectorInstrumentType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) - } - *j = ViewSelectorInstrumentType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *Propagator) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain Propagator - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = Propagator(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *LogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain LogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = LogRecordProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *Sampler) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain Sampler - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = Sampler(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") - } - type Plain SimpleLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleLogRecordProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in SimpleSpanProcessor: required") - } - type Plain SimpleSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleSpanProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["exporter"]; !ok || v == nil { - return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") - } - type Plain BatchLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchLogRecordProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *LogRecordExporter) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain LogRecordExporter - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = LogRecordExporter(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain SpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = SpanProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLP) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["endpoint"]; !ok || v == nil { - return fmt.Errorf("field endpoint in OTLP: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in OTLP: required") - } - type Plain OTLP - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLP(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *AttributeLimits) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain AttributeLimits - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = AttributeLimits(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["file_format"]; !ok || v == nil { - return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") - } - type Plain OpenTelemetryConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if v, ok := raw[""]; !ok || v == nil { - plain.AdditionalProperties = map[string]interface{}{} - } - *j = OpenTelemetryConfiguration(plain) - return nil -}
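
Illustrative sketch (editor's addition, not part of the patch): the snippet below shows how the new AdditionalProperties field on config.Attributes is intended to be populated from Go, mirroring the resource-with-additional-attributes-and-schema test case added in config/resource_test.go above. The attribute keys and values are hypothetical examples, and ptrTo is a local stand-in for the test-only ptr helper; extra entries are converted to attribute.KeyValue pairs by the unexported keyVal function added in config/resource.go.

package main

import (
	"fmt"

	"go.opentelemetry.io/contrib/config"
)

// ptrTo returns a pointer to v; the config package's ptr helper is test-only,
// so callers need their own helper (or take the address of a named variable).
func ptrTo[T any](v T) *T { return &v }

func main() {
	// A resource configuration carrying attributes beyond service.name.
	// Keys other than service.name travel through AdditionalProperties as a
	// map[string]any and are turned into attribute.KeyValue pairs by the
	// unexported keyVal switch when newResource builds the SDK resource.
	res := &config.Resource{
		SchemaUrl: ptrTo("https://opentelemetry.io/schemas/1.24.0"),
		Attributes: &config.Attributes{
			ServiceName: ptrTo("service-a"),
			AdditionalProperties: map[string]any{
				// hypothetical example attributes
				"service.version":        "1.2.3",
				"deployment.environment": "staging",
				"host.cpu.count":         8,
			},
		},
	}
	fmt.Printf("resource configuration: %+v\n", res)
}

Note the asymmetry in the keyVal switch added by this patch: signed integers and floats are stored as Int64/Float64 attributes, while uint and uint64 values are rendered as string attributes to avoid overflowing int64, and any unrecognized type falls back to fmt.Sprint.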