diff --git a/CHANGELOG.md b/CHANGELOG.md
index 02cdb36e137..54eb7b3c092 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 ### Changed
 
 - Change the scope name for the prometheus bridge to `go.opentelemetry.io/contrib/bridges/prometheus` to match the package. (#5396)
+- Add support for setting additional properties for resource configuration in `go.opentelemetry.io/contrib/config`. (#4832)
 
 ### Fixed
 
diff --git a/config/generated_config.go b/config/generated_config.go
index 68f8a3f9e07..64154ee5437 100644
--- a/config/generated_config.go
+++ b/config/generated_config.go
@@ -14,11 +14,15 @@ type AttributeLimits struct {
 	// AttributeValueLengthLimit corresponds to the JSON schema field
 	// "attribute_value_length_limit".
 	AttributeValueLengthLimit *int `mapstructure:"attribute_value_length_limit,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type Attributes struct {
 	// ServiceName corresponds to the JSON schema field "service.name".
 	ServiceName *string `mapstructure:"service.name,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type BatchLogRecordProcessor struct {
@@ -39,6 +43,24 @@ type BatchLogRecordProcessor struct {
 	ScheduleDelay *int `mapstructure:"schedule_delay,omitempty"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return fmt.Errorf("field exporter in BatchLogRecordProcessor: required")
+	}
+	type Plain BatchLogRecordProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = BatchLogRecordProcessor(plain)
+	return nil
+}
+
 type BatchSpanProcessor struct {
 	// ExportTimeout corresponds to the JSON schema field "export_timeout".
 	ExportTimeout *int `mapstructure:"export_timeout,omitempty"`
@@ -57,6 +79,24 @@ type BatchSpanProcessor struct {
 	ScheduleDelay *int `mapstructure:"schedule_delay,omitempty"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return fmt.Errorf("field exporter in BatchSpanProcessor: required")
+	}
+	type Plain BatchSpanProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = BatchSpanProcessor(plain)
+	return nil
+}
+
 type Common map[string]interface{}
 
 type Console map[string]interface{}
@@ -66,6 +106,8 @@ type Headers map[string]string
 type LogRecordExporter struct {
 	// OTLP corresponds to the JSON schema field "otlp".
 	OTLP *OTLP `mapstructure:"otlp,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type LogRecordLimits struct {
@@ -84,6 +126,8 @@ type LogRecordProcessor struct {
 
 	// Simple corresponds to the JSON schema field "simple".
 	Simple *SimpleLogRecordProcessor `mapstructure:"simple,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type LoggerProvider struct {
@@ -111,6 +155,8 @@ type MetricExporter struct {
 
 	// Prometheus corresponds to the JSON schema field "prometheus".
 	Prometheus *Prometheus `mapstructure:"prometheus,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type MetricReader struct {
@@ -186,6 +232,73 @@ type OTLPMetricDefaultHistogramAggregation string
 const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram"
 const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram"
 
+var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{
+	"explicit_bucket_histogram",
+	"base2_exponential_bucket_histogram",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+	var ok bool
+	for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
+	}
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v)
+	}
+	*j = OTLPMetricDefaultHistogramAggregation(v)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OTLPMetric) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return fmt.Errorf("field endpoint in OTLPMetric: required")
+	}
+	if _, ok := raw["protocol"]; raw != nil && !ok {
+		return fmt.Errorf("field protocol in OTLPMetric: required")
+	}
+	type Plain OTLPMetric
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OTLPMetric(plain)
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OTLP) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
+		return fmt.Errorf("field endpoint in OTLP: required")
+	}
+	if _, ok := raw["protocol"]; raw != nil && !ok {
+		return fmt.Errorf("field protocol in OTLP: required")
+	}
+	type Plain OTLP
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OTLP(plain)
+	return nil
+}
+
 type OpenTelemetryConfiguration struct {
 	// AttributeLimits corresponds to the JSON schema field "attribute_limits".
 	AttributeLimits *AttributeLimits `mapstructure:"attribute_limits,omitempty"`
@@ -210,6 +323,26 @@ type OpenTelemetryConfiguration struct {
 
 	// TracerProvider corresponds to the JSON schema field "tracer_provider".
 	TracerProvider *TracerProvider `mapstructure:"tracer_provider,omitempty"`
+
+	AdditionalProperties interface{}
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["file_format"]; raw != nil && !ok {
+		return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required")
+	}
+	type Plain OpenTelemetryConfiguration
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = OpenTelemetryConfiguration(plain)
+	return nil
 }
 
 type PeriodicMetricReader struct {
@@ -223,6 +356,24 @@ type PeriodicMetricReader struct {
 	Timeout *int `mapstructure:"timeout,omitempty"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return fmt.Errorf("field exporter in PeriodicMetricReader: required")
+	}
+	type Plain PeriodicMetricReader
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = PeriodicMetricReader(plain)
+	return nil
+}
+
 type Prometheus struct {
 	// Host corresponds to the JSON schema field "host".
 	Host *string `mapstructure:"host,omitempty"`
@@ -243,6 +394,8 @@ type Prometheus struct {
 type Propagator struct {
 	// Composite corresponds to the JSON schema field "composite".
 	Composite []string `mapstructure:"composite,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type PullMetricReader struct {
@@ -250,6 +403,24 @@ type PullMetricReader struct {
 	Exporter MetricExporter `mapstructure:"exporter"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *PullMetricReader) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return fmt.Errorf("field exporter in PullMetricReader: required")
+	}
+	type Plain PullMetricReader
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = PullMetricReader(plain)
+	return nil
+}
+
 type Resource struct {
 	// Attributes corresponds to the JSON schema field "attributes".
 	Attributes *Attributes `mapstructure:"attributes,omitempty"`
@@ -273,6 +444,8 @@ type Sampler struct {
 
 	// TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based".
 	TraceIDRatioBased *SamplerTraceIDRatioBased `mapstructure:"trace_id_ratio_based,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type SamplerAlwaysOff map[string]interface{}
@@ -320,11 +493,47 @@ type SimpleLogRecordProcessor struct {
 	Exporter LogRecordExporter `mapstructure:"exporter"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required")
+	}
+	type Plain SimpleLogRecordProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SimpleLogRecordProcessor(plain)
+	return nil
+}
+
 type SimpleSpanProcessor struct {
 	// Exporter corresponds to the JSON schema field "exporter".
 	Exporter SpanExporter `mapstructure:"exporter"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if _, ok := raw["exporter"]; raw != nil && !ok {
+		return fmt.Errorf("field exporter in SimpleSpanProcessor: required")
+	}
+	type Plain SimpleSpanProcessor
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SimpleSpanProcessor(plain)
+	return nil
+}
+
 type SpanExporter struct {
 	// Console corresponds to the JSON schema field "console".
 	Console Console `mapstructure:"console,omitempty"`
@@ -334,6 +543,8 @@ type SpanExporter struct {
 
 	// Zipkin corresponds to the JSON schema field "zipkin".
 	Zipkin *Zipkin `mapstructure:"zipkin,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type SpanLimits struct {
@@ -366,6 +577,8 @@ type SpanProcessor struct {
 
 	// Simple corresponds to the JSON schema field "simple".
 	Simple *SimpleSpanProcessor `mapstructure:"simple,omitempty"`
+
+	AdditionalProperties interface{}
 }
 
 type TracerProvider struct {
@@ -416,6 +629,35 @@ const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "ob
 const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter"
 const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter"
 
+var enumValues_ViewSelectorInstrumentType = []interface{}{
+	"counter",
+	"histogram",
+	"observable_counter",
+	"observable_gauge",
+	"observable_up_down_counter",
+	"up_down_counter",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+	var ok bool
+	for _, expected := range enumValues_ViewSelectorInstrumentType {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
+	}
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v)
+	}
+	*j = ViewSelectorInstrumentType(v)
+	return nil
+}
+
 type ViewStream struct {
 	// Aggregation corresponds to the JSON schema field "aggregation".
 	Aggregation *ViewStreamAggregation `mapstructure:"aggregation,omitempty"`
@@ -487,159 +729,13 @@ type Zipkin struct {
 	Timeout *int `mapstructure:"timeout,omitempty"`
 }
 
-var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{
-	"explicit_bucket_histogram",
-	"base2_exponential_bucket_histogram",
-}
-var enumValues_ViewSelectorInstrumentType = []interface{}{
-	"counter",
-	"histogram",
-	"observable_counter",
-	"observable_gauge",
-	"observable_up_down_counter",
-	"up_down_counter",
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_ViewSelectorInstrumentType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v)
-	}
-	*j = ViewSelectorInstrumentType(v)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["exporter"]; !ok || v == nil {
-		return fmt.Errorf("field exporter in BatchSpanProcessor: required")
-	}
-	type Plain BatchSpanProcessor
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = BatchSpanProcessor(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *PullMetricReader) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["exporter"]; !ok || v == nil {
-		return fmt.Errorf("field exporter in PullMetricReader: required")
-	}
-	type Plain PullMetricReader
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = PullMetricReader(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["exporter"]; !ok || v == nil {
-		return fmt.Errorf("field exporter in PeriodicMetricReader: required")
-	}
-	type Plain PeriodicMetricReader
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = PeriodicMetricReader(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *OTLPMetric) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["endpoint"]; !ok || v == nil {
-		return fmt.Errorf("field endpoint in OTLPMetric: required")
-	}
-	if v, ok := raw["protocol"]; !ok || v == nil {
-		return fmt.Errorf("field protocol in OTLPMetric: required")
-	}
-	type Plain OTLPMetric
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = OTLPMetric(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v)
-	}
-	*j = OTLPMetricDefaultHistogramAggregation(v)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["exporter"]; !ok || v == nil {
-		return fmt.Errorf("field exporter in SimpleSpanProcessor: required")
-	}
-	type Plain SimpleSpanProcessor
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SimpleSpanProcessor(plain)
-	return nil
-}
-
 // UnmarshalJSON implements json.Unmarshaler.
 func (j *Zipkin) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["endpoint"]; !ok || v == nil {
+	if _, ok := raw["endpoint"]; raw != nil && !ok {
 		return fmt.Errorf("field endpoint in Zipkin: required")
 	}
 	type Plain Zipkin
@@ -650,78 +746,3 @@ func (j *Zipkin) UnmarshalJSON(b []byte) error {
 	*j = Zipkin(plain)
 	return nil
 }
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["exporter"]; !ok || v == nil {
-		return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required")
-	}
-	type Plain SimpleLogRecordProcessor
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SimpleLogRecordProcessor(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["exporter"]; !ok || v == nil {
-		return fmt.Errorf("field exporter in BatchLogRecordProcessor: required")
-	}
-	type Plain BatchLogRecordProcessor
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = BatchLogRecordProcessor(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *OTLP) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["endpoint"]; !ok || v == nil {
-		return fmt.Errorf("field endpoint in OTLP: required")
-	}
-	if v, ok := raw["protocol"]; !ok || v == nil {
-		return fmt.Errorf("field protocol in OTLP: required")
-	}
-	type Plain OTLP
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = OTLP(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["file_format"]; !ok || v == nil {
-		return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required")
-	}
-	type Plain OpenTelemetryConfiguration
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = OpenTelemetryConfiguration(plain)
-	return nil
-}
diff --git a/config/resource.go b/config/resource.go
index 6cbc2869866..3caeb89e302 100644
--- a/config/resource.go
+++ b/config/resource.go
@@ -4,16 +4,64 @@
 package config // import "go.opentelemetry.io/contrib/config"
 
 import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/resource"
 	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
 )
 
+func keyVal(k string, v any) attribute.KeyValue {
+	switch val := v.(type) {
+	case bool:
+		return attribute.Bool(k, val)
+	case int64:
+		return attribute.Int64(k, val)
+	case uint64:
+		return attribute.String(k, fmt.Sprintf("%d", val))
+	case float64:
+		return attribute.Float64(k, val)
+	case int8:
+		return attribute.Int64(k, int64(val))
+	case uint8:
+		return attribute.Int64(k, int64(val))
+	case int16:
+		return attribute.Int64(k, int64(val))
+	case uint16:
+		return attribute.Int64(k, int64(val))
+	case int32:
+		return attribute.Int64(k, int64(val))
+	case uint32:
+		return attribute.Int64(k, int64(val))
+	case float32:
+		return attribute.Float64(k, float64(val))
+	case int:
+		return attribute.Int(k, val)
+	case uint:
+		return attribute.String(k, fmt.Sprintf("%d", val))
+	case string:
+		return attribute.String(k, val)
+	default:
+		return attribute.String(k, fmt.Sprint(v))
+	}
+}
+
 func newResource(res *Resource) (*resource.Resource, error) {
 	if res == nil || res.Attributes == nil {
 		return resource.Default(), nil
 	}
+	attrs := []attribute.KeyValue{
+		semconv.ServiceName(*res.Attributes.ServiceName),
+	}
+
+	if props, ok := res.Attributes.AdditionalProperties.(map[string]any); ok {
+		for k, v := range props {
+			attrs = append(attrs, keyVal(k, v))
+		}
+	}
+
 	return resource.Merge(resource.Default(),
 		resource.NewWithAttributes(*res.SchemaUrl,
-			semconv.ServiceName(*res.Attributes.ServiceName),
+			attrs...,
 		))
 }
diff --git a/config/resource_test.go b/config/resource_test.go
index 27a76c73f8c..f999c1b847a 100644
--- a/config/resource_test.go
+++ b/config/resource_test.go
@@ -4,20 +4,45 @@
 package config // import "go.opentelemetry.io/contrib/config"
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/resource"
 	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
 )
 
+type mockType struct{}
+
 func TestNewResource(t *testing.T) {
 	res, err := resource.Merge(resource.Default(),
 		resource.NewWithAttributes(semconv.SchemaURL,
 			semconv.ServiceName("service-a"),
 		))
+	other := mockType{}
+	require.NoError(t, err)
+	resWithAttrs, err := resource.Merge(resource.Default(),
+		resource.NewWithAttributes(semconv.SchemaURL,
+			semconv.ServiceName("service-a"),
+			attribute.Bool("attr-bool", true),
+			attribute.String("attr-uint64", fmt.Sprintf("%d", 164)),
+			attribute.Int64("attr-int64", int64(-164)),
+			attribute.Float64("attr-float64", float64(64.0)),
+			attribute.Int64("attr-int8", int64(-18)),
+			attribute.Int64("attr-uint8", int64(18)),
+			attribute.Int64("attr-int16", int64(-116)),
+			attribute.Int64("attr-uint16", int64(116)),
+			attribute.Int64("attr-int32", int64(-132)),
+			attribute.Int64("attr-uint32", int64(132)),
+			attribute.Float64("attr-float32", float64(32.0)),
+			attribute.Int64("attr-int", int64(-1)),
+			attribute.String("attr-uint", fmt.Sprintf("%d", 1)),
+			attribute.String("attr-string", "string-val"),
+			attribute.String("attr-default", fmt.Sprintf("%v", other)),
+		))
 	require.NoError(t, err)
 	tests := []struct {
 		name         string
@@ -55,6 +80,33 @@ func TestNewResource(t *testing.T) {
 			},
 			wantResource: res,
 		},
+		{
+			name: "resource-with-additional-attributes-and-schema",
+			config: &Resource{
+				Attributes: &Attributes{
+					ServiceName: ptr("service-a"),
+					AdditionalProperties: map[string]any{
+						"attr-bool":    true,
+						"attr-int64":   int64(-164),
+						"attr-uint64":  uint64(164),
+						"attr-float64": float64(64.0),
+						"attr-int8":    int8(-18),
+						"attr-uint8":   uint8(18),
+						"attr-int16":   int16(-116),
+						"attr-uint16":  uint16(116),
+						"attr-int32":   int32(-132),
+						"attr-uint32":  uint32(132),
+						"attr-float32": float32(32.0),
+						"attr-int":     int(-1),
+						"attr-uint":    uint(1),
+						"attr-string":  "string-val",
+						"attr-default": other,
+					},
+				},
+				SchemaUrl: ptr(semconv.SchemaURL),
+			},
+			wantResource: resWithAttrs,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/config/trace_test.go b/config/trace_test.go
index de1461342ae..3c8c9fa2de6 100644
--- a/config/trace_test.go
+++ b/config/trace_test.go
@@ -103,7 +103,7 @@ func TestSpanProcessor(t *testing.T) {
 	}{
 		{
 			name:    "no processor",
-			wantErr: errors.New("unsupported span processor type { }"),
+			wantErr: errors.New("unsupported span processor type {  }"),
 		},
 		{
 			name: "multiple processor types",
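For context, a minimal usage sketch of the resource attribute support added by this patch. The sketch is not part of the diff: it assumes the package's exported NewSDK and WithOpenTelemetryConfiguration API, and the ptr helper plus the attribute keys used below (deployment.environment, host.cpu.count) are illustrative only.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/contrib/config"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// ptr mirrors the unexported test helper used in config/resource_test.go.
func ptr[T any](v T) *T { return &v }

func main() {
	cfg := config.OpenTelemetryConfiguration{
		Resource: &config.Resource{
			SchemaUrl: ptr(semconv.SchemaURL),
			Attributes: &config.Attributes{
				ServiceName: ptr("service-a"),
				// Resource attributes beyond service.name are carried in
				// AdditionalProperties and converted by keyVal in
				// config/resource.go (e.g. int -> attribute.Int,
				// uint64 -> string).
				AdditionalProperties: map[string]any{
					"deployment.environment": "staging",
					"host.cpu.count":         8,
				},
			},
		},
	}

	// Build SDK providers from the declarative configuration.
	sdk, err := config.NewSDK(config.WithOpenTelemetryConfiguration(cfg))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = sdk.Shutdown(context.Background()) }()
}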