Skip to content

Commit

Permalink
add otlp histograms to sfx exporter
Browse files Browse the repository at this point in the history
  • Loading branch information
jinja2 committed Feb 14, 2024
1 parent 471936e commit 33f7bb0
Show file tree
Hide file tree
Showing 17 changed files with 894 additions and 81 deletions.
27 changes: 27 additions & 0 deletions .chloggen/signalfx-exp-otlp.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: exporter/signalfx

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Send histograms in OTLP format with the new `send_otlp_histograms` config option

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [31052]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
3 changes: 2 additions & 1 deletion exporter/signalfxexporter/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,8 @@ will be replaced with a `_`.
api_tls:
ca_file: "/etc/opt/certs/ca.pem"
```
- `drop_histogram_buckets`: (default = `false`) if set to true, histogram buckets will not be translated into datapoints with `_bucket` suffix but will be dropped instead, only datapoints with `_sum`, `_count`, `_min` (optional) and `_max` (optional) suffixes will be sent.
- `drop_histogram_buckets`: (default = `false`) if set to true, histogram buckets will not be translated into datapoints with `_bucket` suffix but will be dropped instead, only datapoints with `_sum`, `_count`, `_min` (optional) and `_max` (optional) suffixes will be sent. Please note that this option does not apply to histograms sent in OTLP format with `send_otlp_histograms` enabled.
- `send_otlp_histograms`: (default = `false`) if set to true, any histogram metrics received by the exporter will be sent to the Splunk Observability backend in OTLP format without conversion to SignalFx format. This can only be enabled if the Splunk Observability environment (realm) has the new Histograms feature rolled out. Please note that the `include_metrics` and `exclude_metrics` exporter configurations do not apply to histograms sent in OTLP format.
In addition, this exporter offers queued retry which is enabled by default.
Information about queued retry configuration parameters can be found
[here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md).
Expand Down
6 changes: 5 additions & 1 deletion exporter/signalfxexporter/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ type Config struct {

// ExcludeMetrics defines dpfilter.MetricFilters that will determine metrics to be
// excluded from sending to SignalFx backend. If translations enabled with
// TranslationRules options, the exclusion will be applie on translated metrics.
// TranslationRules options, the exclusion will be applied on translated metrics.
ExcludeMetrics []dpfilters.MetricFilter `mapstructure:"exclude_metrics"`

// IncludeMetrics defines dpfilter.MetricFilters to override exclusion any of metric.
Expand All @@ -134,6 +134,10 @@ type Config struct {
// Whether to drop histogram bucket metrics dispatched to Splunk Observability.
// Default value is set to false.
DropHistogramBuckets bool `mapstructure:"drop_histogram_buckets"`

// Whether to send histogram metrics in OTLP format to Splunk Observability.
// Default value is set to false.
SendOTLPHistograms bool `mapstructure:"send_otlp_histograms"`
}

type DimensionClientConfig struct {
Expand Down
2 changes: 2 additions & 0 deletions exporter/signalfxexporter/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ func TestLoadConfig(t *testing.T) {
},
},
NonAlphanumericDimensionChars: "_-.",
SendOTLPHistograms: false,
},
},
{
Expand Down Expand Up @@ -263,6 +264,7 @@ func TestLoadConfig(t *testing.T) {
},
},
NonAlphanumericDimensionChars: "_-.",
SendOTLPHistograms: true,
},
},
}
Expand Down
144 changes: 125 additions & 19 deletions exporter/signalfxexporter/dpclient.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,20 @@ import (
sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
"go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.uber.org/zap"

"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation"
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/utils"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)

// HTTP header names used when posting to the ingest endpoint, and the
// content type that marks a payload as OTLP-encoded protobuf (the
// ";format=otlp" parameter distinguishes it from SignalFx protobuf).
const (
contentEncodingHeader = "Content-Encoding"
contentTypeHeader = "Content-Type"
otlpProtobufContentType = "application/x-protobuf;format=otlp"
)

type sfxClientBase struct {
ingestURL *url.URL
headers map[string]string
Expand Down Expand Up @@ -58,6 +66,7 @@ type sfxDPClient struct {
logger *zap.Logger
accessTokenPassthrough bool
converter *translation.MetricsConverter
sendOTLPHistograms bool
}

func (s *sfxDPClient) pushMetricsData(
Expand All @@ -81,48 +90,55 @@ func (s *sfxDPClient) pushMetricsData(
// All metrics in the pmetric.Metrics will have the same access token because of the BatchPerResourceMetrics.
metricToken := s.retrieveAccessToken(rms.At(0))

// export SFx format
sfxDataPoints := s.converter.MetricsToSignalFxV2(md)
if s.logDataPoints {
for _, dp := range sfxDataPoints {
s.logger.Debug("Dispatching SFx datapoint", zap.Stringer("dp", dp))
if len(sfxDataPoints) > 0 {
droppedCount, err := s.pushMetricsDataForToken(ctx, sfxDataPoints, metricToken)
if err != nil {
return droppedCount, err
}
}
return s.pushMetricsDataForToken(ctx, sfxDataPoints, metricToken)
}

func (s *sfxDPClient) pushMetricsDataForToken(ctx context.Context, sfxDataPoints []*sfxpb.DataPoint, accessToken string) (int, error) {
body, compressed, err := s.encodeBody(sfxDataPoints)
if err != nil {
return len(sfxDataPoints), consumererror.NewPermanent(err)
// export any histograms in otlp if sendOTLPHistograms is true
if s.sendOTLPHistograms {
histogramData, metricCount := utils.GetHistograms(md)
if metricCount > 0 {
droppedCount, err := s.pushOTLPMetricsDataForToken(ctx, histogramData, metricToken)
if err != nil {
return droppedCount, err
}
}
}

return 0, nil

}

func (s *sfxDPClient) postData(ctx context.Context, body io.Reader, headers map[string]string) error {
datapointURL := *s.ingestURL
if !strings.HasSuffix(datapointURL.Path, "v2/datapoint") {
datapointURL.Path = path.Join(datapointURL.Path, "v2/datapoint")
}
req, err := http.NewRequestWithContext(ctx, "POST", datapointURL.String(), body)
if err != nil {
return len(sfxDataPoints), consumererror.NewPermanent(err)
return consumererror.NewPermanent(err)
}

// Set the headers configured in sfxDPClient
for k, v := range s.headers {
req.Header.Set(k, v)
}

// Override access token in headers map if it's non empty.
if accessToken != "" {
req.Header.Set(splunk.SFxAccessTokenHeader, accessToken)
}

if compressed {
req.Header.Set("Content-Encoding", "gzip")
// Set any extra headers passed by the caller
for k, v := range headers {
req.Header.Set(k, v)
}

// TODO: Mark errors as partial errors wherever applicable when, partial
// error for metrics is available.
resp, err := s.client.Do(req)
if err != nil {
return len(sfxDataPoints), err
return err
}

defer func() {
Expand All @@ -132,7 +148,39 @@ func (s *sfxDPClient) pushMetricsDataForToken(ctx context.Context, sfxDataPoints

err = splunk.HandleHTTPCode(resp)
if err != nil {
return len(sfxDataPoints), err
return err
}
return nil
}

// pushMetricsDataForToken encodes the given SignalFx datapoints and posts them
// to the ingest endpoint. A non-empty accessToken overrides the token header
// configured on the client. It returns the number of datapoints dropped
// (all of them on failure, zero on success) together with any error.
func (s *sfxDPClient) pushMetricsDataForToken(ctx context.Context, sfxDataPoints []*sfxpb.DataPoint, accessToken string) (int, error) {
	if s.logDataPoints {
		for _, dp := range sfxDataPoints {
			s.logger.Debug("Dispatching SFx datapoint", zap.Stringer("dp", dp))
		}
	}

	dpCount := len(sfxDataPoints)
	body, compressed, err := s.encodeBody(sfxDataPoints)
	if err != nil {
		// Encoding failures will not succeed on retry; mark permanent.
		return dpCount, consumererror.NewPermanent(err)
	}

	extraHeaders := map[string]string{}
	if accessToken != "" {
		// Per-request token takes precedence over the client-wide header.
		extraHeaders[splunk.SFxAccessTokenHeader] = accessToken
	}
	if compressed {
		extraHeaders[contentEncodingHeader] = "gzip"
	}

	if err = s.postData(ctx, body, extraHeaders); err != nil {
		return dpCount, err
	}
	return 0, nil
}
Expand Down Expand Up @@ -160,3 +208,61 @@ func (s *sfxDPClient) retrieveAccessToken(md pmetric.ResourceMetrics) string {
}
return ""
}

// pushOTLPMetricsDataForToken serializes mh as an OTLP protobuf payload and
// posts it to the ingest endpoint with the OTLP content type set. A non-empty
// accessToken overrides the token header configured on the client. It returns
// the number of data points dropped (all on failure, zero on success) and any
// error; transport errors are wrapped with the metrics so they can be retried.
func (s *sfxDPClient) pushOTLPMetricsDataForToken(ctx context.Context, mh pmetric.Metrics, accessToken string) (int, error) {
	dpCount := mh.DataPointCount()

	if s.logDataPoints {
		s.logger.Debug("Count of metrics to send in OTLP format",
			zap.Int("resource metrics", mh.ResourceMetrics().Len()),
			zap.Int("metrics", mh.MetricCount()),
			zap.Int("data points", dpCount))
		// Best effort: a marshal failure here only affects debug logging.
		if buf, err := metricsMarshaler.MarshalMetrics(mh); err != nil {
			s.logger.Error("Failed to marshal metrics for logging otlp histograms", zap.Error(err))
		} else {
			s.logger.Debug("Dispatching OTLP metrics", zap.String("pmetrics", string(buf)))
		}
	}

	body, compressed, err := s.encodeOTLPBody(mh)
	if err != nil {
		// Encoding failures will not succeed on retry; mark permanent.
		return dpCount, consumererror.NewPermanent(err)
	}

	extraHeaders := map[string]string{
		// Mark the payload as OTLP so the backend decodes it correctly.
		contentTypeHeader: otlpProtobufContentType,
	}
	if accessToken != "" {
		extraHeaders[splunk.SFxAccessTokenHeader] = accessToken
	}
	if compressed {
		extraHeaders[contentEncodingHeader] = "gzip"
	}

	s.logger.Debug("Sending metrics in OTLP format")

	if err = s.postData(ctx, body, extraHeaders); err != nil {
		// Attach the metrics so the queued-retry machinery can resend them.
		return dpCount, consumererror.NewMetrics(err, mh)
	}

	return 0, nil
}

// encodeOTLPBody serializes md as an OTLP ExportMetricsServiceRequest protobuf
// and hands the bytes to getReader, which returns a reader over the payload
// and reports whether it was gzip-compressed.
func (s *sfxDPClient) encodeOTLPBody(md pmetric.Metrics) (bodyReader io.Reader, compressed bool, err error) {
	req := pmetricotlp.NewExportRequestFromMetrics(md)
	raw, marshalErr := req.MarshalProto()
	if marshalErr != nil {
		return nil, false, marshalErr
	}
	return s.getReader(raw)
}
4 changes: 4 additions & 0 deletions exporter/signalfxexporter/exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ func newSignalFxExporter(
config.IncludeMetrics,
config.NonAlphanumericDimensionChars,
config.DropHistogramBuckets,
!config.SendOTLPHistograms, // if SendOTLPHistograms is true, do not process histograms when converting to SFx
)
if err != nil {
return nil, fmt.Errorf("failed to create metric converter: %w", err)
Expand Down Expand Up @@ -121,6 +122,7 @@ func (se *signalfxExporter) start(ctx context.Context, host component.Host) (err
logger: se.logger,
accessTokenPassthrough: se.config.AccessTokenPassthrough,
converter: se.converter,
sendOTLPHistograms: se.config.SendOTLPHistograms,
}

apiTLSCfg, err := se.config.APITLSSettings.LoadTLSConfig()
Expand Down Expand Up @@ -220,7 +222,9 @@ func (se *signalfxExporter) createClient(host component.Host) (*http.Client, err
}

func (se *signalfxExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error {

_, err := se.pushMetricsData(ctx, md)

if err == nil && se.hostMetadataSyncer != nil {
se.hostMetadataSyncer.Sync(md)
}
Expand Down
Loading

0 comments on commit 33f7bb0

Please sign in to comment.