diff --git a/cmd/tempo-query/main.go b/cmd/tempo-query/main.go
index e205a4e1041..80235f1dd23 100644
--- a/cmd/tempo-query/main.go
+++ b/cmd/tempo-query/main.go
@@ -11,10 +11,9 @@ import (
 "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
 "github.com/jaegertracing/jaeger/storage/dependencystore"
 "github.com/jaegertracing/jaeger/storage/spanstore"
- otgrpc "github.com/opentracing-contrib/go-grpc"
- "github.com/opentracing/opentracing-go"
 "github.com/spf13/viper"
 jaeger_config "github.com/uber/jaeger-client-go/config"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 google_grpc "google.golang.org/grpc"
 "github.com/grafana/tempo/cmd/tempo-query/tempo"
@@ -62,8 +61,7 @@ func main() {
 Store: plugin,
 }, func(options []google_grpc.ServerOption) *google_grpc.Server {
 return hcplugin.DefaultGRPCServer([]google_grpc.ServerOption{
- google_grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer())),
- google_grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer())),
+ google_grpc.StatsHandler(otelgrpc.NewServerHandler()),
 })
 })
 }
diff --git a/cmd/tempo-query/tempo/plugin.go b/cmd/tempo-query/tempo/plugin.go
index b045c2b94f3..e3a91254bf2 100644
--- a/cmd/tempo-query/tempo/plugin.go
+++ b/cmd/tempo-query/tempo/plugin.go
@@ -18,9 +18,9 @@ import (
 tlsCfg "github.com/grafana/dskit/crypto/tls"
 "github.com/grafana/dskit/user"
 "github.com/grafana/tempo/pkg/tempopb"
- "github.com/opentracing/opentracing-go"
- ot_log "github.com/opentracing/opentracing-go/log"
 "go.opentelemetry.io/collector/pdata/ptrace"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/propagation"
 "google.golang.org/grpc/metadata"
 jaeger "github.com/jaegertracing/jaeger/model"
@@ -52,6 +52,8 @@ var tlsVersions = map[string]uint16{
 "VersionTLS13": tls.VersionTLS13,
 }
+var tracer = otel.Tracer("tempo-query")
+
 type Backend struct {
 tempoBackend string
 tlsEnabled bool
@@ -181,10 +183,10 @@ func (b *Backend) apiSchema() string {
 func (b *Backend) GetTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger.Trace, error) {
 url := fmt.Sprintf("%s://%s/api/traces/%s", b.apiSchema(), b.tempoBackend, traceID)
- span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.GetTrace")
- defer span.Finish()
+ ctx, span := tracer.Start(ctx, "tempo-query.GetTrace")
+ defer span.End()
- req, err := b.newGetRequest(ctx, url, span)
+ req, err := b.newGetRequest(ctx, url)
 if err != nil {
 return nil, err
 }
@@ -226,7 +228,7 @@ func (b *Backend) GetTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger
 ProcessMap: []jaeger.Trace_ProcessMapping{},
 }
- span.LogFields(ot_log.String("msg", "build process map"))
+ span.AddEvent("build process map")
 // otel proto conversion doesn't set jaeger processes
 for _, batch := range jaegerBatches {
 for _, s := range batch.Spans {
@@ -250,17 +252,17 @@ func (b *Backend) calculateTimeRange() (int64, int64) {
 }
 func (b *Backend) GetServices(ctx context.Context) ([]string, error) {
- span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.GetOperations")
- defer span.Finish()
+ ctx, span := tracer.Start(ctx, "tempo-query.GetOperations")
+ defer span.End()
- return b.lookupTagValues(ctx, span, serviceSearchTag)
+ return b.lookupTagValues(ctx, serviceSearchTag)
 }
 func (b *Backend) GetOperations(ctx context.Context, _ jaeger_spanstore.OperationQueryParameters) ([]jaeger_spanstore.Operation, error) {
- span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.GetOperations")
- defer span.Finish()
+ ctx, span := tracer.Start(ctx, "tempo-query.GetOperations")
+ defer span.End()
- tagValues, err := b.lookupTagValues(ctx, span, operationSearchTag)
+ tagValues, err := b.lookupTagValues(ctx, operationSearchTag)
 if err != nil {
 return nil, err
 }
@@ -277,15 +279,15 @@ func (b *Backend) GetOperations(ctx context.Context, _ jaeger_spanstore.Operatio
 }
 func (b *Backend) FindTraces(ctx context.Context, query *jaeger_spanstore.TraceQueryParameters) ([]*jaeger.Trace, error) {
- span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.FindTraces")
- defer span.Finish()
+ ctx, span := tracer.Start(ctx, "tempo-query.FindTraces")
+ defer span.End()
 traceIDs, err := b.FindTraceIDs(ctx, query)
 if err != nil {
 return nil, err
 }
- span.LogFields(ot_log.String("msg", fmt.Sprintf("Found %d trace IDs", len(traceIDs))))
+ span.AddEvent(fmt.Sprintf("Found %d trace IDs", len(traceIDs)))
 // for every traceID, get the full trace
 var jaegerTraces []*jaeger.Trace
@@ -293,21 +295,22 @@ func (b *Backend) FindTraces(ctx context.Context, query *jaeger_spanstore.TraceQ
 trace, err := b.GetTrace(ctx, traceID)
 if err != nil {
 // TODO this seems to be an internal inconsistency error, ignore so we can still show the rest
- span.LogFields(ot_log.Error(fmt.Errorf("could not get trace for traceID %v: %w", traceID, err)))
+ span.AddEvent(fmt.Sprintf("could not get trace for traceID %v", traceID))
+ span.RecordError(err)
 continue
 }
 jaegerTraces = append(jaegerTraces, trace)
 }
- span.LogFields(ot_log.String("msg", fmt.Sprintf("Returning %d traces", len(jaegerTraces))))
+ span.AddEvent(fmt.Sprintf("Returning %d traces", len(jaegerTraces)))
 return jaegerTraces, nil
 }
 func (b *Backend) FindTraceIDs(ctx context.Context, query *jaeger_spanstore.TraceQueryParameters) ([]jaeger.TraceID, error) {
- span, ctx := opentracing.StartSpanFromContext(ctx, "tempo-query.FindTraceIDs")
- defer span.Finish()
+ ctx, span := tracer.Start(ctx, "tempo-query.FindTraceIDs")
+ defer span.End()
 url := url.URL{
 Scheme: b.apiSchema(),
@@ -333,7 +336,7 @@ func (b *Backend) FindTraceIDs(ctx context.Context, query *jaeger_spanstore.Trac
 url.RawQuery = urlQuery.Encode()
- req, err := b.newGetRequest(ctx, url.String(), span)
+ req, err := b.newGetRequest(ctx, url.String())
 if err != nil {
 return nil, err
 }
@@ -398,7 +401,7 @@ func createTagsQueryParam(service string, operation string, tags map[string]stri
 return tagsBuilder.String(), nil
 }
-func (b *Backend) lookupTagValues(ctx context.Context, span opentracing.Span, tagName string) ([]string, error) {
+func (b *Backend) lookupTagValues(ctx context.Context, tagName string) ([]string, error) {
 var url string
 if b.QueryServicesDuration == nil {
@@ -408,7 +411,7 @@ func (b *Backend) lookupTagValues(ctx context.Context, span opentracing.Span, ta
 url = fmt.Sprintf("%s://%s/api/search/tag/%s/values?start=%d&end=%d", b.apiSchema(), b.tempoBackend, tagName, startTime, endTime)
 }
- req, err := b.newGetRequest(ctx, url, span)
+ req, err := b.newGetRequest(ctx, url)
 if err != nil {
 return nil, err
 }
@@ -445,20 +448,13 @@ func (b *Backend) WriteSpan(context.Context, *jaeger.Span) error {
 return nil
 }
-func (b *Backend) newGetRequest(ctx context.Context, url string, span opentracing.Span) (*http.Request, error) {
+func (b *Backend) newGetRequest(ctx context.Context, url string) (*http.Request, error) {
 req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 if err != nil {
 return nil, err
 }
- if tracer := opentracing.GlobalTracer(); tracer != nil {
- carrier := make(opentracing.TextMapCarrier, len(req.Header))
- 
for k, v := range req.Header { - carrier.Set(k, v[0]) - } - // this is not really loggable or anything we can react to. just ignoring this error - _ = tracer.Inject(span.Context(), opentracing.TextMap, carrier) - } + otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header)) // currently Jaeger Query will only propagate bearer token to the grpc backend and no other headers // so we are going to extract the tenant id from the header, if it exists and use it diff --git a/cmd/tempo/main.go b/cmd/tempo/main.go index c79cd046188..1cd8623d709 100644 --- a/cmd/tempo/main.go +++ b/cmd/tempo/main.go @@ -15,14 +15,13 @@ import ( "github.com/grafana/dskit/flagext" dslog "github.com/grafana/dskit/log" "github.com/grafana/dskit/spanprofiler" - "github.com/grafana/dskit/tracing" ot "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/version" "go.opentelemetry.io/otel" oc_bridge "go.opentelemetry.io/otel/bridge/opencensus" ot_bridge "go.opentelemetry.io/otel/bridge/opentracing" - "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" @@ -89,12 +88,7 @@ func main() { } // Init tracer - var shutdownTracer func() - if config.UseOTelTracer { - shutdownTracer, err = installOpenTelemetryTracer(config) - } else { - shutdownTracer, err = installOpenTracingTracer(config) - } + shutdownTracer, err := installOpenTelemetryTracer(config) if err != nil { level.Error(log.Logger).Log("msg", "error initialising tracer", "err", err) os.Exit(1) @@ -221,33 +215,15 @@ func loadConfig() (*app.Config, bool, error) { return config, configVerify, nil } -func installOpenTracingTracer(config *app.Config) (func(), error) { - level.Info(log.Logger).Log("msg", "initialising OpenTracing tracer") - - // Setting the environment variable JAEGER_AGENT_HOST enables tracing - trace, err := tracing.NewFromEnv(fmt.Sprintf("%s-%s", appName, config.Target)) - if err != nil { - return nil, fmt.Errorf("error initialising tracer: %w", err) - } - ot.SetGlobalTracer(spanprofiler.NewTracer(ot.GlobalTracer())) - - return func() { - if err := trace.Close(); err != nil { - level.Error(log.Logger).Log("msg", "error closing tracing", "err", err) - os.Exit(1) - } - }, nil -} - func installOpenTelemetryTracer(config *app.Config) (func(), error) { level.Info(log.Logger).Log("msg", "initialising OpenTelemetry tracer") // for now, migrate OpenTracing Jaeger environment variables migrateJaegerEnvironmentVariables() - exp, err := jaeger.New(jaeger.WithCollectorEndpoint()) + exp, err := otlptracehttp.New(context.Background(), otlptracehttp.WithInsecure()) if err != nil { - return nil, fmt.Errorf("failed to create Jaeger exporter: %w", err) + return nil, fmt.Errorf("failed to create OTLP exporter: %w", err) } resources, err := resource.New(context.Background(), diff --git a/go.mod b/go.mod index 711e1934ca8..4eb5999e277 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,6 @@ require ( github.com/gorilla/mux v1.8.1 github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7 github.com/grafana/e2e v0.1.1 - github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/hashicorp/go-hclog v1.6.2 github.com/hashicorp/go-plugin v1.6.0 github.com/jaegertracing/jaeger v1.55.0 @@ -45,7 +44,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver 
v0.97.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.97.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.97.0 - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e + github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing/opentracing-go v1.2.0 github.com/pierrec/lz4/v4 v4.1.21 github.com/pkg/errors v0.9.1 @@ -74,7 +73,6 @@ require ( go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/bridge/opencensus v1.23.1 go.opentelemetry.io/otel/bridge/opentracing v1.21.0 - go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/metric v1.24.0 go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 @@ -118,6 +116,7 @@ require ( go.opentelemetry.io/collector/otelcol v0.95.0 go.opentelemetry.io/collector/processor v0.97.0 go.opentelemetry.io/collector/receiver v0.97.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 go.opentelemetry.io/proto/otlp v1.1.0 @@ -317,7 +316,6 @@ require ( go.opentelemetry.io/collector/featuregate v1.4.0 // indirect go.opentelemetry.io/collector/service v0.95.0 // indirect go.opentelemetry.io/contrib/config v0.4.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.23.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.23.1 // indirect diff --git a/go.sum b/go.sum index 1976711af1a..c1e44cf2efc 100644 --- a/go.sum +++ b/go.sum @@ -519,7 +519,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= @@ -1151,8 +1150,6 @@ go.opentelemetry.io/otel/bridge/opencensus v1.23.1 h1:QmGawK5vW6UdHXypZwWUuag27d go.opentelemetry.io/otel/bridge/opencensus v1.23.1/go.mod h1:TNxwRvdhakpilWQImJM/a4yd/8mgqDhRVC9Bph9wI/k= go.opentelemetry.io/otel/bridge/opentracing v1.21.0 h1:7AfuSFhyvBmt/0YskcdxDyTdHPjQfrHcZQo6Zu5srF4= go.opentelemetry.io/otel/bridge/opentracing v1.21.0/go.mod h1:giUOMajCV30LvlPHnzRDNBvDV3/NmrGVrqCp/1suDok= -go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= -go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.23.1 h1:ZqRWZJGHXV/1yCcEEVJ6/Uz2JtM79DNS8OZYa3vVY/A= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.23.1/go.mod 
h1:D7ynngPWlGJrqyGSDOdscuv7uqttfCE3jcBvffDv9y4= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.23.1 h1:q/Nj5/2TZRIt6PderQ9oU0M00fzoe8UZuINGw6ETGTw= diff --git a/modules/distributor/distributor.go b/modules/distributor/distributor.go index 159048ce621..4723ac3519f 100644 --- a/modules/distributor/distributor.go +++ b/modules/distributor/distributor.go @@ -17,12 +17,12 @@ import ( ring_client "github.com/grafana/dskit/ring/client" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/util/strutil" "github.com/segmentio/fasthash/fnv1a" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/otel" "google.golang.org/grpc/codes" "google.golang.org/grpc/health/grpc_health_v1" @@ -108,6 +108,8 @@ var ( }) ) +var tracer = otel.Tracer("distributor") + // rebatchedTrace is used to more cleanly pass the set of data type rebatchedTrace struct { id []byte @@ -307,8 +309,8 @@ func (d *Distributor) extractBasicInfo(ctx context.Context, traces ptrace.Traces // PushTraces pushes a batch of traces func (d *Distributor) PushTraces(ctx context.Context, traces ptrace.Traces) (*tempopb.PushResponse, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "distributor.PushTraces") - defer span.Finish() + ctx, span := tracer.Start(ctx, "distributor.PushTraces") + defer span.End() userID, spanCount, size, err := d.extractBasicInfo(ctx, traces) if err != nil { diff --git a/modules/distributor/forwarder/otlpgrpc/forwarder.go b/modules/distributor/forwarder/otlpgrpc/forwarder.go index 4b4ad18084b..70931bfb96d 100644 --- a/modules/distributor/forwarder/otlpgrpc/forwarder.go +++ b/modules/distributor/forwarder/otlpgrpc/forwarder.go @@ -9,10 +9,9 @@ import ( "github.com/go-kit/log" "github.com/grafana/dskit/middleware" grpcmw "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/opentracing/opentracing-go" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.uber.org/multierr" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -122,6 +121,7 @@ func (f *Forwarder) newTraceOTLPGRPCClientAndConn(ctx context.Context, endpoint grpc.WithTransportCredentials(creds), grpc.WithUnaryInterceptor(grpcmw.ChainUnaryClient(unaryClientInterceptor...)), grpc.WithStreamInterceptor(grpcmw.ChainStreamClient(streamClientInterceptor...)), + grpc.WithStatsHandler(otelgrpc.NewClientHandler()), ) grpcClientConn, err := grpc.DialContext(ctx, endpoint, opts...) 
@@ -134,10 +134,8 @@ func (f *Forwarder) newTraceOTLPGRPCClientAndConn(ctx context.Context, endpoint func instrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { return []grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), middleware.ClientUserHeaderInterceptor, }, []grpc.StreamClientInterceptor{ - otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), middleware.StreamClientUserHeaderInterceptor, } } diff --git a/modules/distributor/receiver/shim.go b/modules/distributor/receiver/shim.go index f043e548ba1..0d824123b49 100644 --- a/modules/distributor/receiver/shim.go +++ b/modules/distributor/receiver/shim.go @@ -14,7 +14,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" - "github.com/opentracing/opentracing-go" prom_client "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.opencensus.io/stats/view" @@ -28,6 +27,7 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/otel" metricnoop "go.opentelemetry.io/otel/metric/noop" tracenoop "go.opentelemetry.io/otel/trace/noop" "go.uber.org/multierr" @@ -62,6 +62,8 @@ var ( statReceiverKafka = usagestats.NewInt("receiver_enabled_kafka") ) +var tracer = otel.Tracer("distributor/receiver") + type RetryableError struct { err error st *status.Status @@ -332,8 +334,8 @@ func (r *receiversShim) stopping(_ error) error { // ConsumeTraces implements consumer.Trace func (r *receiversShim) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { - span, ctx := opentracing.StartSpanFromContext(ctx, "distributor.ConsumeTraces") - defer span.Finish() + ctx, span := tracer.Start(ctx, "distributor.ConsumeTraces") + defer span.End() var err error diff --git a/modules/frontend/frontend.go b/modules/frontend/frontend.go index b45127d5f95..b96d2d8f490 100644 --- a/modules/frontend/frontend.go +++ b/modules/frontend/frontend.go @@ -10,6 +10,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" //nolint:all //deprecated + "go.opentelemetry.io/otel" "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" @@ -47,6 +48,8 @@ type QueryFrontend struct { logger log.Logger } +var tracer = otel.Tracer("frontend") + // New returns a new QueryFrontend func New(cfg Config, next http.RoundTripper, o overrides.Interface, reader tempodb.Reader, cacheProvider cache.Provider, apiPrefix string, logger log.Logger, registerer prometheus.Registerer) (*QueryFrontend, error) { level.Info(logger).Log("msg", "creating middleware in query frontend") diff --git a/modules/frontend/handler.go b/modules/frontend/handler.go index c5fde84b703..4a9fbe2e3ff 100644 --- a/modules/frontend/handler.go +++ b/modules/frontend/handler.go @@ -10,12 +10,13 @@ import ( "time" "github.com/grafana/dskit/flagext" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/grafana/tempo/pkg/util/tracing" ) @@ -62,9 +63,9 @@ func (f *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { traceID, _ := 
tracing.ExtractTraceID(ctx) // add orgid to existing spans - span := opentracing.SpanFromContext(r.Context()) + span := trace.SpanFromContext(r.Context()) if span != nil { - span.SetTag("orgID", orgID) + span.SetAttributes(attribute.String("orgID", orgID)) } resp, err := f.roundTripper.RoundTrip(r) diff --git a/modules/frontend/metrics_query_range_sharder.go b/modules/frontend/metrics_query_range_sharder.go index 13baddab9bd..5c16d9399bf 100644 --- a/modules/frontend/metrics_query_range_sharder.go +++ b/modules/frontend/metrics_query_range_sharder.go @@ -11,7 +11,6 @@ import ( "github.com/go-kit/log" //nolint:all deprecated "github.com/gogo/protobuf/jsonpb" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/grafana/tempo/modules/frontend/combiner" "github.com/grafana/tempo/modules/frontend/pipeline" @@ -55,8 +54,8 @@ func newAsyncQueryRangeSharder(reader tempodb.Reader, o overrides.Interface, cfg } func (s queryRangeSharder) RoundTrip(r *http.Request) (pipeline.Responses[combiner.PipelineResponse], error) { - span, ctx := opentracing.StartSpanFromContext(r.Context(), "frontend.QueryRangeSharder") - defer span.Finish() + ctx, span := tracer.Start(r.Context(), "frontend.QueryRangeSharder") + defer span.End() var ( err error diff --git a/modules/frontend/pipeline/pipeline.go b/modules/frontend/pipeline/pipeline.go index f0382292a9c..4a064eede62 100644 --- a/modules/frontend/pipeline/pipeline.go +++ b/modules/frontend/pipeline/pipeline.go @@ -4,8 +4,11 @@ import ( "net/http" "github.com/grafana/tempo/modules/frontend/combiner" + "go.opentelemetry.io/otel" ) +var tracer = otel.Tracer("frontend/pipeline") + // // Async Pipeline // diff --git a/modules/frontend/pipeline/sync_handler_retry.go b/modules/frontend/pipeline/sync_handler_retry.go index 9c92ddd8448..0d67676076c 100644 --- a/modules/frontend/pipeline/sync_handler_retry.go +++ b/modules/frontend/pipeline/sync_handler_retry.go @@ -9,11 +9,10 @@ import ( "github.com/grafana/dskit/httpgrpc" "github.com/grafana/tempo/modules/frontend/queue" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - ot_log "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) func NewRetryWare(maxRetries int, registerer prometheus.Registerer) Middleware { @@ -42,9 +41,8 @@ type retryWare struct { // RoundTrip implements http.RoundTripper func (r retryWare) RoundTrip(req *http.Request) (*http.Response, error) { ctx := req.Context() - span, ctx := opentracing.StartSpanFromContext(ctx, "frontend.Retry") - defer span.Finish() - ext.SpanKindRPCClient.Set(span) + ctx, span := tracer.Start(ctx, "frontend.Retry", trace.WithSpanKind(trace.SpanKindClient)) + defer span.End() // context propagation req = req.WithContext(ctx) @@ -106,12 +104,11 @@ func (r retryWare) RoundTrip(req *http.Request) (*http.Response, error) { // https://github.com/grafana/tempo/issues/857 errMsg := fmt.Sprint(err) - span.LogFields( - ot_log.String("msg", "error processing request. retrying"), - ot_log.Int("try", tries), - ot_log.Int("status_code", statusCode), - ot_log.String("errMsg", errMsg), - ) + span.AddEvent("error processing request. 
retrying", trace.WithAttributes( + attribute.Int("try", tries), + attribute.Int("status_code", statusCode), + attribute.String("errMsg", errMsg), + )) } } diff --git a/modules/frontend/search_sharder.go b/modules/frontend/search_sharder.go index c4a9734fc17..d4349b1d290 100644 --- a/modules/frontend/search_sharder.go +++ b/modules/frontend/search_sharder.go @@ -9,7 +9,6 @@ import ( "github.com/go-kit/log" //nolint:all deprecated "github.com/gogo/protobuf/jsonpb" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/segmentio/fasthash/fnv1a" "github.com/grafana/tempo/modules/frontend/combiner" @@ -86,8 +85,8 @@ func (s asyncSearchSharder) RoundTrip(r *http.Request) (pipeline.Responses[combi if err != nil { return pipeline.NewBadRequest(err), nil } - span, ctx := opentracing.StartSpanFromContext(requestCtx, "frontend.ShardSearch") - defer span.Finish() + ctx, span := tracer.Start(requestCtx, "frontend.ShardSearch") + defer span.End() // calculate and enforce max search duration maxDuration := s.maxDuration(tenantID) diff --git a/modules/frontend/tag_sharder.go b/modules/frontend/tag_sharder.go index 8ff5e66673f..0305ccec4b2 100644 --- a/modules/frontend/tag_sharder.go +++ b/modules/frontend/tag_sharder.go @@ -16,7 +16,6 @@ import ( "github.com/grafana/tempo/pkg/traceql" "github.com/grafana/tempo/tempodb" "github.com/grafana/tempo/tempodb/backend" - "github.com/opentracing/opentracing-go" "github.com/segmentio/fasthash/fnv1a" ) @@ -201,8 +200,8 @@ func (s searchTagSharder) RoundTrip(r *http.Request) (pipeline.Responses[combine if err != nil { return pipeline.NewBadRequest(err), nil } - span, ctx := opentracing.StartSpanFromContext(requestCtx, "frontend.ShardSearchTags") - defer span.Finish() + ctx, span := tracer.Start(requestCtx, "frontend.ShardSearchTags") + defer span.End() // calculate and enforce max search duration maxDuration := s.maxDuration(tenantID) diff --git a/modules/frontend/traceid_sharder.go b/modules/frontend/traceid_sharder.go index a71aae98508..3c9e01c3092 100644 --- a/modules/frontend/traceid_sharder.go +++ b/modules/frontend/traceid_sharder.go @@ -7,7 +7,6 @@ import ( "github.com/go-kit/log" //nolint:all //deprecated "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/grafana/tempo/modules/frontend/combiner" "github.com/grafana/tempo/modules/frontend/pipeline" @@ -40,8 +39,8 @@ func newAsyncTraceIDSharder(cfg *TraceByIDConfig, logger log.Logger) pipeline.As // RoundTrip implements http.RoundTripper func (s asyncTraceSharder) RoundTrip(r *http.Request) (pipeline.Responses[combiner.PipelineResponse], error) { - span, ctx := opentracing.StartSpanFromContext(r.Context(), "frontend.ShardQuery") - defer span.Finish() + ctx, span := tracer.Start(r.Context(), "frontend.ShardQuery") + defer span.End() r = r.WithContext(ctx) reqs, err := s.buildShardedRequests(ctx, r) diff --git a/modules/frontend/v1/frontend.go b/modules/frontend/v1/frontend.go index e5f4ade8e69..e02df7ba928 100644 --- a/modules/frontend/v1/frontend.go +++ b/modules/frontend/v1/frontend.go @@ -8,6 +8,8 @@ import ( "time" "github.com/grafana/dskit/flagext" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -15,7 +17,6 @@ import ( "github.com/grafana/dskit/services" "github.com/grafana/dskit/tenant" "github.com/grafana/tempo/pkg/util/httpgrpcutil" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" 
"github.com/prometheus/client_golang/prometheus/promauto" @@ -25,6 +26,8 @@ import ( "github.com/grafana/tempo/pkg/validation" ) +var tracer = otel.Tracer("frontend/v1") + // Config for a Frontend. type Config struct { MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` @@ -71,7 +74,7 @@ type Frontend struct { type request struct { enqueueTime time.Time - queueSpan opentracing.Span + queueSpan trace.Span originalCtx context.Context request *httpgrpc.HTTPRequest @@ -164,14 +167,8 @@ func (f *Frontend) cleanupInactiveUserMetrics(user string) { // RoundTripGRPC round trips a proto (instead of a HTTP request). func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { // Propagate trace context in gRPC too - this will be ignored if using HTTP. - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) - if tracer != nil && span != nil { - carrier := (*httpgrpcutil.HttpgrpcHeadersCarrier)(req) - err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) - if err != nil { - return nil, err - } - } + carrier := (*httpgrpcutil.HttpgrpcHeadersCarrier)(req) + otel.GetTextMapPropagator().Inject(ctx, carrier) request := request{ request: req, @@ -230,7 +227,7 @@ func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error { req := reqWrapper.(*request) f.queueDuration.Observe(time.Since(req.enqueueTime).Seconds()) - req.queueSpan.Finish() + req.queueSpan.End() // only add if not expired if req.originalCtx.Err() != nil { @@ -366,7 +363,7 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error { now := time.Now() req.enqueueTime = now - req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued") + _, req.queueSpan = tracer.Start(ctx, "queued") // aggregate the max queriers limit in the case of a multi tenant query maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, f.limits.MaxQueriersPerUser) diff --git a/modules/generator/client/client.go b/modules/generator/client/client.go index f2ea99cc395..645d31e4477 100644 --- a/modules/generator/client/client.go +++ b/modules/generator/client/client.go @@ -8,8 +8,7 @@ import ( "github.com/grafana/dskit/grpcclient" "github.com/grafana/dskit/middleware" ring_client "github.com/grafana/dskit/ring/client" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/health/grpc_health_v1" @@ -44,6 +43,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func New(addr string, cfg Config) (*Client, error) { opts := []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithStatsHandler(otelgrpc.NewClientHandler()), } instrumentationOpts, err := cfg.GRPCClientConfig.DialOption(instrumentation()) @@ -65,10 +65,8 @@ func New(addr string, cfg Config) (*Client, error) { func instrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { return []grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), middleware.ClientUserHeaderInterceptor, }, []grpc.StreamClientInterceptor{ - otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), middleware.StreamClientUserHeaderInterceptor, } } diff --git a/modules/generator/generator.go b/modules/generator/generator.go index fdbacac3a20..a1cf308d9fd 100644 --- 
a/modules/generator/generator.go +++ b/modules/generator/generator.go @@ -14,8 +14,9 @@ import ( "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.uber.org/atomic" "github.com/grafana/tempo/modules/generator/storage" @@ -33,6 +34,8 @@ const ( ringNumTokens = 256 ) +var tracer = otel.Tracer("generator") + var ( ErrUnconfigured = errors.New("no metrics_generator.storage.path configured, metrics generator will be disabled") ErrReadOnly = errors.New("metrics-generator is shutting down") @@ -189,14 +192,14 @@ func (g *Generator) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest return nil, ErrReadOnly } - span, ctx := opentracing.StartSpanFromContext(ctx, "generator.PushSpans") - defer span.Finish() + ctx, span := tracer.Start(ctx, "generator.PushSpans") + defer span.End() instanceID, err := user.ExtractOrgID(ctx) if err != nil { return nil, err } - span.SetTag("instanceID", instanceID) + span.SetAttributes(attribute.String("instanceID", instanceID)) instance, err := g.getOrCreateInstance(instanceID) if err != nil { diff --git a/modules/generator/http.go b/modules/generator/http.go index 5af54b949c6..9a347be4faf 100644 --- a/modules/generator/http.go +++ b/modules/generator/http.go @@ -6,7 +6,7 @@ import ( "time" "github.com/gogo/protobuf/jsonpb" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/attribute" "github.com/grafana/tempo/pkg/api" "github.com/grafana/tempo/pkg/tempopb" @@ -16,10 +16,10 @@ func (g *Generator) SpanMetricsHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(g.cfg.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Generator.SpanMetricsHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Generator.SpanMetricsHandler") + defer span.End() - span.SetTag("requestURI", r.RequestURI) + span.SetAttributes(attribute.String("requestURI", r.RequestURI)) req, err := api.ParseSpanMetricsRequest(r) if err != nil { @@ -47,10 +47,10 @@ func (g *Generator) QueryRangeHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(g.cfg.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Generator.QueryRangeHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Generator.QueryRangeHandler") + defer span.End() - span.SetTag("requestURI", r.RequestURI) + span.SetAttributes(attribute.String("requestURI", r.RequestURI)) req, err := api.ParseQueryRangeRequest(r) if err != nil { diff --git a/modules/generator/processor/localblocks/processor.go b/modules/generator/processor/localblocks/processor.go index d19be0dadfd..debaaee1e8b 100644 --- a/modules/generator/processor/localblocks/processor.go +++ b/modules/generator/processor/localblocks/processor.go @@ -12,7 +12,9 @@ import ( "github.com/go-kit/log/level" "github.com/golang/groupcache/lru" "github.com/google/uuid" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" gen "github.com/grafana/tempo/modules/generator/processor" @@ -29,6 +31,8 @@ import ( "github.com/grafana/tempo/tempodb/wal" ) +var tracer = otel.Tracer("generator/processor/localblocks") + const timeBuffer = 5 * time.Minute // 
ProcessorOverrides is just the set of overrides needed here. @@ -495,11 +499,11 @@ func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeReque m := b.BlockMeta() - span, ctx := opentracing.StartSpanFromContext(ctx, "Processor.QueryRange.Block", opentracing.Tags{ - "block": m.BlockID, - "blockSize": m.Size, - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "Processor.QueryRange.Block", trace.WithAttributes( + attribute.String("block", m.BlockID.String()), + attribute.Int64("blockSize", int64(m.Size)), + )) + defer span.End() // TODO - caching f := traceql.NewSpansetFetcherWrapper(func(ctx context.Context, req traceql.FetchSpansRequest) (traceql.FetchSpansResponse, error) { diff --git a/modules/generator/processor/servicegraphs/servicegraphs.go b/modules/generator/processor/servicegraphs/servicegraphs.go index fbe4201d5a2..9ebdfce287f 100644 --- a/modules/generator/processor/servicegraphs/servicegraphs.go +++ b/modules/generator/processor/servicegraphs/servicegraphs.go @@ -10,10 +10,10 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/util/strutil" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.18.0" @@ -27,6 +27,8 @@ import ( tempo_util "github.com/grafana/tempo/pkg/util" ) +var tracer = otel.Tracer("generator/processor/servicegraphs") + var ( metricDroppedSpans = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "tempo", @@ -151,8 +153,8 @@ func (p *Processor) Name() string { } func (p *Processor) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) { - span, _ := opentracing.StartSpanFromContext(ctx, "servicegraphs.PushSpans") - defer span.Finish() + _, span := tracer.Start(ctx, "servicegraphs.PushSpans") + defer span.End() if err := p.consume(req.Batches); err != nil { var tmsErr *tooManySpansError diff --git a/modules/generator/processor/spanmetrics/spanmetrics.go b/modules/generator/processor/spanmetrics/spanmetrics.go index 11e331a9ecc..31d29aaa5ad 100644 --- a/modules/generator/processor/spanmetrics/spanmetrics.go +++ b/modules/generator/processor/spanmetrics/spanmetrics.go @@ -4,8 +4,8 @@ import ( "context" "time" - "github.com/opentracing/opentracing-go" "github.com/prometheus/prometheus/util/strutil" + "go.opentelemetry.io/otel" gen "github.com/grafana/tempo/modules/generator/processor" processor_util "github.com/grafana/tempo/modules/generator/processor/util" @@ -25,6 +25,8 @@ const ( targetInfo = "traces_target_info" ) +var tracer = otel.Tracer("generator/processor/spanmetrics") + type Processor struct { Cfg Config @@ -104,8 +106,8 @@ func (p *Processor) Name() string { } func (p *Processor) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) { - span, _ := opentracing.StartSpanFromContext(ctx, "spanmetrics.PushSpans") - defer span.Finish() + _, span := tracer.Start(ctx, "spanmetrics.PushSpans") + defer span.End() p.aggregateMetrics(req.Batches) } diff --git a/modules/ingester/client/client.go b/modules/ingester/client/client.go index 0fb1b518602..0601af04f2d 100644 --- a/modules/ingester/client/client.go +++ b/modules/ingester/client/client.go @@ -8,8 +8,7 @@ import ( "github.com/grafana/dskit/grpcclient" "github.com/grafana/dskit/middleware" ring_client "github.com/grafana/dskit/ring/client" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - opentracing 
"github.com/opentracing/opentracing-go" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/health/grpc_health_v1" @@ -45,6 +44,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func New(addr string, cfg Config) (*Client, error) { opts := []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithStatsHandler(otelgrpc.NewClientHandler()), } instrumentationOpts, err := cfg.GRPCClientConfig.DialOption(instrumentation()) @@ -67,10 +67,8 @@ func New(addr string, cfg Config) (*Client, error) { func instrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { return []grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), middleware.ClientUserHeaderInterceptor, }, []grpc.StreamClientInterceptor{ - otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), middleware.StreamClientUserHeaderInterceptor, } } diff --git a/modules/ingester/flush.go b/modules/ingester/flush.go index 88a718ee8f6..b6b1084063c 100644 --- a/modules/ingester/flush.go +++ b/modules/ingester/flush.go @@ -13,12 +13,11 @@ import ( "github.com/google/uuid" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" - ot "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/uber/jaeger-client-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/util/log" ) @@ -291,21 +290,20 @@ func (i *Ingester) handleComplete(op *flushOp) (retry bool, err error) { // withSpan adds traceID to a logger, if span is sampled // TODO: move into some central trace/log package -func withSpan(logger gklog.Logger, sp ot.Span) gklog.Logger { +func withSpan(logger gklog.Logger, sp trace.Span) gklog.Logger { if sp == nil { return logger } - sctx, ok := sp.Context().(jaeger.SpanContext) - if !ok || !sctx.IsSampled() { + if !sp.SpanContext().IsSampled() { return logger } - return gklog.With(logger, "traceID", sctx.TraceID().String()) + return gklog.With(logger, "traceID", sp.SpanContext().TraceID().String()) } func (i *Ingester) handleFlush(ctx context.Context, userID string, blockID uuid.UUID) (retry bool, err error) { - sp, ctx := ot.StartSpanFromContext(ctx, "flush", ot.Tag{Key: "organization", Value: userID}, ot.Tag{Key: "blockID", Value: blockID.String()}) - defer sp.Finish() + ctx, sp := tracer.Start(ctx, "flush", trace.WithAttributes(attribute.String("organization", userID), attribute.String("blockID", blockID.String()))) + defer sp.End() withSpan(level.Info(log.Logger), sp).Log("msg", "flushing block", "userid", userID, "block", blockID.String()) instance, err := i.getOrCreateInstance(userID) @@ -327,8 +325,8 @@ func (i *Ingester) handleFlush(ctx context.Context, userID string, blockID uuid. 
metricFlushDuration.Observe(time.Since(start).Seconds()) metricFlushSize.Observe(float64(block.BlockMeta().Size)) if err != nil { - ext.Error.Set(sp, true) - sp.LogFields(otlog.Error(err)) + sp.SetStatus(codes.Error, "") + sp.RecordError(err) return true, err } diff --git a/modules/ingester/ingester.go b/modules/ingester/ingester.go index 571458e6d7f..3a24db2ae36 100644 --- a/modules/ingester/ingester.go +++ b/modules/ingester/ingester.go @@ -12,10 +12,11 @@ import ( "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" - ot_log "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + oteltrace "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" "google.golang.org/grpc/codes" @@ -43,6 +44,8 @@ var metricFlushQueueLength = promauto.NewGauge(prometheus.GaugeOpts{ Help: "The total number of series pending in the flush queue.", }) +var tracer = otel.Tracer("ingester") + const ( ingesterRingKey = "ring" ) @@ -265,8 +268,8 @@ func (i *Ingester) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDRequ } // tracing instrumentation - span, ctx := opentracing.StartSpanFromContext(ctx, "Ingester.FindTraceByID") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Ingester.FindTraceByID") + defer span.End() instanceID, err := user.ExtractOrgID(ctx) if err != nil { @@ -282,7 +285,7 @@ func (i *Ingester) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDRequ return nil, err } - span.LogFields(ot_log.Bool("trace found", trace != nil)) + span.AddEvent("trace found", oteltrace.WithAttributes(attribute.Bool("found", trace != nil))) return &tempopb.TraceByIDResponse{ Trace: trace, diff --git a/modules/ingester/instance.go b/modules/ingester/instance.go index 5db973aa11a..aac14a69cce 100644 --- a/modules/ingester/instance.go +++ b/modules/ingester/instance.go @@ -15,7 +15,6 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/status" "github.com/google/uuid" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/atomic" @@ -410,8 +409,8 @@ func (i *instance) ClearFlushedBlocks(completeBlockTimeout time.Duration) error } func (i *instance) FindTraceByID(ctx context.Context, id []byte) (*tempopb.Trace, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.FindTraceByID") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.FindTraceByID") + defer span.End() var err error var completeTrace *tempopb.Trace diff --git a/modules/ingester/instance_search.go b/modules/ingester/instance_search.go index ce0ffa7832d..1b120314ba8 100644 --- a/modules/ingester/instance_search.go +++ b/modules/ingester/instance_search.go @@ -7,7 +7,8 @@ import ( "sync" "github.com/google/uuid" - ot_log "github.com/opentracing/opentracing-go/log" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" "github.com/go-kit/log/level" @@ -21,12 +22,11 @@ import ( "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" ) func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tempopb.SearchResponse, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, 
"instance.Search") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.Search") + defer span.End() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -37,7 +37,7 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem maxResults = 20 } - span.LogFields(ot_log.String("SearchRequest", req.String())) + span.AddEvent("SearchRequest", trace.WithAttributes(attribute.String("request", req.String()))) var ( resultsMtx = sync.Mutex{} @@ -48,11 +48,11 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem ) search := func(blockID uuid.UUID, block common.Searcher, spanName string) { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.searchBlock."+spanName) - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.searchBlock."+spanName) + defer span.End() - span.LogFields(ot_log.Event("block entry mtx acquired")) - span.SetTag("blockID", blockID) + span.AddEvent("block entry mtx acquired") + span.SetAttributes(attribute.String("blockID", blockID.String())) var resp *tempopb.SearchResponse var err error @@ -115,7 +115,7 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem // then headblockMtx. Even if the likelihood is low it is a statistical certainly // that eventually a deadlock will occur. i.headBlockMtx.RLock() - span.LogFields(ot_log.String("msg", "acquired headblock mtx")) + span.AddEvent("acquired headblock mtx") if includeBlock(i.headBlock.BlockMeta(), req) { search(i.headBlock.BlockMeta().BlockID, i.headBlock, "headBlock") } @@ -136,7 +136,7 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem // and then attempting to retake the lock. i.blocksMtx.RLock() defer i.blocksMtx.RUnlock() - span.LogFields(ot_log.String("msg", "acquired blocks mtx")) + span.AddEvent("acquired blocks mtx") wg := sync.WaitGroup{} @@ -175,8 +175,8 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem } func (i *instance) SearchTags(ctx context.Context, scope string) (*tempopb.SearchTagsResponse, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTags") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTags") + defer span.End() userID, err := user.ExtractOrgID(ctx) if err != nil { @@ -200,8 +200,8 @@ func (i *instance) SearchTags(ctx context.Context, scope string) (*tempopb.Searc distinctValues := util.NewDistinctStringCollector(limit) search := func(ctx context.Context, s common.Searcher, dv *util.DistinctStringCollector, spanName string) error { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTags."+spanName) - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTags."+spanName) + defer span.End() if s == nil { return nil @@ -218,7 +218,7 @@ func (i *instance) SearchTags(ctx context.Context, scope string) (*tempopb.Searc } i.headBlockMtx.RLock() - span.LogFields(ot_log.String("msg", "acquired headblock mtx")) + span.AddEvent("acquired headblock mtx") err = search(ctx, i.headBlock, distinctValues, "headBlock") i.headBlockMtx.RUnlock() if err != nil { @@ -227,7 +227,7 @@ func (i *instance) SearchTags(ctx context.Context, scope string) (*tempopb.Searc i.blocksMtx.RLock() defer i.blocksMtx.RUnlock() - span.LogFields(ot_log.String("msg", "acquired blocks mtx")) + span.AddEvent("acquired blocks mtx") for _, b := range i.completingBlocks { if err = search(ctx, b, distinctValues, "completingBlock"); err != nil { @@ -251,8 +251,8 @@ func (i *instance) 
SearchTags(ctx context.Context, scope string) (*tempopb.Searc // SearchTagsV2 calls SearchTags for each scope and returns the results. func (i *instance) SearchTagsV2(ctx context.Context, scope string) (*tempopb.SearchTagsV2Response, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTagsV2") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTagsV2") + defer span.End() scopes := []string{scope} if scope == "" { @@ -375,8 +375,8 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag return nil, err } - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTagValuesV2") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTagValuesV2") + defer span.End() limit := i.limiter.limits.MaxBytesPerTagValuesQuery(userID) valueCollector := util.NewDistinctValueCollector[tempopb.TagValue](limit, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) @@ -438,12 +438,12 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag // then headblockMtx. Even if the likelihood is low it is a statistical certainly // that eventually a deadlock will occur. i.headBlockMtx.RLock() - span.LogFields(ot_log.String("msg", "acquired headblock mtx")) + span.AddEvent("acquired headblock mtx") if i.headBlock != nil { wg.Add(1) go func() { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTagValuesV2.headBlock") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTagValuesV2.headBlock") + defer span.End() defer i.headBlockMtx.RUnlock() defer wg.Done() if err := searchBlock(ctx, i.headBlock); err != nil { @@ -454,14 +454,14 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag i.blocksMtx.RLock() defer i.blocksMtx.RUnlock() - span.LogFields(ot_log.String("msg", "acquired blocks mtx")) + span.AddEvent("acquired blocks mtx") // completed blocks for _, b := range i.completeBlocks { wg.Add(1) go func(b *localBlock) { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTagValuesV2.completedBlock") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTagValuesV2.completedBlock") + defer span.End() defer wg.Done() if err := searchBlock(ctx, b); err != nil { anyErr.Store(fmt.Errorf("unexpected error searching complete block (%s): %w", b.BlockMeta().BlockID, err)) @@ -473,8 +473,8 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag for _, b := range i.completingBlocks { wg.Add(1) go func(b common.WALBlock) { - span, ctx := opentracing.StartSpanFromContext(ctx, "instance.SearchTagValuesV2.completingBlock") - defer span.Finish() + ctx, span := tracer.Start(ctx, "instance.SearchTagValuesV2.completingBlock") + defer span.End() defer wg.Done() if err := searchBlock(ctx, b); err != nil { anyErr.Store(fmt.Errorf("unexpected error searching completing block (%s): %w", b.BlockMeta().BlockID, err)) diff --git a/modules/ingester/local_block.go b/modules/ingester/local_block.go index d45e5d7d2ea..dc620aa3614 100644 --- a/modules/ingester/local_block.go +++ b/modules/ingester/local_block.go @@ -12,7 +12,6 @@ import ( "github.com/grafana/tempo/tempodb/backend/local" "github.com/grafana/tempo/tempodb/encoding" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" ) const nameFlushed = "flushed" @@ -50,8 +49,8 @@ func newLocalBlock(ctx context.Context, existingBlock common.BackendBlock, l *lo } func (c *localBlock) FindTraceByID(ctx 
context.Context, id common.ID, opts common.SearchOptions) (*tempopb.Trace, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "localBlock.FindTraceByID") - defer span.Finish() + ctx, span := tracer.Start(ctx, "localBlock.FindTraceByID") + defer span.End() return c.BackendBlock.FindTraceByID(ctx, id, opts) } diff --git a/modules/overrides/overrides.go b/modules/overrides/overrides.go index 4f4affc4ac1..48f85bec2bf 100644 --- a/modules/overrides/overrides.go +++ b/modules/overrides/overrides.go @@ -2,10 +2,13 @@ package overrides import ( "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel" ) const wildcardTenant = "*" +var tracer = otel.Tracer("overrides") + var metricOverridesLimitsDesc = prometheus.NewDesc( "tempo_limits_overrides", "Resource limit overrides applied to tenants", diff --git a/modules/overrides/user_configurable_overrides.go b/modules/overrides/user_configurable_overrides.go index 4734c0b79cf..9078ab6eae2 100644 --- a/modules/overrides/user_configurable_overrides.go +++ b/modules/overrides/user_configurable_overrides.go @@ -13,7 +13,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/services" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "golang.org/x/exp/maps" @@ -151,8 +150,8 @@ func (o *userConfigurableOverridesManager) stopping(error) error { } func (o *userConfigurableOverridesManager) reloadAllTenantLimits(ctx context.Context) error { - span, ctx := opentracing.StartSpanFromContext(ctx, "userConfigurableOverridesManager.reloadAllTenantLimits") - defer span.Finish() + ctx, span := tracer.Start(ctx, "userConfigurableOverridesManager.reloadAllTenantLimits") + defer span.End() traceID, _ := tracing.ExtractTraceID(ctx) level.Info(o.logger).Log("msg", "reloading all tenant limits", "traceID", traceID) diff --git a/modules/overrides/userconfigurable/api/api.go b/modules/overrides/userconfigurable/api/api.go index 7cb1984981e..8aca15668be 100644 --- a/modules/overrides/userconfigurable/api/api.go +++ b/modules/overrides/userconfigurable/api/api.go @@ -13,7 +13,9 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/services" jsoniter "github.com/json-iterator/go" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/modules/overrides" "github.com/grafana/tempo/modules/overrides/userconfigurable/client" @@ -24,6 +26,8 @@ import ( var errConflictingRuntimeOverrides = errors.New("tenant has conflicting overrides set in runtime config, contact your system administrator to perform changes through the API") +var tracer = otel.Tracer("overrides/userconfigurable/api") + type Validator interface { Validate(limits *client.Limits) error } @@ -71,22 +75,22 @@ func (a *UserConfigOverridesAPI) stopping(_ error) error { // get the Limits. Can return backend.ErrDoesNotExist func (a *UserConfigOverridesAPI) get(ctx context.Context, userID string) (*client.Limits, backend.Version, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "UserConfigOverridesAPI.get", opentracing.Tags{ - "userID": userID, - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "UserConfigOverridesAPI.get", trace.WithAttributes( + attribute.String("userID", userID), + )) + defer span.End() return a.client.Get(ctx, userID) } // set the Limits. 
Can return backend.ErrVersionDoesNotMatch, validationError func (a *UserConfigOverridesAPI) set(ctx context.Context, userID string, limits *client.Limits, version backend.Version, skipConflictingOverridesCheck bool) (backend.Version, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "UserConfigOverridesAPI.set", opentracing.Tags{ - "userID": userID, - "version": version, - "limits": logLimits(limits), - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "UserConfigOverridesAPI.set", trace.WithAttributes( + attribute.String("userID", userID), + attribute.String("version", string(version)), + attribute.String("limits", logLimits(limits)), + )) + defer span.End() traceID, _ := tracing.ExtractTraceID(ctx) err := a.validator.Validate(limits) @@ -110,10 +114,10 @@ func (a *UserConfigOverridesAPI) set(ctx context.Context, userID string, limits } func (a *UserConfigOverridesAPI) update(ctx context.Context, userID string, patch []byte, skipConflictingOverridesCheck bool) (*client.Limits, backend.Version, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "UserConfigOverridesAPI.update", opentracing.Tags{ - "userID": userID, - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "UserConfigOverridesAPI.update", trace.WithAttributes( + attribute.String("userID", userID), + )) + defer span.End() traceID, _ := tracing.ExtractTraceID(ctx) currLimits, currVersion, err := a.client.Get(ctx, userID) @@ -157,11 +161,11 @@ func (a *UserConfigOverridesAPI) update(ctx context.Context, userID string, patc } func (a *UserConfigOverridesAPI) delete(ctx context.Context, userID string, version backend.Version) error { - span, ctx := opentracing.StartSpanFromContext(ctx, "UserConfigOverridesAPI.delete", opentracing.Tags{ - "userID": userID, - "version": version, - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "UserConfigOverridesAPI.delete", trace.WithAttributes( + attribute.String("userID", userID), + attribute.String("version", string(version)), + )) + defer span.End() traceID, _ := tracing.ExtractTraceID(ctx) level.Info(a.logger).Log("traceID", traceID, "msg", "deleting user-configurable overrides", "userID", userID, "version", version) diff --git a/modules/overrides/userconfigurable/api/http.go b/modules/overrides/userconfigurable/api/http.go index 52999a5aa04..0b46b15fc54 100644 --- a/modules/overrides/userconfigurable/api/http.go +++ b/modules/overrides/userconfigurable/api/http.go @@ -11,9 +11,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/user" jsoniter "github.com/json-iterator/go" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - ot_log "github.com/opentracing/opentracing-go/log" + "go.opentelemetry.io/otel/codes" "github.com/grafana/tempo/modules/overrides/userconfigurable/client" "github.com/grafana/tempo/pkg/api" @@ -202,7 +200,7 @@ func writeLimits(w http.ResponseWriter, limits *client.Limits, version backend.V } func (a *UserConfigOverridesAPI) logRequest(ctx context.Context, handler string, r *http.Request) (context.Context, func(*error)) { - span, ctx := opentracing.StartSpanFromContext(ctx, handler) + ctx, span := tracer.Start(ctx, handler) traceID, _ := tracing.ExtractTraceID(ctx) level.Info(a.logger).Log("traceID", traceID, "method", r.Method, "url", r.URL.RequestURI(), "user-agent", r.UserAgent()) @@ -211,11 +209,11 @@ func (a *UserConfigOverridesAPI) logRequest(ctx context.Context, handler string, err := *errPtr if err != nil && !errors.Is(err, backend.ErrDoesNotExist) { - 
span.LogFields(ot_log.Error(err)) - ext.Error.Set(span, true) + span.RecordError(err) + span.SetStatus(codes.Error, "") level.Error(a.logger).Log("traceID", traceID, "method", r.Method, "url", r.URL.RequestURI(), "user-agent", r.UserAgent(), "err", err) } - span.Finish() + span.End() } } diff --git a/modules/overrides/userconfigurable/client/client.go b/modules/overrides/userconfigurable/client/client.go index 668e62eefaa..6c3a2fba98d 100644 --- a/modules/overrides/userconfigurable/client/client.go +++ b/modules/overrides/userconfigurable/client/client.go @@ -11,9 +11,11 @@ import ( "github.com/go-kit/log/level" jsoniter "github.com/json-iterator/go" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/tempodb/backend" @@ -29,6 +31,8 @@ const ( OverridesFileName = "overrides.json" ) +var tracer = otel.Tracer("overrides/userconfigurable/client") + var ( metricList = promauto.NewCounter(prometheus.CounterOpts{ Namespace: "tempo", @@ -144,8 +148,8 @@ func initBackend(cfg *Config) (rw backend.VersionedReaderWriter, err error) { } func (o *clientImpl) List(ctx context.Context) ([]string, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "clientImpl.List") - defer span.Finish() + ctx, span := tracer.Start(ctx, "clientImpl.List") + defer span.End() metricList.Inc() @@ -153,8 +157,8 @@ func (o *clientImpl) List(ctx context.Context) ([]string, error) { } func (o *clientImpl) Get(ctx context.Context, userID string) (tenantLimits *Limits, version backend.Version, err error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "clientImpl.Get", opentracing.Tag{Key: "tenant", Value: userID}) - defer span.Finish() + ctx, span := tracer.Start(ctx, "clientImpl.Get", trace.WithAttributes(attribute.String("tenant", userID))) + defer span.End() metricFetch.WithLabelValues(userID).Inc() defer func() { @@ -175,8 +179,8 @@ func (o *clientImpl) Get(ctx context.Context, userID string) (tenantLimits *Limi } func (o *clientImpl) Set(ctx context.Context, userID string, limits *Limits, version backend.Version) (backend.Version, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "clientImpl.Set", opentracing.Tag{Key: "tenant", Value: userID}) - defer span.Finish() + ctx, span := tracer.Start(ctx, "clientImpl.Set", trace.WithAttributes(attribute.String("tenant", userID))) + defer span.End() data, err := jsoniter.Marshal(limits) if err != nil { @@ -187,8 +191,8 @@ func (o *clientImpl) Set(ctx context.Context, userID string, limits *Limits, ver } func (o *clientImpl) Delete(ctx context.Context, userID string, version backend.Version) error { - span, ctx := opentracing.StartSpanFromContext(ctx, "clientImpl.Delete", opentracing.Tag{Key: "tenant", Value: userID}) - defer span.Finish() + ctx, span := tracer.Start(ctx, "clientImpl.Delete", trace.WithAttributes(attribute.String("tenant", userID))) + defer span.End() return o.rw.DeleteVersioned(ctx, OverridesFileName, []string{OverridesKeyPath, userID}, version) } diff --git a/modules/querier/http.go b/modules/querier/http.go index 057bacc97bc..98d5289dc1b 100644 --- a/modules/querier/http.go +++ b/modules/querier/http.go @@ -9,8 +9,8 @@ import ( "github.com/golang/protobuf/jsonpb" //nolint:all //deprecated "github.com/golang/protobuf/proto" //nolint:all //ProtoReflect - 
"github.com/opentracing/opentracing-go" - ot_log "github.com/opentracing/opentracing-go/log" + "go.opentelemetry.io/otel/attribute" + oteltrace "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/api" "github.com/grafana/tempo/pkg/model/trace" @@ -34,8 +34,8 @@ func (q *Querier) TraceByIDHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.TraceByID.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.TraceByIDHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.TraceByIDHandler") + defer span.End() byteID, err := api.ParseTraceID(r) if err != nil { @@ -49,13 +49,13 @@ func (q *Querier) TraceByIDHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - span.LogFields( - ot_log.String("msg", "validated request"), - ot_log.String("blockStart", blockStart), - ot_log.String("blockEnd", blockEnd), - ot_log.String("queryMode", queryMode), - ot_log.String("timeStart", fmt.Sprint(timeStart)), - ot_log.String("timeEnd", fmt.Sprint(timeEnd))) + span.AddEvent("validated request", oteltrace.WithAttributes( + attribute.String("blockStart", blockStart), + attribute.String("blockEnd", blockEnd), + attribute.String("queryMode", queryMode), + attribute.String("timeStart", fmt.Sprint(timeStart)), + attribute.String("timeEnd", fmt.Sprint(timeEnd)), + )) resp, err := q.FindTraceByID(ctx, &tempopb.TraceByIDRequest{ TraceID: byteID, @@ -75,7 +75,7 @@ func (q *Querier) TraceByIDHandler(w http.ResponseWriter, r *http.Request) { } if r.Header.Get(api.HeaderAccept) == api.HeaderAcceptProtobuf { - span.SetTag("contentType", api.HeaderAcceptProtobuf) + span.SetAttributes(attribute.String("contentType", api.HeaderAcceptProtobuf)) b, err := proto.Marshal(resp) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -90,7 +90,7 @@ func (q *Querier) TraceByIDHandler(w http.ResponseWriter, r *http.Request) { return } - span.SetTag("contentType", api.HeaderAcceptJSON) + span.SetAttributes(attribute.String("contentType", api.HeaderAcceptJSON)) marshaller := &jsonpb.Marshaler{} err = marshaller.Marshal(w, resp) if err != nil { @@ -107,11 +107,11 @@ func (q *Querier) SearchHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.SearchHandler") + defer span.End() - span.SetTag("requestURI", r.RequestURI) - span.SetTag("isSearchBlock", isSearchBlock) + span.SetAttributes(attribute.String("requestURI", r.RequestURI)) + span.SetAttributes(attribute.Bool("isSearchBlock", isSearchBlock)) var resp *tempopb.SearchResponse if !isSearchBlock { @@ -121,7 +121,7 @@ func (q *Querier) SearchHandler(w http.ResponseWriter, r *http.Request) { return } - span.SetTag("SearchRequest", req.String()) + span.SetAttributes(attribute.String("SearchRequest", req.String())) resp, err = q.SearchRecent(ctx, req) if err != nil { @@ -135,7 +135,7 @@ func (q *Querier) SearchHandler(w http.ResponseWriter, r *http.Request) { return } - span.SetTag("SearchRequestBlock", req.String()) + span.SetAttributes(attribute.String("SearchRequestBlock", req.String())) resp, err = q.SearchBlock(ctx, req) if err != nil { @@ -160,8 +160,8 @@ func (q *Querier) SearchTagsHandler(w http.ResponseWriter, r *http.Request) { ctx, 
cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagsHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.SearchTagsHandler") + defer span.End() if !isSearchBlock { req, err := api.ParseSearchTagsRequest(r) @@ -209,8 +209,8 @@ func (q *Querier) SearchTagsV2Handler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagsHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.SearchTagsHandler") + defer span.End() if !isSearchBlock { req, err := api.ParseSearchTagsRequest(r) @@ -259,8 +259,8 @@ func (q *Querier) SearchTagValuesHandler(w http.ResponseWriter, r *http.Request) ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagValuesHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.SearchTagValuesHandler") + defer span.End() if !isSearchBlock { req, err := api.ParseSearchTagValuesRequest(r) @@ -309,8 +309,8 @@ func (q *Querier) SearchTagValuesV2Handler(w http.ResponseWriter, r *http.Reques ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagValuesHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.SearchTagValuesHandler") + defer span.End() if !isSearchBlock { req, err := api.ParseSearchTagValuesRequestV2(r) @@ -357,8 +357,8 @@ func (q *Querier) SpanMetricsSummaryHandler(w http.ResponseWriter, r *http.Reque ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SpanMetricsSummaryHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.SpanMetricsSummaryHandler") + defer span.End() req, err := api.ParseSpanMetricsSummaryRequest(r) if err != nil { @@ -391,24 +391,24 @@ func (q *Querier) QueryRangeHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.Search.QueryTimeout)) defer cancel() - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.QueryRangeHandler") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.QueryRangeHandler") + defer span.End() - errHandler := func(ctx context.Context, span opentracing.Span, err error) { + errHandler := func(ctx context.Context, span oteltrace.Span, err error) { if errors.Is(err, context.Canceled) { // todo: context is also canceled when we hit the query timeout. research what the behavior is // ignore this error. 
we regularly cancel context once queries are complete - span.SetTag("error", err.Error()) + span.RecordError(err) return } if ctx.Err() != nil { - span.SetTag("error", ctx.Err()) + span.RecordError(ctx.Err()) return } if err != nil { - span.SetTag("error", err.Error()) + span.RecordError(err) } } @@ -445,11 +445,11 @@ func (q *Querier) QueryRangeHandler(w http.ResponseWriter, r *http.Request) { return } - span.SetTag("query", req.Query) - span.SetTag("shard", req.ShardID) - span.SetTag("shardCount", req.ShardCount) - span.SetTag("step", time.Duration(req.Step)) - span.SetTag("interval", time.Unix(0, int64(req.End)).Sub(time.Unix(0, int64(req.Start)))) + span.SetAttributes(attribute.String("query", req.Query)) + span.SetAttributes(attribute.Int64("shard", int64(req.ShardID))) + span.SetAttributes(attribute.Int64("shardCount", int64(req.ShardCount))) + span.SetAttributes(attribute.Int64("step", time.Duration(req.Step).Nanoseconds())) + span.SetAttributes(attribute.Int64("interval", time.Unix(0, int64(req.End)).Sub(time.Unix(0, int64(req.Start))).Nanoseconds())) resp, err = q.QueryRange(ctx, req) if err != nil { @@ -458,8 +458,8 @@ func (q *Querier) QueryRangeHandler(w http.ResponseWriter, r *http.Request) { } if resp != nil && resp.Metrics != nil { - span.SetTag("inspectedBytes", resp.Metrics.InspectedBytes) - span.SetTag("inspectedSpans", resp.Metrics.InspectedSpans) + span.SetAttributes(attribute.Int64("inspectedBytes", int64(resp.Metrics.InspectedBytes))) + span.SetAttributes(attribute.Int64("inspectedSpans", int64(resp.Metrics.InspectedSpans))) } } diff --git a/modules/querier/querier.go b/modules/querier/querier.go index 47bddf98f6d..fe0e4b79588 100644 --- a/modules/querier/querier.go +++ b/modules/querier/querier.go @@ -16,10 +16,11 @@ import ( ring_client "github.com/grafana/dskit/ring/client" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" - "github.com/opentracing/opentracing-go" - ot_log "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + oteltrace "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" "go.uber.org/multierr" "golang.org/x/sync/semaphore" @@ -43,6 +44,8 @@ import ( "github.com/grafana/tempo/tempodb/encoding/common" ) +var tracer = otel.Tracer("querier") + var ( metricIngesterClients = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "tempo", @@ -231,10 +234,10 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque return nil, fmt.Errorf("error extracting org id in Querier.FindTraceByID: %w", err) } - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.FindTraceByID") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.FindTraceByID") + defer span.End() - span.SetTag("queryMode", req.QueryMode) + span.SetAttributes(attribute.String("queryMode", req.QueryMode)) maxBytes := q.limits.MaxBytesPerTrace(userID) combiner := trace.NewCombiner(maxBytes) @@ -250,7 +253,7 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque } // get responses from all ingesters in parallel - span.LogFields(ot_log.String("msg", "searching ingesters")) + span.AddEvent("searching ingesters") responses, err := q.forIngesterRings(ctx, userID, getRSFn, func(funcCtx context.Context, client tempopb.QuerierClient) (interface{}, error) { return client.FindTraceByID(funcCtx, req) }) @@ -272,22 +275,23 @@ func (q *Querier) FindTraceByID(ctx 
context.Context, req *tempopb.TraceByIDReque found = true } } - span.LogFields(ot_log.String("msg", "done searching ingesters"), - ot_log.Bool("found", found), - ot_log.Int("combinedSpans", spanCountTotal), - ot_log.Int("combinedTraces", traceCountTotal)) + span.AddEvent("done searching ingesters", oteltrace.WithAttributes( + attribute.Bool("found", found), + attribute.Int("combinedSpans", spanCountTotal), + attribute.Int("combinedTraces", traceCountTotal))) } if req.QueryMode == QueryModeBlocks || req.QueryMode == QueryModeAll { - span.LogFields(ot_log.String("msg", "searching store")) - span.LogFields(ot_log.String("timeStart", fmt.Sprint(timeStart))) - span.LogFields(ot_log.String("timeEnd", fmt.Sprint(timeEnd))) + span.AddEvent("searching store", oteltrace.WithAttributes( + attribute.Int64("timeStart", timeStart), + attribute.Int64("timeEnd", timeEnd), + )) opts := common.DefaultSearchOptionsWithMaxBytes(maxBytes) partialTraces, blockErrs, err := q.store.Find(ctx, userID, req.TraceID, req.BlockStart, req.BlockEnd, timeStart, timeEnd, opts) if err != nil { retErr := fmt.Errorf("error querying store in Querier.FindTraceByID: %w", err) - ot_log.Error(retErr) + span.RecordError(retErr) return nil, retErr } @@ -295,9 +299,8 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque return nil, multierr.Combine(blockErrs...) } - span.LogFields( - ot_log.String("msg", "done searching store"), - ot_log.Int("foundPartialTraces", len(partialTraces))) + span.AddEvent("done searching store", oteltrace.WithAttributes( + attribute.Int("foundPartialTraces", len(partialTraces)))) for _, partialTrace := range partialTraces { _, err = combiner.Consume(partialTrace) @@ -390,8 +393,8 @@ func (q *Querier) forIngesterRings(ctx context.Context, userID string, getReplic } func forOneIngesterRing(ctx context.Context, replicationSet ring.ReplicationSet, f forEachFn, pool *ring_client.Pool, extraQueryDelay time.Duration) ([]interface{}, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.forOneIngester") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.forOneIngester") + defer span.End() doFunc := func(funcCtx context.Context, ingester *ring.InstanceDesc) (interface{}, error) { if funcCtx.Err() != nil { @@ -426,8 +429,8 @@ func (q *Querier) forGivenGenerators( return nil, ctx.Err() } - span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.forGivenGenerators") - defer span.Finish() + ctx, span := tracer.Start(ctx, "Querier.forGivenGenerators") + defer span.End() doFunc := func(funcCtx context.Context, generator *ring.InstanceDesc) (interface{}, error) { if funcCtx.Err() != nil { diff --git a/modules/querier/querier_query_range.go b/modules/querier/querier_query_range.go index f1e7cd4722a..15e090ce017 100644 --- a/modules/querier/querier_query_range.go +++ b/modules/querier/querier_query_range.go @@ -15,8 +15,9 @@ import ( "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" "github.com/uber-go/atomic" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) func (q *Querier) QueryRange(ctx context.Context, req *tempopb.QueryRangeRequest) (*tempopb.QueryRangeResponse, error) { @@ -116,11 +117,11 @@ func (q *Querier) queryBackend(ctx context.Context, req *tempopb.QueryRangeReque go func(m *backend.BlockMeta) { defer wg.Done() - span, ctx := opentracing.StartSpanFromContext(ctx, "querier.queryBackEnd.Block", 
opentracing.Tags{ - "block": m.BlockID.String(), - "blockSize": m.Size, - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "querier.queryBackEnd.Block", trace.WithAttributes( + attribute.String("block", m.BlockID.String()), + attribute.Int64("blockSize", int64(m.Size)), + )) + defer span.End() f := traceql.NewSpansetFetcherWrapper(func(ctx context.Context, req traceql.FetchSpansRequest) (traceql.FetchSpansResponse, error) { return q.store.Fetch(ctx, m, req, common.DefaultSearchOptions()) diff --git a/modules/querier/worker/frontend_processor.go b/modules/querier/worker/frontend_processor.go index 1b7c8af303d..1b945df4249 100644 --- a/modules/querier/worker/frontend_processor.go +++ b/modules/querier/worker/frontend_processor.go @@ -14,9 +14,9 @@ import ( "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/tempo/pkg/util/httpgrpcutil" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -162,15 +162,10 @@ func (fp *frontendProcessor) runRequests(ctx context.Context, requests []*httpgr } func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest) *httpgrpc.HTTPResponse { - tracer := opentracing.GlobalTracer() - // Ignore errors here. If we cannot get parent span, we just don't create new one. - parentSpanContext, _ := httpgrpcutil.GetParentSpanForRequest(tracer, request) - if parentSpanContext != nil { - queueSpan, spanCtx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "querier_processor_runRequest", opentracing.ChildOf(parentSpanContext)) - defer queueSpan.Finish() - - ctx = spanCtx - } + carrier := (*httpgrpcutil.HttpgrpcHeadersCarrier)(request) + ctx = otel.GetTextMapPropagator().Extract(ctx, carrier) + ctx, queueSpan := tracer.Start(ctx, "querier_processor_runRequest") + defer queueSpan.End() response, err := fp.handler.Handle(ctx, request) if err != nil { diff --git a/modules/querier/worker/worker.go b/modules/querier/worker/worker.go index 4433bede1af..db15126e02f 100644 --- a/modules/querier/worker/worker.go +++ b/modules/querier/worker/worker.go @@ -15,11 +15,14 @@ import ( "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/services" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel" "google.golang.org/grpc" "github.com/grafana/tempo/pkg/util" ) +var tracer = otel.Tracer("querier/worker") + type Config struct { FrontendAddress string `yaml:"frontend_address"` DNSLookupPeriod time.Duration `yaml:"dns_lookup_duration"` diff --git a/pkg/cache/background.go b/pkg/cache/background.go index 2e5b4fa0a23..c0c55cc70eb 100644 --- a/pkg/cache/background.go +++ b/pkg/cache/background.go @@ -5,10 +5,10 @@ import ( "flag" "sync" - opentracing "github.com/opentracing/opentracing-go" - otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // BackgroundConfig is config for a Background Cache. 
@@ -97,9 +97,9 @@ func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byt c.queueLength.Add(float64(num)) default: c.droppedWriteBack.Add(float64(num)) - sp := opentracing.SpanFromContext(ctx) + sp := trace.SpanFromContext(ctx) if sp != nil { - sp.LogFields(otlog.Int("dropped", num)) + sp.AddEvent("dropped", trace.WithAttributes(attribute.Int("dropped", num))) } return // queue is full; give up } diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index f6b629cf670..a3b4057d407 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -4,6 +4,7 @@ import ( "context" "github.com/grafana/dskit/services" + "go.opentelemetry.io/otel" ) type Role string @@ -20,6 +21,8 @@ const ( RoleParquetPage Role = "parquet-page" ) +var tracer = otel.Tracer("cache") + // Provider is an object that can return a cache for a requested role type Provider interface { services.Service diff --git a/pkg/cache/redis_cache.go b/pkg/cache/redis_cache.go index ff4e370a351..764aaeba8ab 100644 --- a/pkg/cache/redis_cache.go +++ b/pkg/cache/redis_cache.go @@ -6,9 +6,9 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" instr "github.com/grafana/dskit/instrument" - otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel/attribute" util_log "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/pkg/util/spanlogger" @@ -62,8 +62,8 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, // Run a tracked request, using c.requestDuration to monitor requests. err := instr.CollectedRequest(ctx, method, c.requestDuration, redisStatusCode, func(ctx context.Context) error { log, _ := spanlogger.New(ctx, method) - defer log.Finish() - log.LogFields(otlog.Int("keys requested", len(keys))) + defer log.End() + log.SetAttributes(attribute.Int("keys requested", len(keys))) var err error items, err = c.redis.MGet(ctx, keys) @@ -74,7 +74,7 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, return err } - log.LogFields(otlog.Int("keys found", len(items))) + log.SetAttributes(attribute.Int("keys found", len(items))) return nil }) diff --git a/pkg/parquetquery/iters.go b/pkg/parquetquery/iters.go index 22cb3f05e23..d5aa9d5edb0 100644 --- a/pkg/parquetquery/iters.go +++ b/pkg/parquetquery/iters.go @@ -12,8 +12,10 @@ import ( "github.com/grafana/tempo/pkg/parquetquery/intern" "github.com/grafana/tempo/pkg/util" - "github.com/opentracing/opentracing-go" pq "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // RowNumber is the sequence of row numbers uniquely identifying a value @@ -35,6 +37,8 @@ type RowNumber [6]int64 const MaxDefinitionLevel = 5 +var tracer = otel.Tracer("parquetquery") + // EmptyRowNumber creates an empty invalid row number. 
func EmptyRowNumber() RowNumber { return RowNumber{-1, -1, -1, -1, -1, -1} @@ -536,7 +540,7 @@ type SyncIterator struct { filter *InstrumentedPredicate // Status - span opentracing.Span + span trace.Span curr RowNumber currRowGroup pq.RowGroup currRowGroupMin RowNumber @@ -571,10 +575,10 @@ func NewSyncIterator(ctx context.Context, rgs []pq.RowGroup, column int, columnN rn.Skip(rg.NumRows()) } - span, _ := opentracing.StartSpanFromContext(ctx, "syncIterator", opentracing.Tags{ - "columnIndex": column, - "column": columnName, - }) + _, span := tracer.Start(ctx, "syncIterator", trace.WithAttributes( + attribute.Int("columnIndex", column), + attribute.String("column", columnName), + )) at := IteratorResult{} if selectAs != "" { @@ -997,13 +1001,13 @@ func (c *SyncIterator) makeResult(t RowNumber, v *pq.Value) *IteratorResult { func (c *SyncIterator) Close() { c.closeCurrRowGroup() - c.span.SetTag("inspectedColumnChunks", c.filter.InspectedColumnChunks) - c.span.SetTag("inspectedPages", c.filter.InspectedPages) - c.span.SetTag("inspectedValues", c.filter.InspectedValues) - c.span.SetTag("keptColumnChunks", c.filter.KeptColumnChunks) - c.span.SetTag("keptPages", c.filter.KeptPages) - c.span.SetTag("keptValues", c.filter.KeptValues) - c.span.Finish() + c.span.SetAttributes(attribute.Int64("inspectedColumnChunks", c.filter.InspectedColumnChunks)) + c.span.SetAttributes(attribute.Int64("inspectedPages", c.filter.InspectedPages)) + c.span.SetAttributes(attribute.Int64("inspectedValues", c.filter.InspectedValues)) + c.span.SetAttributes(attribute.Int64("keptColumnChunks", c.filter.KeptColumnChunks)) + c.span.SetAttributes(attribute.Int64("keptPages", c.filter.KeptPages)) + c.span.SetAttributes(attribute.Int64("keptValues", c.filter.KeptValues)) + c.span.End() if c.intern && c.interner != nil { c.interner.Close() @@ -1065,18 +1069,17 @@ func (c *ColumnIterator) String() string { func (c *ColumnIterator) iterate(ctx context.Context, readSize int) { defer close(c.ch) - span, _ := opentracing.StartSpanFromContext(ctx, "columnIterator.iterate", opentracing.Tags{ - "columnIndex": c.col, - "column": c.colName, - }) + _, span := tracer.Start(ctx, "columnIterator.iterate", trace.WithAttributes( + attribute.Int("columnIndex", c.col), + attribute.String("column", c.colName), + )) defer func() { - span.SetTag("inspectedColumnChunks", c.filter.InspectedColumnChunks) - span.SetTag("inspectedPages", c.filter.InspectedPages) - span.SetTag("inspectedValues", c.filter.InspectedValues) - span.SetTag("keptColumnChunks", c.filter.KeptColumnChunks) - span.SetTag("keptPages", c.filter.KeptPages) - span.SetTag("keptValues", c.filter.KeptValues) - span.Finish() + span.SetAttributes(attribute.Int64("inspectedColumnChunks", c.filter.InspectedColumnChunks)) + span.SetAttributes(attribute.Int64("inspectedPages", c.filter.InspectedPages)) + span.SetAttributes(attribute.Int64("inspectedValues", c.filter.InspectedValues)) + span.SetAttributes(attribute.Int64("keptColumnChunks", c.filter.KeptColumnChunks)) + span.SetAttributes(attribute.Int64("keptPages", c.filter.KeptPages)) + span.SetAttributes(attribute.Int64("keptValues", c.filter.KeptValues)) }() rn := EmptyRowNumber() diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index f98e915c65c..bd3d53bf80b 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -7,7 +7,8 @@ import ( "io" "time" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/tempopb" common_v1 
"github.com/grafana/tempo/pkg/tempopb/common/v1" @@ -46,8 +47,8 @@ func (e *Engine) Compile(query string) (*RootExpr, SpansetFilterFunc, metricsFir } func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchRequest, spanSetFetcher SpansetFetcher) (*tempopb.SearchResponse, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "traceql.Engine.ExecuteSearch") - defer span.Finish() + ctx, span := tracer.Start(ctx, "traceql.Engine.ExecuteSearch") + defer span.End() rootExpr, err := e.parseQuery(searchReq) if err != nil { @@ -56,8 +57,7 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq fetchSpansRequest := e.createFetchSpansRequest(searchReq, rootExpr.Pipeline) - span.SetTag("pipeline", rootExpr.Pipeline) - span.SetTag("fetchSpansRequest", fetchSpansRequest) + span.SetAttributes(attribute.String("pipeline", rootExpr.Pipeline.String())) // calculate search meta conditions. metaConditions := SearchMetaConditionsWithout(fetchSpansRequest.Conditions) @@ -72,7 +72,7 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq evalSS, err := rootExpr.Pipeline.evaluate([]*Spanset{inSS}) if err != nil { - span.LogKV("msg", "pipeline.evaluate", "err", err) + span.RecordError(err, trace.WithAttributes(attribute.String("msg", "pipeline.evaluate"))) return nil, err } @@ -113,7 +113,7 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq for { spanset, err := iterator.Next(ctx) if err != nil && !errors.Is(err, io.EOF) { - span.LogKV("msg", "iterator.Next", "err", err) + span.RecordError(err, trace.WithAttributes(attribute.String("msg", "iterator.Next"))) return nil, err } if spanset == nil { @@ -127,14 +127,14 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq } res.Traces = combiner.Metadata() - span.SetTag("spansets_evaluated", spansetsEvaluated) - span.SetTag("spansets_found", len(res.Traces)) + span.SetAttributes(attribute.Int("spansets_evaluated", spansetsEvaluated)) + span.SetAttributes(attribute.Int("spansets_found", len(res.Traces))) // Bytes can be nil when callback is no set if fetchSpansResponse.Bytes != nil { // InspectedBytes is used to compute query throughput and SLO metrics res.Metrics.InspectedBytes = fetchSpansResponse.Bytes() - span.SetTag("inspectedBytes", res.Metrics.InspectedBytes) + span.SetAttributes(attribute.Int64("inspectedBytes", int64(res.Metrics.InspectedBytes))) } return res, nil @@ -147,10 +147,10 @@ func (e *Engine) ExecuteTagValues( cb AutocompleteCallback, fetcher AutocompleteFetcher, ) error { - span, ctx := opentracing.StartSpanFromContext(ctx, "traceql.Engine.ExecuteTagValues") - defer span.Finish() + ctx, span := tracer.Start(ctx, "traceql.Engine.ExecuteTagValues") + defer span.End() - span.SetTag("sanitized query", query) + span.SetAttributes(attribute.String("sanitized query", query)) rootExpr, err := Parse(query) if err != nil { @@ -165,8 +165,7 @@ func (e *Engine) ExecuteTagValues( autocompleteReq := e.createAutocompleteRequest(tag, rootExpr.Pipeline) - span.SetTag("pipeline", rootExpr.Pipeline) - span.SetTag("autocompleteReq", autocompleteReq) + span.SetAttributes(attribute.String("pipeline", rootExpr.Pipeline.String())) return fetcher.Fetch(ctx, autocompleteReq, cb) } diff --git a/pkg/traceql/util.go b/pkg/traceql/util.go index 745ac9b14df..6c78396ec0c 100644 --- a/pkg/traceql/util.go +++ b/pkg/traceql/util.go @@ -1,6 +1,11 @@ package traceql -import "github.com/grafana/tempo/pkg/tempopb" +import ( + 
"github.com/grafana/tempo/pkg/tempopb" + "go.opentelemetry.io/otel" +) + +var tracer = otel.Tracer("traceql") func MakeCollectTagValueFunc(collect func(tempopb.TagValue) bool) func(v Static) bool { return func(v Static) bool { diff --git a/pkg/util/http.go b/pkg/util/http.go index 45f8da37418..d1be761e767 100644 --- a/pkg/util/http.go +++ b/pkg/util/http.go @@ -15,8 +15,8 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - "github.com/opentracing/opentracing-go" - otlog "github.com/opentracing/opentracing-go/log" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "gopkg.in/yaml.v2" ) @@ -136,9 +136,9 @@ const ( // ParseProtoReader parses a compressed proto from an io.Reader. func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSize int, req proto.Message, compression CompressionType) error { - sp := opentracing.SpanFromContext(ctx) + sp := trace.SpanFromContext(ctx) if sp != nil { - sp.LogFields(otlog.String("event", "util.ParseProtoRequest[start reading]")) + sp.SetAttributes(attribute.String("event", "util.ParseProtoRequest[start reading]")) } body, err := decompressRequest(reader, expectedSize, maxSize, compression, sp) if err != nil { @@ -146,7 +146,7 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSi } if sp != nil { - sp.LogFields(otlog.String("event", "util.ParseProtoRequest[unmarshal]"), otlog.Int("size", len(body))) + sp.SetAttributes(attribute.String("event", "util.ParseProtoRequest[unmarshal]"), attribute.Int("size", len(body))) } // We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first, @@ -164,7 +164,7 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSi return nil } -func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp opentracing.Span) (body []byte, err error) { +func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp trace.Span) (body []byte, err error) { defer func() { if err != nil && len(body) > maxSize { err = fmt.Errorf(messageSizeLargerErrFmt, len(body), maxSize) @@ -182,7 +182,7 @@ func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression return } -func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp opentracing.Span) ([]byte, error) { +func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp trace.Span) ([]byte, error) { var ( buf bytes.Buffer body []byte @@ -208,7 +208,7 @@ func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compressi return body, err } -func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression CompressionType, sp opentracing.Span) ([]byte, error) { +func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression CompressionType, sp trace.Span) ([]byte, error) { if len(buffer.Bytes()) > maxSize { return nil, fmt.Errorf(messageSizeLargerErrFmt, len(buffer.Bytes()), maxSize) } @@ -217,8 +217,8 @@ func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression Compres return buffer.Bytes(), nil case RawSnappy: if sp != nil { - sp.LogFields(otlog.String("event", "util.ParseProtoRequest[decompress]"), - otlog.Int("size", len(buffer.Bytes()))) + sp.SetAttributes(attribute.String("event", "util.ParseProtoRequest[decompress]"), + attribute.Int("size", len(buffer.Bytes()))) } size, err := snappy.DecodedLen(buffer.Bytes()) if 
err != nil { diff --git a/pkg/util/httpgrpcutil/carrier.go b/pkg/util/httpgrpcutil/carrier.go index 0fda1126f32..e7b93f5edc5 100644 --- a/pkg/util/httpgrpcutil/carrier.go +++ b/pkg/util/httpgrpcutil/carrier.go @@ -1,16 +1,21 @@ package httpgrpcutil import ( - "errors" - - "github.com/opentracing/opentracing-go" - "github.com/grafana/dskit/httpgrpc" ) // Used to transfer trace information from/to HTTP request. type HttpgrpcHeadersCarrier httpgrpc.HTTPRequest +func (c *HttpgrpcHeadersCarrier) Get(key string) string { + for _, h := range c.Headers { + if h.Key == key { + return h.Values[0] + } + } + return "" +} + func (c *HttpgrpcHeadersCarrier) Set(key, val string) { c.Headers = append(c.Headers, &httpgrpc.Header{ Key: key, @@ -18,26 +23,10 @@ func (c *HttpgrpcHeadersCarrier) Set(key, val string) { }) } -func (c *HttpgrpcHeadersCarrier) ForeachKey(handler func(key, val string) error) error { +func (c *HttpgrpcHeadersCarrier) Keys() []string { + k := make([]string, 0, len(c.Headers)) for _, h := range c.Headers { - for _, v := range h.Values { - if err := handler(h.Key, v); err != nil { - return err - } - } - } - return nil -} - -func GetParentSpanForRequest(tracer opentracing.Tracer, req *httpgrpc.HTTPRequest) (opentracing.SpanContext, error) { - if tracer == nil { - return nil, nil - } - - carrier := (*HttpgrpcHeadersCarrier)(req) - extracted, err := tracer.Extract(opentracing.HTTPHeaders, carrier) - if errors.Is(err, opentracing.ErrSpanContextNotFound) { - err = nil + k = append(k, h.Key) } - return extracted, err + return k } diff --git a/pkg/util/spanlogger/noop.go b/pkg/util/spanlogger/noop.go deleted file mode 100644 index b067be6ea2e..00000000000 --- a/pkg/util/spanlogger/noop.go +++ /dev/null @@ -1,54 +0,0 @@ -package spanlogger - -import ( - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -type noopTracer struct{} - -type ( - noopSpan struct{} - noopSpanContext struct{} -) - -var ( - defaultNoopSpanContext = noopSpanContext{} - defaultNoopSpan = noopSpan{} - defaultNoopTracer = noopTracer{} -) - -const ( - emptyString = "" -) - -func (n noopSpanContext) ForeachBaggageItem(func(k, v string) bool) {} - -func (n noopSpan) Context() opentracing.SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(string, string) opentracing.Span { return defaultNoopSpan } -func (n noopSpan) BaggageItem(string) string { return emptyString } -func (n noopSpan) SetTag(string, interface{}) opentracing.Span { return n } -func (n noopSpan) LogFields(...log.Field) {} -func (n noopSpan) LogKV(...interface{}) {} -func (n noopSpan) Finish() {} -func (n noopSpan) FinishWithOptions(opentracing.FinishOptions) {} -func (n noopSpan) SetOperationName(string) opentracing.Span { return n } -func (n noopSpan) Tracer() opentracing.Tracer { return defaultNoopTracer } -func (n noopSpan) LogEvent(string) {} -func (n noopSpan) LogEventWithPayload(string, interface{}) {} -func (n noopSpan) Log(opentracing.LogData) {} - -// StartSpan belongs to the Tracer interface. -func (n noopTracer) StartSpan(string, ...opentracing.StartSpanOption) opentracing.Span { - return defaultNoopSpan -} - -// Inject belongs to the Tracer interface. -func (n noopTracer) Inject(opentracing.SpanContext, interface{}, interface{}) error { - return nil -} - -// Extract belongs to the Tracer interface. 
-func (n noopTracer) Extract(interface{}, interface{}) (opentracing.SpanContext, error) { - return nil, opentracing.ErrSpanContextNotFound -} diff --git a/pkg/util/spanlogger/spanlogger.go b/pkg/util/spanlogger/spanlogger.go index fea13b7bd60..3336a185951 100644 --- a/pkg/util/spanlogger/spanlogger.go +++ b/pkg/util/spanlogger/spanlogger.go @@ -2,12 +2,15 @@ package spanlogger import ( "context" + "fmt" "github.com/go-kit/log" "github.com/go-kit/log/level" - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - otlog "github.com/opentracing/opentracing-go/log" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "github.com/grafana/dskit/tenant" util_log "github.com/grafana/tempo/pkg/util/log" @@ -19,12 +22,16 @@ const ( TenantIDTagName = "tenant_ids" ) -var loggerCtxKey = &loggerCtxMarker{} +var ( + tracer = otel.Tracer("") + defaultNoopSpan = noop.Span{} + loggerCtxKey = &loggerCtxMarker{} +) // SpanLogger unifies tracing and logging, to reduce repetition. type SpanLogger struct { log.Logger - opentracing.Span + trace.Span } // New makes a new SpanLogger, where logs will be sent to the global logger. @@ -36,9 +43,9 @@ func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, // to. The provided context will have the logger attached to it and can be // retrieved with FromContext or FromContextWithFallback. func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) { - span, ctx := opentracing.StartSpanFromContext(ctx, method) + ctx, span := tracer.Start(ctx, method) if ids, _ := tenant.TenantIDs(ctx); len(ids) > 0 { - span.SetTag(TenantIDTagName, ids) + span.SetAttributes(attribute.StringSlice(TenantIDTagName, ids)) } logger := &SpanLogger{ Logger: log.With(util_log.WithContext(ctx, l), "method", method), @@ -69,7 +76,7 @@ func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogg if !ok { logger = fallback } - sp := opentracing.SpanFromContext(ctx) + sp := trace.SpanFromContext(ctx) if sp == nil { sp = defaultNoopSpan } @@ -83,11 +90,20 @@ func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogg // also puts the on the spans. func (s *SpanLogger) Log(kvps ...interface{}) error { s.Logger.Log(kvps...) - fields, err := otlog.InterleavedKVToFields(kvps...) - if err != nil { - return err + + for i := 0; i*2 < len(kvps); i++ { + key, ok := kvps[i*2].(string) + if !ok { + return fmt.Errorf("non-string key (pair #%d): %T", i, kvps[i*2]) + } + + switch t := kvps[i*2+1].(type) { + case bool: + s.Span.SetAttributes(attribute.Bool(key, t)) + case string: + s.Span.SetAttributes(attribute.String(key, t)) + } } - s.Span.LogFields(fields...) 
return nil } @@ -96,7 +112,8 @@ func (s *SpanLogger) Error(err error) error { if err == nil { return nil } - ext.Error.Set(s.Span, true) - s.Span.LogFields(otlog.Error(err)) + + s.Span.SetStatus(codes.Error, "") + s.Span.RecordError(err) return err } diff --git a/tempodb/backend/azure/v1/v1.go b/tempodb/backend/azure/v1/v1.go index d06d0cf82f1..56cff2be591 100644 --- a/tempodb/backend/azure/v1/v1.go +++ b/tempodb/backend/azure/v1/v1.go @@ -15,7 +15,9 @@ import ( blob "github.com/Azure/azure-storage-blob-go/azblob" "github.com/go-kit/log/level" "github.com/google/uuid" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/tempodb/backend" @@ -29,6 +31,8 @@ const ( maxParallelism = 3 ) +var tracer = otel.Tracer("tempodb/backend/azure/v1") + type V1 struct { cfg *config.Config containerURL blob.ContainerURL @@ -80,8 +84,8 @@ func New(cfg *config.Config, confirm bool) (*V1, error) { func (rw *V1) Write(ctx context.Context, name string, keypath backend.KeyPath, data io.Reader, _ int64, _ *backend.CacheInfo) error { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.Write") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.Write") + defer span.End() return rw.writer(derivedCtx, bufio.NewReader(data), backend.ObjectFileName(keypath, name)) } @@ -162,8 +166,8 @@ func (rw *V1) List(ctx context.Context, keypath backend.KeyPath) ([]string, erro // ListBlocks implements backend.Reader func (rw *V1) ListBlocks(ctx context.Context, tenant string) ([]uuid.UUID, []uuid.UUID, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "V1.ListBlocks") - defer span.Finish() + ctx, span := tracer.Start(ctx, "V1.ListBlocks") + defer span.End() var ( blockIDs = make([]uuid.UUID, 0, 1000) @@ -230,8 +234,8 @@ func (rw *V1) ListBlocks(ctx context.Context, tenant string) ([]uuid.UUID, []uui func (rw *V1) Read(ctx context.Context, name string, keypath backend.KeyPath, _ *backend.CacheInfo) (io.ReadCloser, int64, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.Read") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.Read") + defer span.End() object := backend.ObjectFileName(keypath, name) b, _, err := rw.readAll(derivedCtx, object) @@ -246,11 +250,11 @@ func (rw *V1) Read(ctx context.Context, name string, keypath backend.KeyPath, _ func (rw *V1) ReadRange(ctx context.Context, name string, keypath backend.KeyPath, offset uint64, buffer []byte, _ *backend.CacheInfo) error { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.ReadRange", opentracing.Tags{ - "len": len(buffer), - "offset": offset, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.ReadRange", trace.WithAttributes( + attribute.Int("len", len(buffer)), + attribute.Int64("offset", int64(offset)), + )) + defer span.End() object := backend.ObjectFileName(keypath, name) err := rw.readRange(derivedCtx, object, int64(offset), buffer) @@ -307,8 +311,8 @@ func (rw *V1) DeleteVersioned(ctx context.Context, name string, keypath backend. 
func (rw *V1) ReadVersioned(ctx context.Context, name string, keypath backend.KeyPath) (io.ReadCloser, backend.Version, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.ReadVersioned") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.ReadVersioned") + defer span.End() object := backend.ObjectFileName(keypath, name) b, etag, err := rw.readAll(derivedCtx, object) diff --git a/tempodb/backend/azure/v2/v2.go b/tempodb/backend/azure/v2/v2.go index 56f7126d60d..cb9e75b05a0 100644 --- a/tempodb/backend/azure/v2/v2.go +++ b/tempodb/backend/azure/v2/v2.go @@ -21,7 +21,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/go-kit/log/level" "github.com/google/uuid" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/tempodb/backend" @@ -35,6 +37,8 @@ const ( maxParallelism = 3 ) +var tracer = otel.Tracer("tempodb/backend/azure/v2") + type V2 struct { cfg *config.Config containerClient *container.Client @@ -86,8 +90,8 @@ func New(cfg *config.Config, confirm bool) (*V2, error) { func (rw *V2) Write(ctx context.Context, name string, keypath backend.KeyPath, data io.Reader, _ int64, _ *backend.CacheInfo) error { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.Write") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.Write") + defer span.End() return rw.writer(derivedCtx, bufio.NewReader(data), backend.ObjectFileName(keypath, name)) } @@ -167,8 +171,8 @@ func (rw *V2) List(ctx context.Context, keypath backend.KeyPath) ([]string, erro // ListBlocks implements backend.Reader func (rw *V2) ListBlocks(ctx context.Context, tenant string) ([]uuid.UUID, []uuid.UUID, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "V2.ListBlocks") - defer span.Finish() + ctx, span := tracer.Start(ctx, "V2.ListBlocks") + defer span.End() var ( blockIDs = make([]uuid.UUID, 0, 1000) @@ -231,8 +235,8 @@ func (rw *V2) ListBlocks(ctx context.Context, tenant string) ([]uuid.UUID, []uui func (rw *V2) Read(ctx context.Context, name string, keypath backend.KeyPath, _ *backend.CacheInfo) (io.ReadCloser, int64, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.Read") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.Read") + defer span.End() object := backend.ObjectFileName(keypath, name) b, _, err := rw.readAll(derivedCtx, object) @@ -247,11 +251,11 @@ func (rw *V2) Read(ctx context.Context, name string, keypath backend.KeyPath, _ func (rw *V2) ReadRange(ctx context.Context, name string, keypath backend.KeyPath, offset uint64, buffer []byte, _ *backend.CacheInfo) error { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.ReadRange", opentracing.Tags{ - "len": len(buffer), - "offset": offset, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.ReadRange", trace.WithAttributes( + attribute.Int("len", len(buffer)), + attribute.Int64("offset", int64(offset)), + )) + defer span.End() object := backend.ObjectFileName(keypath, name) err := rw.readRange(derivedCtx, object, int64(offset), buffer) @@ -308,8 +312,8 @@ func (rw *V2) 
DeleteVersioned(ctx context.Context, name string, keypath backend. func (rw *V2) ReadVersioned(ctx context.Context, name string, keypath backend.KeyPath) (io.ReadCloser, backend.Version, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "azure.ReadVersioned") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "azure.ReadVersioned") + defer span.End() object := backend.ObjectFileName(keypath, name) b, etag, err := rw.readAll(derivedCtx, object) diff --git a/tempodb/backend/gcs/gcs.go b/tempodb/backend/gcs/gcs.go index 1d4b346e68b..39a58570a5b 100644 --- a/tempodb/backend/gcs/gcs.go +++ b/tempodb/backend/gcs/gcs.go @@ -14,12 +14,15 @@ import ( "sync" "github.com/google/uuid" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/tempodb/backend/instrumentation" "cloud.google.com/go/storage" "github.com/cristalhq/hedgedhttp" - opentracing "github.com/opentracing/opentracing-go" "google.golang.org/api/iterator" "google.golang.org/api/option" google_http "google.golang.org/api/transport/http" @@ -35,6 +38,8 @@ type readerWriter struct { hedgedBucket *storage.BucketHandle } +var tracer = otel.Tracer("tempodb/backend/gcs") + var ( _ backend.RawReader = (*readerWriter)(nil) _ backend.RawWriter = (*readerWriter)(nil) @@ -106,17 +111,17 @@ func internalNew(cfg *Config, confirm bool) (*readerWriter, error) { // Write implements backend.Writer func (rw *readerWriter) Write(ctx context.Context, name string, keypath backend.KeyPath, data io.Reader, _ int64, _ *backend.CacheInfo) error { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "gcs.Write") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "gcs.Write") + defer span.End() - span.SetTag("object", name) + span.SetAttributes(attribute.String("object", name)) w := rw.writer(derivedCtx, backend.ObjectFileName(keypath, name), nil) _, err := io.Copy(w, data) if err != nil { w.Close() - span.SetTag("error", true) + span.RecordError(err) return fmt.Errorf("failed to write: %w", err) } @@ -126,10 +131,10 @@ func (rw *readerWriter) Write(ctx context.Context, name string, keypath backend. 
// Append implements backend.Writer func (rw *readerWriter) Append(ctx context.Context, name string, keypath backend.KeyPath, tracker backend.AppendTracker, buffer []byte) (backend.AppendTracker, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, ctx := opentracing.StartSpanFromContext(ctx, "gcs.Append", opentracing.Tags{ - "len": len(buffer), - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "gcs.Append", trace.WithAttributes( + attribute.Int("len", len(buffer)), + )) + defer span.End() var w *storage.Writer if tracker == nil { @@ -192,8 +197,8 @@ func (rw *readerWriter) List(ctx context.Context, keypath backend.KeyPath) ([]st // ListBlocks implements backend.Reader func (rw *readerWriter) ListBlocks(ctx context.Context, tenant string) ([]uuid.UUID, []uuid.UUID, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "readerWriter.ListBlocks") - defer span.Finish() + ctx, span := tracer.Start(ctx, "readerWriter.ListBlocks") + defer span.End() var ( wg sync.WaitGroup @@ -311,14 +316,14 @@ func (rw *readerWriter) ListBlocks(ctx context.Context, tenant string) ([]uuid.U // Read implements backend.Reader func (rw *readerWriter) Read(ctx context.Context, name string, keypath backend.KeyPath, _ *backend.CacheInfo) (io.ReadCloser, int64, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "gcs.Read") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "gcs.Read") + defer span.End() - span.SetTag("object", name) + span.SetAttributes(attribute.String("object", name)) b, _, err := rw.readAll(derivedCtx, backend.ObjectFileName(keypath, name)) if err != nil { - span.SetTag("error", true) + span.SetStatus(codes.Error, "") } return io.NopCloser(bytes.NewReader(b)), int64(len(b)), readError(err) } @@ -326,15 +331,15 @@ func (rw *readerWriter) Read(ctx context.Context, name string, keypath backend.K // ReadRange implements backend.Reader func (rw *readerWriter) ReadRange(ctx context.Context, name string, keypath backend.KeyPath, offset uint64, buffer []byte, _ *backend.CacheInfo) error { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "gcs.ReadRange", opentracing.Tags{ - "len": len(buffer), - "offset": offset, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "gcs.ReadRange", trace.WithAttributes( + attribute.Int("len", len(buffer)), + attribute.Int64("offset", int64(offset)), + )) + defer span.End() err := rw.readRange(derivedCtx, backend.ObjectFileName(keypath, name), int64(offset), buffer) if err != nil { - span.SetTag("error", true) + span.SetStatus(codes.Error, "") } return readError(err) } @@ -345,10 +350,10 @@ func (rw *readerWriter) Shutdown() { func (rw *readerWriter) WriteVersioned(ctx context.Context, name string, keypath backend.KeyPath, data io.Reader, version backend.Version) (backend.Version, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "gcs.WriteVersioned", opentracing.Tags{ - "object": name, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "gcs.WriteVersioned", trace.WithAttributes( + attribute.String("object", name), + )) + defer span.End() preconditions, err := createPreconditions(version) if err != nil { @@ -360,7 +365,7 @@ func (rw *readerWriter) WriteVersioned(ctx context.Context, name string, keypath _, err = io.Copy(w, data) if err != nil { w.Close() - 
span.SetTag("error", true) + span.SetStatus(codes.Error, "failed to write") return "", fmt.Errorf("failed to write: %w", err) } @@ -386,14 +391,14 @@ func (rw *readerWriter) DeleteVersioned(ctx context.Context, name string, keypat func (rw *readerWriter) ReadVersioned(ctx context.Context, name string, keypath backend.KeyPath) (io.ReadCloser, backend.Version, error) { keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "gcs.ReadVersioned", opentracing.Tags{ - "object": name, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "gcs.ReadVersioned", trace.WithAttributes( + attribute.String("object", name), + )) + defer span.End() b, attrs, err := rw.readAll(derivedCtx, backend.ObjectFileName(keypath, name)) if err != nil { - span.SetTag("error", true) + span.SetStatus(codes.Error, "") return nil, "", readError(err) } return io.NopCloser(bytes.NewReader(b)), toVersion(attrs.Generation), nil diff --git a/tempodb/backend/local/local.go b/tempodb/backend/local/local.go index 08f120cd490..54befa17daa 100644 --- a/tempodb/backend/local/local.go +++ b/tempodb/backend/local/local.go @@ -9,7 +9,9 @@ import ( "strings" "github.com/google/uuid" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/tempodb/backend" ) @@ -18,6 +20,8 @@ type Backend struct { cfg *Config } +var tracer = otel.Tracer("tempodb/backend/local") + var ( _ backend.RawReader = (*Backend)(nil) _ backend.RawWriter = (*Backend)(nil) @@ -75,10 +79,10 @@ func (rw *Backend) Append(ctx context.Context, name string, keypath backend.KeyP return nil, err } - span, _ := opentracing.StartSpanFromContext(ctx, "local.Append", opentracing.Tags{ - "len": len(buffer), - }) - defer span.Finish() + _, span := tracer.Start(ctx, "local.Append", trace.WithAttributes( + attribute.Int("len", len(buffer)), + )) + defer span.End() var dst *os.File if tracker == nil { @@ -218,11 +222,11 @@ func (rw *Backend) ReadRange(ctx context.Context, name string, keypath backend.K return err } - span, _ := opentracing.StartSpanFromContext(ctx, "local.ReadRange", opentracing.Tags{ - "len": len(buffer), - "offset": offset, - }) - defer span.Finish() + _, span := tracer.Start(ctx, "local.ReadRange", trace.WithAttributes( + attribute.Int("len", len(buffer)), + attribute.Int64("offset", int64(offset)), + )) + defer span.End() filename := rw.objectFileName(keypath, name) diff --git a/tempodb/backend/s3/s3.go b/tempodb/backend/s3/s3.go index dd037112e68..9a11ec23050 100644 --- a/tempodb/backend/s3/s3.go +++ b/tempodb/backend/s3/s3.go @@ -13,6 +13,10 @@ import ( "sync" "github.com/google/uuid" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/tempodb/backend/instrumentation" @@ -22,7 +26,6 @@ import ( "github.com/go-kit/log/level" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/opentracing/opentracing-go" "github.com/grafana/tempo/pkg/blockboundary" tempo_io "github.com/grafana/tempo/pkg/io" @@ -38,6 +41,8 @@ type readerWriter struct { hedgedCore *minio.Core } +var tracer = otel.Tracer("tempodb/backend/s3") + var ( _ backend.RawReader = (*readerWriter)(nil) _ backend.RawWriter = (*readerWriter)(nil) @@ -138,10 +143,10 @@ func getPutObjectOptions(rw *readerWriter) minio.PutObjectOptions { // Write implements backend.Writer func 
(rw *readerWriter) Write(ctx context.Context, name string, keypath backend.KeyPath, data io.Reader, size int64, _ *backend.CacheInfo) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "s3.Write") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "s3.Write") + defer span.End() - span.SetTag("object", name) + span.SetAttributes(attribute.String("object", name)) keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) objName := backend.ObjectFileName(keypath, name) @@ -157,7 +162,7 @@ func (rw *readerWriter) Write(ctx context.Context, name string, keypath backend. putObjectOptions, ) if err != nil { - span.SetTag("error", true) + span.SetStatus(codes.Error, "error writing object to s3 backend") return fmt.Errorf("error writing object to s3 backend, object %s: %w", objName, err) } level.Debug(rw.logger).Log("msg", "object uploaded to s3", "objectName", objName, "size", info.Size) @@ -167,10 +172,10 @@ func (rw *readerWriter) Write(ctx context.Context, name string, keypath backend. // AppendObject implements backend.Writer func (rw *readerWriter) Append(ctx context.Context, name string, keypath backend.KeyPath, tracker backend.AppendTracker, buffer []byte) (backend.AppendTracker, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "s3.Append", opentracing.Tags{ - "len": len(buffer), - }) - defer span.Finish() + ctx, span := tracer.Start(ctx, "s3.Append", trace.WithAttributes( + attribute.Int("len", len(buffer)), + )) + defer span.End() var a appendTracker keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) @@ -285,8 +290,8 @@ func (rw *readerWriter) ListBlocks( ctx context.Context, tenant string, ) ([]uuid.UUID, []uuid.UUID, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "readerWriter.ListBlocks") - defer span.Finish() + ctx, span := tracer.Start(ctx, "readerWriter.ListBlocks") + defer span.End() blockIDs := make([]uuid.UUID, 0, 1000) compactedBlockIDs := make([]uuid.UUID, 0, 1000) @@ -393,8 +398,8 @@ func (rw *readerWriter) ListBlocks( // Read implements backend.Reader func (rw *readerWriter) Read(ctx context.Context, name string, keypath backend.KeyPath, _ *backend.CacheInfo) (io.ReadCloser, int64, error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "s3.Read") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "s3.Read") + defer span.End() keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) b, err := rw.readAll(derivedCtx, backend.ObjectFileName(keypath, name)) @@ -407,11 +412,11 @@ func (rw *readerWriter) Read(ctx context.Context, name string, keypath backend.K // ReadRange implements backend.Reader func (rw *readerWriter) ReadRange(ctx context.Context, name string, keypath backend.KeyPath, offset uint64, buffer []byte, _ *backend.CacheInfo) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "s3.ReadRange", opentracing.Tags{ - "len": len(buffer), - "offset": offset, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "s3.ReadRange", trace.WithAttributes( + attribute.Int("len", len(buffer)), + attribute.Int64("offset", int64(offset)), + )) + defer span.End() keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) return readError(rw.readRange(derivedCtx, backend.ObjectFileName(keypath, name), int64(offset), buffer)) @@ -468,8 +473,8 @@ func (rw *readerWriter) DeleteVersioned(ctx context.Context, name string, keypat } func (rw *readerWriter) ReadVersioned(ctx context.Context, name string, keypath backend.KeyPath) (io.ReadCloser, backend.Version, error) { - 
span, derivedCtx := opentracing.StartSpanFromContext(ctx, "s3.ReadVersioned") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "s3.ReadVersioned") + defer span.End() keypath = backend.KeyPathWithPrefix(keypath, rw.cfg.Prefix) b, objectInfo, err := rw.readAllWithObjInfo(derivedCtx, backend.ObjectFileName(keypath, name)) diff --git a/tempodb/blocklist/poller.go b/tempodb/blocklist/poller.go index 6afa995440b..55b117fd598 100644 --- a/tempodb/blocklist/poller.go +++ b/tempodb/blocklist/poller.go @@ -13,11 +13,12 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/google/uuid" - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - spanlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/boundedwaitgroup" "github.com/grafana/tempo/tempodb/backend" @@ -28,6 +29,8 @@ const ( blockStatusCompactedLabel = "compacted" ) +var tracer = otel.Tracer("tempodb/blocklist") + var ( metricBackendObjects = promauto.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "tempodb", @@ -136,8 +139,8 @@ func (p *Poller) Do(previous *List) (PerTenant, PerTenantCompacted, error) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - span, _ := opentracing.StartSpanFromContext(ctx, "Poller.Do") - defer span.Finish() + _, span := tracer.Start(ctx, "Poller.Do") + defer span.End() tenants, err := p.reader.Tenants(ctx) if err != nil { @@ -190,12 +193,12 @@ func (p *Poller) pollTenantAndCreateIndex( tenantID string, previous *List, ) ([]*backend.BlockMeta, []*backend.CompactedBlockMeta, error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "Poller.pollTenantAndCreateIndex", opentracing.Tag{Key: "tenant", Value: tenantID}) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "Poller.pollTenantAndCreateIndex", trace.WithAttributes(attribute.String("tenant", tenantID))) + defer span.End() // are we a tenant index builder? 
builder := p.tenantIndexBuilder(tenantID) - span.SetTag("tenant_index_builder", builder) + span.SetAttributes(attribute.Bool("tenant_index_builder", builder)) if !builder { metricTenantIndexBuilder.WithLabelValues(tenantID).Set(0) @@ -206,15 +209,13 @@ func (p *Poller) pollTenantAndCreateIndex( metricTenantIndexAgeSeconds.WithLabelValues(tenantID).Set(float64(time.Since(i.CreatedAt) / time.Second)) level.Info(p.logger).Log("msg", "successfully pulled tenant index", "tenant", tenantID, "createdAt", i.CreatedAt, "metas", len(i.Meta), "compactedMetas", len(i.CompactedMeta)) - span.SetTag("metas", len(i.Meta)) - span.SetTag("compactedMetas", len(i.CompactedMeta)) + span.SetAttributes(attribute.Int("metas", len(i.Meta))) + span.SetAttributes(attribute.Int("compactedMetas", len(i.CompactedMeta))) return i.Meta, i.CompactedMeta, nil } metricTenantIndexErrors.WithLabelValues(tenantID).Inc() - span.LogFields( - spanlog.Error(err), - ) + span.RecordError(err) // there was an error, return the error if we're not supposed to fallback to polling if !p.cfg.PollFallback { @@ -251,8 +252,8 @@ func (p *Poller) pollTenantBlocks( tenantID string, previous *List, ) ([]*backend.BlockMeta, []*backend.CompactedBlockMeta, error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "Poller.pollTenantBlocks") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "Poller.pollTenantBlocks") + defer span.End() currentBlockIDs, currentCompactedBlockIDs, err := p.reader.Blocks(derivedCtx, tenantID) if err != nil { @@ -269,8 +270,8 @@ func (p *Poller) pollTenantBlocks( unknownBlockIDs = make(map[uuid.UUID]bool, 1000) ) - span.SetTag("metas", len(metas)) - span.SetTag("compactedMetas", len(compactedMetas)) + span.SetAttributes(attribute.Int("metas", len(metas))) + span.SetAttributes(attribute.Int("compactedMetas", len(compactedMetas))) for _, i := range metas { mm[i.BlockID] = i @@ -330,10 +331,10 @@ func (p *Poller) pollUnknown( unknownBlocks map[uuid.UUID]bool, tenantID string, ) ([]*backend.BlockMeta, []*backend.CompactedBlockMeta, error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "pollUnknown", opentracing.Tags{ - "unknownBlockIDs": len(unknownBlocks), - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "pollUnknown", trace.WithAttributes( + attribute.Int("unknownBlockIDs", len(unknownBlocks)), + )) + defer span.End() var ( err error @@ -385,8 +386,8 @@ func (p *Poller) pollUnknown( if len(errs) > 0 { metricTenantIndexErrors.WithLabelValues(tenantID).Inc() err = errors.Join(errs...) 
- ext.Error.Set(span, true) - span.SetTag("err", err) + span.SetStatus(codes.Error, "") + span.RecordError(err) return nil, nil, err } @@ -400,12 +401,12 @@ func (p *Poller) pollBlock( blockID uuid.UUID, compacted bool, ) (*backend.BlockMeta, *backend.CompactedBlockMeta, error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "Poller.pollBlock") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "Poller.pollBlock") + defer span.End() var err error - span.SetTag("tenant", tenantID) - span.SetTag("block", blockID.String()) + span.SetAttributes(attribute.String("tenant", tenantID)) + span.SetAttributes(attribute.String("block", blockID.String())) var blockMeta *backend.BlockMeta var compactedBlockMeta *backend.CompactedBlockMeta diff --git a/tempodb/compactor.go b/tempodb/compactor.go index b20418a7e12..57730389b54 100644 --- a/tempodb/compactor.go +++ b/tempodb/compactor.go @@ -9,9 +9,9 @@ import ( "time" "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" "github.com/grafana/tempo/pkg/dataquality" "github.com/grafana/tempo/pkg/util/tracing" @@ -31,6 +31,8 @@ const ( DefaultIteratorBufferSize = 1000 ) +var tracer = otel.Tracer("tempodb/compactor") + var ( metricCompactionBlocks = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "tempodb", @@ -172,8 +174,8 @@ func (rw *readerWriter) compact(ctx context.Context, blockMetas []*backend.Block level.Debug(rw.logger).Log("msg", "beginning compaction", "num blocks compacting", len(blockMetas)) // todo - add timeout? - span, ctx := opentracing.StartSpanFromContext(ctx, "rw.compact") - defer span.Finish() + ctx, span := tracer.Start(ctx, "rw.compact") + defer span.End() traceID, _ := tracing.ExtractTraceID(ctx) if traceID != "" { diff --git a/tempodb/encoding/v2/backend_block.go b/tempodb/encoding/v2/backend_block.go index d43c7dec8ae..485d5916af2 100644 --- a/tempodb/encoding/v2/backend_block.go +++ b/tempodb/encoding/v2/backend_block.go @@ -5,8 +5,9 @@ import ( "context" "fmt" - "github.com/opentracing/opentracing-go" willf_bloom "github.com/willf/bloom" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" "github.com/grafana/tempo/pkg/cache" "github.com/grafana/tempo/pkg/model" @@ -38,15 +39,16 @@ func NewBackendBlock(meta *backend.BlockMeta, r backend.Reader) (*BackendBlock, // Find searches a block for the ID and returns an object if found. 
func (b *BackendBlock) find(ctx context.Context, id common.ID) ([]byte, error) { var err error - span, ctx := opentracing.StartSpanFromContext(ctx, "BackendBlock.find") + ctx, span := tracer.Start(ctx, "BackendBlock.find") defer func() { if err != nil { - span.SetTag("error", true) + span.RecordError(err) + span.SetStatus(codes.Error, "") } - span.Finish() + span.End() }() - span.SetTag("block", b.meta.BlockID.String()) + span.SetAttributes(attribute.String("block", b.meta.BlockID.String())) shardKey := common.ShardKeyForTraceID(id, int(b.meta.BloomShardCount)) blockID := b.meta.BlockID @@ -126,8 +128,8 @@ func (b *BackendBlock) BlockMeta() *backend.BlockMeta { } func (b *BackendBlock) FindTraceByID(ctx context.Context, id common.ID, _ common.SearchOptions) (*tempopb.Trace, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "BackendBlock.FindTraceByID") - defer span.Finish() + ctx, span := tracer.Start(ctx, "BackendBlock.FindTraceByID") + defer span.End() obj, err := b.find(ctx, id) if err != nil { diff --git a/tempodb/encoding/v2/index_reader.go b/tempodb/encoding/v2/index_reader.go index d76ee087d51..6b939f7874d 100644 --- a/tempodb/encoding/v2/index_reader.go +++ b/tempodb/encoding/v2/index_reader.go @@ -6,13 +6,15 @@ import ( "fmt" "github.com/grafana/tempo/pkg/sort" + "go.opentelemetry.io/otel" "github.com/cespare/xxhash" "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" ) +var tracer = otel.Tracer("tempodb/encoding/v2") + type indexReader struct { r backend.ContextReader recordRW RecordReaderWriter @@ -86,8 +88,8 @@ func (r *indexReader) Find(ctx context.Context, id common.ID) (*Record, int, err // with a linear distribution of trace ids we can actually do much better than a normal // binary search. unfortunately there are edge cases which make this perform far worse. // for instance consider a set of trace ids what with 90% 64 bit ids and 10% 128 bit ids. 
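// For reference, the span API mapping these hunks apply throughout the backend
// packages, sketched as a minimal, self-contained Go example. The package path,
// function name and attribute values below are illustrative, not Tempo code:
//
//   opentracing.StartSpanFromContext(ctx, name) -> tracer.Start(ctx, name)      (return order swaps: ctx first, span second)
//   span.SetTag(k, v)                           -> span.SetAttributes(attribute.String/Int/Int64(...))  (attributes are typed)
//   span.LogFields(...)                         -> span.AddEvent(...) / span.RecordError(err)
//   ext.Error.Set(span, true)                   -> span.SetStatus(codes.Error, ...)
//   span.Finish()                               -> span.End()
//
// otel.Tracer returns a no-op tracer until a TracerProvider is registered via
// otel.SetTracerProvider, so the instrumented code is safe to run with tracing disabled.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

var tracer = otel.Tracer("tempodb/example")

func findObject(ctx context.Context, blockID string, size int64) (err error) {
	// Start returns the derived context first and the span second.
	ctx, span := tracer.Start(ctx, "example.findObject", trace.WithAttributes(
		attribute.String("blockID", blockID),
		attribute.Int64("blockSize", size),
	))
	defer func() {
		if err != nil {
			// Errors are recorded explicitly and reflected in the span status.
			span.RecordError(err)
			span.SetStatus(codes.Error, err.Error())
		}
		span.End()
	}()

	span.AddEvent("looking up object") // replaces span.LogFields(...)
	_ = ctx                            // the derived ctx would be passed to downstream calls
	return nil
}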
- span, ctx := opentracing.StartSpanFromContext(ctx, "indexReader.Find") - defer span.Finish() + ctx, span := tracer.Start(ctx, "indexReader.Find") + defer span.End() i, err := sort.SearchWithErrors(r.totalRecords, func(i int) (bool, error) { record, err := r.At(ctx, i) diff --git a/tempodb/encoding/v2/wal_block.go b/tempodb/encoding/v2/wal_block.go index 967c88a8885..2f73d3a2efd 100644 --- a/tempodb/encoding/v2/wal_block.go +++ b/tempodb/encoding/v2/wal_block.go @@ -13,7 +13,6 @@ import ( "time" "github.com/google/uuid" - "github.com/opentracing/opentracing-go" "github.com/grafana/tempo/pkg/dataquality" "github.com/grafana/tempo/pkg/model" @@ -244,8 +243,8 @@ func (a *walBlock) Clear() error { // Find implements common.Finder func (a *walBlock) FindTraceByID(ctx context.Context, id common.ID, _ common.SearchOptions) (*tempopb.Trace, error) { - span, _ := opentracing.StartSpanFromContext(ctx, "v2WalBlock.FindTraceByID") - defer span.Finish() + _, span := tracer.Start(ctx, "v2WalBlock.FindTraceByID") + defer span.End() combiner := model.StaticCombiner diff --git a/tempodb/encoding/vparquet/block.go b/tempodb/encoding/vparquet/block.go index c6965415aba..5d3c9583c38 100644 --- a/tempodb/encoding/vparquet/block.go +++ b/tempodb/encoding/vparquet/block.go @@ -7,12 +7,15 @@ import ( "github.com/grafana/tempo/pkg/traceql" "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" + "go.opentelemetry.io/otel" ) const ( DataFileName = "data.parquet" ) +var tracer = otel.Tracer("tempodb/encoding/vparquet") + type backendBlock struct { meta *backend.BlockMeta r backend.Reader diff --git a/tempodb/encoding/vparquet/block_findtracebyid.go b/tempodb/encoding/vparquet/block_findtracebyid.go index ddea9ee5138..3973f08a921 100644 --- a/tempodb/encoding/vparquet/block_findtracebyid.go +++ b/tempodb/encoding/vparquet/block_findtracebyid.go @@ -7,9 +7,10 @@ import ( "fmt" "io" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" "github.com/willf/bloom" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/cache" "github.com/grafana/tempo/pkg/parquetquery" @@ -29,16 +30,16 @@ const ( ) func (b *backendBlock) checkBloom(ctx context.Context, id common.ID) (found bool, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.checkBloom", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.checkBloom", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + )) + defer span.End() shardKey := common.ShardKeyForTraceID(id, int(b.meta.BloomShardCount)) nameBloom := common.BloomName(shardKey) - span.SetTag("bloom", nameBloom) + span.SetAttributes(attribute.String("bloom", nameBloom)) bloomBytes, err := b.r.Read(derivedCtx, nameBloom, b.meta.BlockID, b.meta.TenantID, &backend.CacheInfo{ Meta: b.meta, @@ -58,13 +59,13 @@ func (b *backendBlock) checkBloom(ctx context.Context, id common.ID) (found bool } func (b *backendBlock) FindTraceByID(ctx context.Context, traceID common.ID, opts common.SearchOptions) (_ *tempopb.Trace, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.FindTraceByID", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := 
tracer.Start(ctx, "parquet.backendBlock.FindTraceByID", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() found, err := b.checkBloom(derivedCtx, traceID) if err != nil { @@ -79,7 +80,7 @@ func (b *backendBlock) FindTraceByID(ctx context.Context, traceID common.ID, opt return nil, fmt.Errorf("unexpected error opening parquet file: %w", err) } defer func() { - span.SetTag("inspectedBytes", rr.BytesRead()) + span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return findTraceByID(derivedCtx, traceID, b.meta, pf) diff --git a/tempodb/encoding/vparquet/block_search.go b/tempodb/encoding/vparquet/block_search.go index 6994ee23b38..725fa832eaa 100644 --- a/tempodb/encoding/vparquet/block_search.go +++ b/tempodb/encoding/vparquet/block_search.go @@ -10,8 +10,9 @@ import ( "strconv" "time" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" tempo_io "github.com/grafana/tempo/pkg/io" pq "github.com/grafana/tempo/pkg/parquetquery" @@ -92,27 +93,27 @@ func (b *backendBlock) openForSearch(ctx context.Context, opts common.SearchOpti // cached reader readerAt = newCachedReaderAt(readerAt, backendReaderAt) - span, _ := opentracing.StartSpanFromContext(ctx, "parquet.OpenFile") - defer span.Finish() + _, span := tracer.Start(ctx, "parquet.OpenFile") + defer span.End() pf, err := parquet.OpenFile(readerAt, int64(b.meta.Size), o...) return pf, backendReaderAt, err } func (b *backendBlock) Search(ctx context.Context, req *tempopb.SearchRequest, opts common.SearchOptions) (_ *tempopb.SearchResponse, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.Search", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.Search", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return nil, fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() // Get list of row groups to inspect. 
Ideally we use predicate pushdown // here to keep only row groups that can potentially satisfy the request diff --git a/tempodb/encoding/vparquet/block_search_tags.go b/tempodb/encoding/vparquet/block_search_tags.go index 345466ab655..f4c2290bfa7 100644 --- a/tempodb/encoding/vparquet/block_search_tags.go +++ b/tempodb/encoding/vparquet/block_search_tags.go @@ -9,8 +9,9 @@ import ( pq "github.com/grafana/tempo/pkg/parquetquery" "github.com/grafana/tempo/pkg/traceql" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var translateTagToAttribute = map[string]traceql.Attribute{ @@ -41,19 +42,19 @@ var nonTraceQLAttributes = map[string]string{ } func (b *backendBlock) SearchTags(ctx context.Context, scope traceql.AttributeScope, cb common.TagCallback, opts common.SearchOptions) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.SearchTags", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.SearchTags", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return searchTags(derivedCtx, scope, cb, pf) } @@ -197,19 +198,19 @@ func (b *backendBlock) SearchTagValues(ctx context.Context, tag string, cb commo } func (b *backendBlock) SearchTagValuesV2(ctx context.Context, tag traceql.Attribute, cb common.TagCallbackV2, opts common.SearchOptions) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.SearchTagValuesV2", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.SearchTagValuesV2", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return searchTagValues(derivedCtx, tag, cb, pf) } diff --git a/tempodb/encoding/vparquet/compactor.go b/tempodb/encoding/vparquet/compactor.go index 95b52a47b11..92dd937d527 100644 --- a/tempodb/encoding/vparquet/compactor.go +++ b/tempodb/encoding/vparquet/compactor.go @@ -13,7 +13,6 @@ import ( "github.com/go-kit/log/level" "github.com/google/uuid" tempoUtil "github.com/grafana/tempo/pkg/util" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" tempo_io "github.com/grafana/tempo/pkg/io" @@ -56,8 +55,8 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, block := newBackendBlock(blockMeta, r) - span, derivedCtx 
:= opentracing.StartSpanFromContext(ctx, "vparquet.compactor.iterator") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "vparquet.compactor.iterator") + defer span.End() iter, err := block.rawIter(derivedCtx, pool) if err != nil { @@ -213,8 +212,8 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, } func (c *Compactor) appendBlock(ctx context.Context, block *streamingBlock, l log.Logger) error { - span, _ := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.appendBlock") - defer span.Finish() + _, span := tracer.Start(ctx, "vparquet.compactor.appendBlock") + defer span.End() var ( objs = block.CurrentBufferedObjects() @@ -241,8 +240,8 @@ func (c *Compactor) appendBlock(ctx context.Context, block *streamingBlock, l lo } func (c *Compactor) finishBlock(ctx context.Context, block *streamingBlock, l log.Logger) error { - span, _ := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.finishBlock") - defer span.Finish() + _, span := tracer.Start(ctx, "vparquet.compactor.finishBlock") + defer span.End() bytesFlushed, err := block.Complete() if err != nil { diff --git a/tempodb/encoding/vparquet2/block.go b/tempodb/encoding/vparquet2/block.go index 182a83b2692..72adfdd3655 100644 --- a/tempodb/encoding/vparquet2/block.go +++ b/tempodb/encoding/vparquet2/block.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/grafana/tempo/pkg/traceql" + "go.opentelemetry.io/otel" "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" @@ -14,6 +15,8 @@ const ( DataFileName = "data.parquet" ) +var tracer = otel.Tracer("tempodb/encoding/vparquet2") + type backendBlock struct { meta *backend.BlockMeta r backend.Reader diff --git a/tempodb/encoding/vparquet2/block_findtracebyid.go b/tempodb/encoding/vparquet2/block_findtracebyid.go index 7442c1320ce..c04e5ba4b85 100644 --- a/tempodb/encoding/vparquet2/block_findtracebyid.go +++ b/tempodb/encoding/vparquet2/block_findtracebyid.go @@ -8,9 +8,10 @@ import ( "io" "os" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" "github.com/willf/bloom" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/cache" "github.com/grafana/tempo/pkg/parquetquery" @@ -33,16 +34,16 @@ const ( ) func (b *backendBlock) checkBloom(ctx context.Context, id common.ID) (found bool, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.checkBloom", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.checkBloom", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + )) + defer span.End() shardKey := common.ShardKeyForTraceID(id, int(b.meta.BloomShardCount)) nameBloom := common.BloomName(shardKey) - span.SetTag("bloom", nameBloom) + span.SetAttributes(attribute.String("bloom", nameBloom)) bloomBytes, err := b.r.Read(derivedCtx, nameBloom, b.meta.BlockID, b.meta.TenantID, &backend.CacheInfo{ Meta: b.meta, @@ -67,12 +68,12 @@ func (b *backendBlock) checkIndex(ctx context.Context, id common.ID) (bool, int, return true, -1, nil } - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.checkIndex", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.checkIndex", + 
trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + )) + defer span.End() indexBytes, err := b.r.Read(derivedCtx, common.NameIndex, b.meta.BlockID, b.meta.TenantID, &backend.CacheInfo{ Meta: b.meta, @@ -100,13 +101,13 @@ func (b *backendBlock) checkIndex(ctx context.Context, id common.ID) (bool, int, } func (b *backendBlock) FindTraceByID(ctx context.Context, traceID common.ID, opts common.SearchOptions) (_ *tempopb.Trace, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.FindTraceByID", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.FindTraceByID", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() found, err := b.checkBloom(derivedCtx, traceID) if err != nil { @@ -129,7 +130,7 @@ func (b *backendBlock) FindTraceByID(ctx context.Context, traceID common.ID, opt return nil, fmt.Errorf("unexpected error opening parquet file: %w", err) } defer func() { - span.SetTag("inspectedBytes", rr.BytesRead()) + span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return findTraceByID(derivedCtx, traceID, b.meta, pf, rowGroup) diff --git a/tempodb/encoding/vparquet2/block_search.go b/tempodb/encoding/vparquet2/block_search.go index bf350b856e4..c374e75b072 100644 --- a/tempodb/encoding/vparquet2/block_search.go +++ b/tempodb/encoding/vparquet2/block_search.go @@ -10,8 +10,9 @@ import ( "strconv" "time" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" tempo_io "github.com/grafana/tempo/pkg/io" pq "github.com/grafana/tempo/pkg/parquetquery" @@ -92,27 +93,27 @@ func (b *backendBlock) openForSearch(ctx context.Context, opts common.SearchOpti // cached reader readerAt = newCachedReaderAt(readerAt, backendReaderAt) - span, _ := opentracing.StartSpanFromContext(ctx, "parquet.OpenFile") - defer span.Finish() + _, span := tracer.Start(ctx, "parquet.OpenFile") + defer span.End() pf, err := parquet.OpenFile(readerAt, int64(b.meta.Size), o...) return pf, backendReaderAt, err } func (b *backendBlock) Search(ctx context.Context, req *tempopb.SearchRequest, opts common.SearchOptions) (_ *tempopb.SearchResponse, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.Search", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.Search", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return nil, fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() // Get list of row groups to inspect. 
Ideally we use predicate pushdown // here to keep only row groups that can potentially satisfy the request diff --git a/tempodb/encoding/vparquet2/block_search_tags.go b/tempodb/encoding/vparquet2/block_search_tags.go index 859c68b67f4..58b6f4e3382 100644 --- a/tempodb/encoding/vparquet2/block_search_tags.go +++ b/tempodb/encoding/vparquet2/block_search_tags.go @@ -9,8 +9,9 @@ import ( pq "github.com/grafana/tempo/pkg/parquetquery" "github.com/grafana/tempo/pkg/traceql" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var translateTagToAttribute = map[string]traceql.Attribute{ @@ -41,19 +42,19 @@ var nonTraceQLAttributes = map[string]string{ } func (b *backendBlock) SearchTags(ctx context.Context, scope traceql.AttributeScope, cb common.TagCallback, opts common.SearchOptions) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.SearchTags", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.SearchTags", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return searchTags(derivedCtx, scope, cb, pf) } @@ -197,19 +198,19 @@ func (b *backendBlock) SearchTagValues(ctx context.Context, tag string, cb commo } func (b *backendBlock) SearchTagValuesV2(ctx context.Context, tag traceql.Attribute, cb common.TagCallbackV2, opts common.SearchOptions) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.SearchTagValuesV2", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.SearchTagValuesV2", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return searchTagValues(derivedCtx, tag, cb, pf) } diff --git a/tempodb/encoding/vparquet2/compactor.go b/tempodb/encoding/vparquet2/compactor.go index 714faa8c68a..b6c5a2dcc70 100644 --- a/tempodb/encoding/vparquet2/compactor.go +++ b/tempodb/encoding/vparquet2/compactor.go @@ -13,7 +13,6 @@ import ( "github.com/go-kit/log/level" "github.com/google/uuid" tempoUtil "github.com/grafana/tempo/pkg/util" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" tempo_io "github.com/grafana/tempo/pkg/io" @@ -56,8 +55,8 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, block := newBackendBlock(blockMeta, r) - span, 
derivedCtx := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.iterator") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "vparquet.compactor.iterator") + defer span.End() iter, err := block.rawIter(derivedCtx, pool) if err != nil { @@ -213,8 +212,8 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, } func (c *Compactor) appendBlock(ctx context.Context, block *streamingBlock, l log.Logger) error { - span, _ := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.appendBlock") - defer span.Finish() + _, span := tracer.Start(ctx, "vparquet.compactor.appendBlock") + defer span.End() var ( objs = block.CurrentBufferedObjects() @@ -241,8 +240,8 @@ func (c *Compactor) appendBlock(ctx context.Context, block *streamingBlock, l lo } func (c *Compactor) finishBlock(ctx context.Context, block *streamingBlock, l log.Logger) error { - span, _ := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.finishBlock") - defer span.Finish() + _, span := tracer.Start(ctx, "vparquet.compactor.finishBlock") + defer span.End() bytesFlushed, err := block.Complete() if err != nil { diff --git a/tempodb/encoding/vparquet3/block.go b/tempodb/encoding/vparquet3/block.go index 83aaf36d4fc..55dd67fd66c 100644 --- a/tempodb/encoding/vparquet3/block.go +++ b/tempodb/encoding/vparquet3/block.go @@ -5,12 +5,15 @@ import ( "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" + "go.opentelemetry.io/otel" ) const ( DataFileName = "data.parquet" ) +var tracer = otel.Tracer("tempodb/encoding/vparquet3") + type backendBlock struct { meta *backend.BlockMeta r backend.Reader diff --git a/tempodb/encoding/vparquet3/block_findtracebyid.go b/tempodb/encoding/vparquet3/block_findtracebyid.go index 32cac8ce0eb..b391a2cae46 100644 --- a/tempodb/encoding/vparquet3/block_findtracebyid.go +++ b/tempodb/encoding/vparquet3/block_findtracebyid.go @@ -8,9 +8,10 @@ import ( "io" "os" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" "github.com/willf/bloom" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/grafana/tempo/pkg/cache" "github.com/grafana/tempo/pkg/parquetquery" @@ -33,16 +34,16 @@ const ( ) func (b *backendBlock) checkBloom(ctx context.Context, id common.ID) (found bool, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.checkBloom", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.checkBloom", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + )) + defer span.End() shardKey := common.ShardKeyForTraceID(id, int(b.meta.BloomShardCount)) nameBloom := common.BloomName(shardKey) - span.SetTag("bloom", nameBloom) + span.SetAttributes(attribute.String("bloom", nameBloom)) bloomBytes, err := b.r.Read(derivedCtx, nameBloom, b.meta.BlockID, b.meta.TenantID, &backend.CacheInfo{ Meta: b.meta, @@ -67,12 +68,12 @@ func (b *backendBlock) checkIndex(ctx context.Context, id common.ID) (bool, int, return true, -1, nil } - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet3.backendBlock.checkIndex", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet3.backendBlock.checkIndex", + trace.WithAttributes( + 
attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + )) + defer span.End() indexBytes, err := b.r.Read(derivedCtx, common.NameIndex, b.meta.BlockID, b.meta.TenantID, &backend.CacheInfo{ Meta: b.meta, @@ -100,13 +101,13 @@ func (b *backendBlock) checkIndex(ctx context.Context, id common.ID) (bool, int, } func (b *backendBlock) FindTraceByID(ctx context.Context, traceID common.ID, opts common.SearchOptions) (_ *tempopb.Trace, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.FindTraceByID", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.FindTraceByID", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() found, err := b.checkBloom(derivedCtx, traceID) if err != nil { @@ -129,7 +130,7 @@ func (b *backendBlock) FindTraceByID(ctx context.Context, traceID common.ID, opt return nil, fmt.Errorf("unexpected error opening parquet file: %w", err) } defer func() { - span.SetTag("inspectedBytes", rr.BytesRead()) + span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return findTraceByID(derivedCtx, traceID, opts.MaxBytes, b.meta, pf, rowGroup) diff --git a/tempodb/encoding/vparquet3/block_search.go b/tempodb/encoding/vparquet3/block_search.go index 97a0f150ef3..a95de6e0fe5 100644 --- a/tempodb/encoding/vparquet3/block_search.go +++ b/tempodb/encoding/vparquet3/block_search.go @@ -9,8 +9,9 @@ import ( "strconv" "time" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" pq "github.com/grafana/tempo/pkg/parquetquery" "github.com/grafana/tempo/pkg/tempopb" @@ -82,27 +83,27 @@ func (b *backendBlock) openForSearch(ctx context.Context, opts common.SearchOpti // cached reader cachedReaderAt := newCachedReaderAt(backendReaderAt, readBufferSize, int64(b.meta.Size), b.meta.FooterSize) // most reads to the backend are going to be readbuffersize so use it as our "page cache" size - span, _ := opentracing.StartSpanFromContext(ctx, "parquet.OpenFile") - defer span.Finish() + _, span := tracer.Start(ctx, "parquet.OpenFile") + defer span.End() pf, err := parquet.OpenFile(cachedReaderAt, int64(b.meta.Size), o...) 
return pf, backendReaderAt, err } func (b *backendBlock) Search(ctx context.Context, req *tempopb.SearchRequest, opts common.SearchOptions) (_ *tempopb.SearchResponse, err error) { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.Search", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.Search", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return nil, fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() // Get list of row groups to inspect. Ideally we use predicate pushdown // here to keep only row groups that can potentially satisfy the request diff --git a/tempodb/encoding/vparquet3/block_search_tags.go b/tempodb/encoding/vparquet3/block_search_tags.go index 331b12e8195..c2dbe1f4892 100644 --- a/tempodb/encoding/vparquet3/block_search_tags.go +++ b/tempodb/encoding/vparquet3/block_search_tags.go @@ -10,8 +10,9 @@ import ( "github.com/grafana/tempo/pkg/traceql" "github.com/grafana/tempo/tempodb/backend" "github.com/grafana/tempo/tempodb/encoding/common" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var translateTagToAttribute = map[string]traceql.Attribute{ @@ -42,19 +43,19 @@ var nonTraceQLAttributes = map[string]string{ } func (b *backendBlock) SearchTags(ctx context.Context, scope traceql.AttributeScope, cb common.TagCallback, opts common.SearchOptions) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.SearchTags", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.SearchTags", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), + attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return searchTags(derivedCtx, scope, cb, pf, b.meta.DedicatedColumns) } @@ -211,19 +212,19 @@ func (b *backendBlock) SearchTagValues(ctx context.Context, tag string, cb commo } func (b *backendBlock) SearchTagValuesV2(ctx context.Context, tag traceql.Attribute, cb common.TagCallbackV2, opts common.SearchOptions) error { - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "parquet.backendBlock.SearchTagValuesV2", - opentracing.Tags{ - "blockID": b.meta.BlockID, - "tenantID": b.meta.TenantID, - "blockSize": b.meta.Size, - }) - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "parquet.backendBlock.SearchTagValuesV2", + trace.WithAttributes( + attribute.String("blockID", b.meta.BlockID.String()), + attribute.String("tenantID", b.meta.TenantID), 
+ attribute.Int64("blockSize", int64(b.meta.Size)), + )) + defer span.End() pf, rr, err := b.openForSearch(derivedCtx, opts) if err != nil { return fmt.Errorf("unexpected error opening parquet file: %w", err) } - defer func() { span.SetTag("inspectedBytes", rr.BytesRead()) }() + defer func() { span.SetAttributes(attribute.Int64("inspectedBytes", int64(rr.BytesRead()))) }() return searchTagValues(derivedCtx, tag, cb, pf, b.meta.DedicatedColumns) } diff --git a/tempodb/encoding/vparquet3/block_traceql.go b/tempodb/encoding/vparquet3/block_traceql.go index 9e360400fb5..f4d64397d72 100644 --- a/tempodb/encoding/vparquet3/block_traceql.go +++ b/tempodb/encoding/vparquet3/block_traceql.go @@ -13,8 +13,8 @@ import ( "time" "unsafe" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" + "go.opentelemetry.io/otel/attribute" "github.com/grafana/tempo/pkg/cache" "github.com/grafana/tempo/pkg/parquetquery" @@ -2286,8 +2286,8 @@ func NewTraceIDShardingPredicate(shardID, shardCount uint32) parquetquery.Predic // is a single read and typically comes from cache. Without this we have to test every // row group in the file which would be N reads. func (b *backendBlock) rowGroupsForShard(ctx context.Context, pf *parquet.File, m backend.BlockMeta, shardID, shardCount uint32) ([]parquet.RowGroup, error) { - span, _ := opentracing.StartSpanFromContext(ctx, "parquet.rowGroupsForShard") - defer span.Finish() + _, span := tracer.Start(ctx, "parquet.rowGroupsForShard") + defer span.End() cacheInfo := &backend.CacheInfo{ Meta: &m, @@ -2327,8 +2327,8 @@ func (b *backendBlock) rowGroupsForShard(ctx context.Context, pf *parquet.File, } } - span.SetTag("totalRowGroups", len(rgs)) - span.SetTag("matchedRowGroups", len(matches)) + span.SetAttributes(attribute.Int("totalRowGroups", len(rgs))) + span.SetAttributes(attribute.Int("matchedRowGroups", len(matches))) return matches, nil } diff --git a/tempodb/encoding/vparquet3/compactor.go b/tempodb/encoding/vparquet3/compactor.go index 32f6c308ce7..5c2455040eb 100644 --- a/tempodb/encoding/vparquet3/compactor.go +++ b/tempodb/encoding/vparquet3/compactor.go @@ -13,7 +13,6 @@ import ( "github.com/go-kit/log/level" "github.com/google/uuid" tempoUtil "github.com/grafana/tempo/pkg/util" - "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" tempo_io "github.com/grafana/tempo/pkg/io" @@ -56,8 +55,8 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, block := newBackendBlock(blockMeta, r) - span, derivedCtx := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.iterator") - defer span.Finish() + derivedCtx, span := tracer.Start(ctx, "vparquet.compactor.iterator") + defer span.End() iter, err := block.rawIter(derivedCtx, pool) if err != nil { @@ -217,8 +216,8 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, } func (c *Compactor) appendBlock(ctx context.Context, block *streamingBlock, l log.Logger) error { - span, _ := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.appendBlock") - defer span.Finish() + _, span := tracer.Start(ctx, "vparquet.compactor.appendBlock") + defer span.End() var ( objs = block.CurrentBufferedObjects() @@ -245,8 +244,8 @@ func (c *Compactor) appendBlock(ctx context.Context, block *streamingBlock, l lo } func (c *Compactor) finishBlock(ctx context.Context, block *streamingBlock, l log.Logger) error { - span, _ := opentracing.StartSpanFromContext(ctx, "vparquet.compactor.finishBlock") - defer span.Finish() + _, span := 
tracer.Start(ctx, "vparquet.compactor.finishBlock") + defer span.End() bytesFlushed, err := block.Complete() if err != nil { diff --git a/tempodb/tempodb.go b/tempodb/tempodb.go index 28ffc448ca9..5d7f68677bc 100644 --- a/tempodb/tempodb.go +++ b/tempodb/tempodb.go @@ -9,6 +9,7 @@ import ( "time" "github.com/grafana/tempo/pkg/util" + "go.opentelemetry.io/otel/attribute" gkLog "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -30,7 +31,6 @@ import ( "github.com/grafana/tempo/tempodb/encoding/common" "github.com/grafana/tempo/tempodb/pool" "github.com/grafana/tempo/tempodb/wal" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ) @@ -273,8 +273,8 @@ func (rw *readerWriter) BlockMetas(tenantID string) []*backend.BlockMeta { func (rw *readerWriter) Find(ctx context.Context, tenantID string, id common.ID, blockStart string, blockEnd string, timeStart int64, timeEnd int64, opts common.SearchOptions) ([]*tempopb.Trace, []error, error) { // tracing instrumentation logger := log.WithContext(ctx, log.Logger) - span, ctx := opentracing.StartSpanFromContext(ctx, "store.Find") - defer span.Finish() + ctx, span := tracer.Start(ctx, "store.Find") + defer span.End() blockStartUUID, err := uuid.Parse(blockStart) if err != nil { @@ -341,11 +341,11 @@ func (rw *readerWriter) Find(ctx context.Context, tenantID string, id common.ID, partialTraceObjs[i] = partialTraces[i].(*tempopb.Trace) } - span.SetTag("blockErrs", len(funcErrs)) - span.SetTag("liveBlocks", len(blocklist)) - span.SetTag("liveBlocksSearched", blocksSearched) - span.SetTag("compactedBlocks", len(compactedBlocklist)) - span.SetTag("compactedBlocksSearched", compactedBlocksSearched) + span.SetAttributes(attribute.Int("blockErrs", len(funcErrs))) + span.SetAttributes(attribute.Int("liveBlocks", len(blocklist))) + span.SetAttributes(attribute.Int("liveBlocksSearched", blocksSearched)) + span.SetAttributes(attribute.Int("compactedBlocks", len(compactedBlocklist))) + span.SetAttributes(attribute.Int("compactedBlocksSearched", compactedBlocksSearched)) return partialTraceObjs, funcErrs, err } diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE deleted file mode 100644 index abe5fe170bd..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2016, gRPC Ecosystem -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of grpc-opentracing nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS b/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS deleted file mode 100644 index 5cfe0175ee0..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS +++ /dev/null @@ -1,23 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the GRPC project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of GRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of GRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of GRPC or any code incorporated within this -implementation of GRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of GRPC -shall terminate as of the date such litigation is filed. -Status API Training Shop Blog About diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md deleted file mode 100644 index 78c49dbbeaa..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# OpenTracing support for gRPC in Go - -The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based -systems in Go. - -## Installation - -``` -go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc -``` - -## Documentation - -See the basic usage examples below and the [package documentation on -godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc). - -## Client-side usage example - -Wherever you call `grpc.Dial`: - -```go -// You must have some sort of OpenTracing Tracer instance on hand. -var tracer opentracing.Tracer = ... -... - -// Set up a connection to the server peer. -conn, err := grpc.Dial( - address, - ... // other options - grpc.WithUnaryInterceptor( - otgrpc.OpenTracingClientInterceptor(tracer)), - grpc.WithStreamInterceptor( - otgrpc.OpenTracingStreamClientInterceptor(tracer))) - -// All future RPC activity involving `conn` will be automatically traced. 
-``` - -## Server-side usage example - -Wherever you call `grpc.NewServer`: - -```go -// You must have some sort of OpenTracing Tracer instance on hand. -var tracer opentracing.Tracer = ... -... - -// Initialize the gRPC server. -s := grpc.NewServer( - ... // other options - grpc.UnaryInterceptor( - otgrpc.OpenTracingServerInterceptor(tracer)), - grpc.StreamInterceptor( - otgrpc.OpenTracingStreamServerInterceptor(tracer))) - -// All future RPC activity involving `s` will be automatically traced. -``` - diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go deleted file mode 100644 index 3414e55cb1f..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go +++ /dev/null @@ -1,239 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "io" - "runtime" - "sync/atomic" -) - -// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. -// -// For example: -// -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) -// -// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -// metadata; they will also look in the context.Context for an active -// in-process parent Span and establish a ChildOf reference if such a parent -// Span could be found. -func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - method string, - req, resp interface{}, - cc *grpc.ClientConn, - invoker grpc.UnaryInvoker, - opts ...grpc.CallOption, - ) error { - var err error - var parentCtx opentracing.SpanContext - if parent := opentracing.SpanFromContext(ctx); parent != nil { - parentCtx = parent.Context() - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) { - return invoker(ctx, method, req, resp, cc, opts...) - } - clientSpan := tracer.StartSpan( - method, - opentracing.ChildOf(parentCtx), - ext.SpanKindRPCClient, - gRPCComponentTag, - ) - defer clientSpan.Finish() - ctx = injectSpanContext(ctx, tracer, clientSpan) - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object("gRPC request", req)) - } - err = invoker(ctx, method, req, resp, cc, opts...) - if err == nil { - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object("gRPC response", resp)) - } - } else { - SetSpanTags(clientSpan, err, true) - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(clientSpan, method, req, resp, err) - } - return err - } -} - -// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating -// a single span to correspond to the lifetime of the RPC's stream. 
-// -// For example: -// -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) -// -// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -// metadata; they will also look in the context.Context for an active -// in-process parent Span and establish a ChildOf reference if such a parent -// Span could be found. -func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - desc *grpc.StreamDesc, - cc *grpc.ClientConn, - method string, - streamer grpc.Streamer, - opts ...grpc.CallOption, - ) (grpc.ClientStream, error) { - var err error - var parentCtx opentracing.SpanContext - if parent := opentracing.SpanFromContext(ctx); parent != nil { - parentCtx = parent.Context() - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { - return streamer(ctx, desc, cc, method, opts...) - } - - clientSpan := tracer.StartSpan( - method, - opentracing.ChildOf(parentCtx), - ext.SpanKindRPCClient, - gRPCComponentTag, - ) - ctx = injectSpanContext(ctx, tracer, clientSpan) - cs, err := streamer(ctx, desc, cc, method, opts...) - if err != nil { - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - SetSpanTags(clientSpan, err, true) - clientSpan.Finish() - return cs, err - } - return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil - } -} - -func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { - finishChan := make(chan struct{}) - - isFinished := new(int32) - *isFinished = 0 - finishFunc := func(err error) { - // The current OpenTracing specification forbids finishing a span more than - // once. Since we have multiple code paths that could concurrently call - // `finishFunc`, we need to add some sort of synchronization to guard against - // multiple finishing. - if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { - return - } - close(finishChan) - defer clientSpan.Finish() - if err != nil { - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - SetSpanTags(clientSpan, err, true) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(clientSpan, method, nil, nil, err) - } - } - go func() { - select { - case <-finishChan: - // The client span is being finished by another code path; hence, no - // action is necessary. - case <-cs.Context().Done(): - finishFunc(cs.Context().Err()) - } - }() - otcs := &openTracingClientStream{ - ClientStream: cs, - desc: desc, - finishFunc: finishFunc, - } - - // The `ClientStream` interface allows one to omit calling `Recv` if it's - // known that the result will be `io.EOF`. See - // http://stackoverflow.com/q/42915337 - // In such cases, there's nothing that triggers the span to finish. We, - // therefore, set a finalizer so that the span and the context goroutine will - // at least be cleaned up when the garbage collector is run. 
- runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { - otcs.finishFunc(nil) - }) - return otcs -} - -type openTracingClientStream struct { - grpc.ClientStream - desc *grpc.StreamDesc - finishFunc func(error) -} - -func (cs *openTracingClientStream) Header() (metadata.MD, error) { - md, err := cs.ClientStream.Header() - if err != nil { - cs.finishFunc(err) - } - return md, err -} - -func (cs *openTracingClientStream) SendMsg(m interface{}) error { - err := cs.ClientStream.SendMsg(m) - if err != nil { - cs.finishFunc(err) - } - return err -} - -func (cs *openTracingClientStream) RecvMsg(m interface{}) error { - err := cs.ClientStream.RecvMsg(m) - if err == io.EOF { - cs.finishFunc(nil) - return err - } else if err != nil { - cs.finishFunc(err) - return err - } - if !cs.desc.ServerStreams { - cs.finishFunc(nil) - } - return err -} - -func (cs *openTracingClientStream) CloseSend() error { - err := cs.ClientStream.CloseSend() - if err != nil { - cs.finishFunc(err) - } - return err -} - -func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - md = metadata.New(nil) - } else { - md = md.Copy() - } - mdWriter := metadataReaderWriter{md} - err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) - // We have no better place to record an error than the Span itself :-/ - if err != nil { - clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) - } - return metadata.NewOutgoingContext(ctx, md) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go deleted file mode 100644 index 41a6346f250..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go +++ /dev/null @@ -1,69 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// A Class is a set of types of outcomes (including errors) that will often -// be handled in the same way. -type Class string - -const ( - Unknown Class = "0xx" - // Success represents outcomes that achieved the desired results. - Success Class = "2xx" - // ClientError represents errors that were the client's fault. - ClientError Class = "4xx" - // ServerError represents errors that were the server's fault. - ServerError Class = "5xx" -) - -// ErrorClass returns the class of the given error -func ErrorClass(err error) Class { - if s, ok := status.FromError(err); ok { - switch s.Code() { - // Success or "success" - case codes.OK, codes.Canceled: - return Success - - // Client errors - case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, - codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition, - codes.OutOfRange: - return ClientError - - // Server errors - case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted, - codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss: - return ServerError - - // Not sure - case codes.Unknown: - fallthrough - default: - return Unknown - } - } - return Unknown -} - -// SetSpanTags sets one or more tags on the given span according to the -// error. 
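// The SetSpanTags helper being deleted below classified gRPC status codes and
// marked OpenTracing spans as errored. OpenTelemetry's gRPC stats handlers record
// RPC status on their own; if a hand-rolled equivalent were still wanted, a
// minimal sketch could look like this (function and attribute names are illustrative):
package example

import (
	"go.opentelemetry.io/otel/attribute"
	otelcodes "go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
	grpccodes "google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// setSpanStatus mirrors the old helper: it records the gRPC response code as a
// span attribute and flags the span as errored for non-OK codes.
func setSpanStatus(span trace.Span, err error) {
	code := grpccodes.Unknown
	if s, ok := status.FromError(err); ok {
		code = s.Code()
	}
	span.SetAttributes(attribute.Int64("rpc.grpc.status_code", int64(code)))
	if err != nil && code != grpccodes.OK {
		span.RecordError(err)
		span.SetStatus(otelcodes.Error, code.String())
	}
}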
-func SetSpanTags(span opentracing.Span, err error, client bool) { - c := ErrorClass(err) - code := codes.Unknown - if s, ok := status.FromError(err); ok { - code = s.Code() - } - span.SetTag("response_code", code) - span.SetTag("response_class", c) - if err == nil { - return - } - if client || c == ServerError { - ext.Error.Set(span, true) - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go deleted file mode 100644 index 903e8382e3f..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go +++ /dev/null @@ -1,76 +0,0 @@ -package otgrpc - -import "github.com/opentracing/opentracing-go" - -// Option instances may be used in OpenTracing(Server|Client)Interceptor -// initialization. -// -// See this post about the "functional options" pattern: -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type Option func(o *options) - -// LogPayloads returns an Option that tells the OpenTracing instrumentation to -// try to log application payloads in both directions. -func LogPayloads() Option { - return func(o *options) { - o.logPayloads = true - } -} - -// SpanInclusionFunc provides an optional mechanism to decide whether or not -// to trace a given gRPC call. Return true to create a Span and initiate -// tracing, false to not create a Span and not trace. -// -// parentSpanCtx may be nil if no parent could be extraction from either the Go -// context.Context (on the client) or the RPC (on the server). -type SpanInclusionFunc func( - parentSpanCtx opentracing.SpanContext, - method string, - req, resp interface{}) bool - -// IncludingSpans binds a IncludeSpanFunc to the options -func IncludingSpans(inclusionFunc SpanInclusionFunc) Option { - return func(o *options) { - o.inclusionFunc = inclusionFunc - } -} - -// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add -// arbitrary tags/logs/etc to the opentracing.Span associated with client -// and/or server RPCs. -type SpanDecoratorFunc func( - span opentracing.Span, - method string, - req, resp interface{}, - grpcError error) - -// SpanDecorator binds a function that decorates gRPC Spans. -func SpanDecorator(decorator SpanDecoratorFunc) Option { - return func(o *options) { - o.decorator = decorator - } -} - -// The internal-only options struct. Obviously overkill at the moment; but will -// scale well as production use dictates other configuration and tuning -// parameters. -type options struct { - logPayloads bool - decorator SpanDecoratorFunc - // May be nil. - inclusionFunc SpanInclusionFunc -} - -// newOptions returns the default options. -func newOptions() *options { - return &options{ - logPayloads: false, - inclusionFunc: nil, - } -} - -func (o *options) apply(opts ...Option) { - for _, opt := range opts { - opt(o) - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go deleted file mode 100644 index 4ff3d199783..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package otgrpc provides OpenTracing support for any gRPC client or server. 
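// Editorial aside, not part of the diff: a hypothetical sketch of how the
// ErrorClass/SetSpanTags behaviour deleted above could be approximated with
// OpenTelemetry spans — record the gRPC status code as an attribute and mark the
// span status as Error for failed calls. setSpanStatus is an illustrative helper,
// not an API introduced by this change.
package example

import (
	"go.opentelemetry.io/otel/attribute"
	otelcodes "go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
	grpccodes "google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func setSpanStatus(span trace.Span, err error) {
	code := grpccodes.Unknown
	if s, ok := status.FromError(err); ok {
		code = s.Code()
	}
	// The status-code attribute plays the role of the old "response_code" tag.
	span.SetAttributes(attribute.Int("rpc.grpc.status_code", int(code)))
	if err == nil || code == grpccodes.OK {
		return
	}
	span.RecordError(err)
	span.SetStatus(otelcodes.Error, code.String())
}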
-// -// See the README for simple usage examples: -// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md -package otgrpc diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go deleted file mode 100644 index 62cf54d2217..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go +++ /dev/null @@ -1,141 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable -// for use in a grpc.NewServer call. -// -// For example: -// -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) -// -// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -// metadata; if found, the server span will act as the ChildOf that RPC -// SpanContext. -// -// Root or not, the server Span will be embedded in the context.Context for the -// application-specific gRPC handler(s) to access. -func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, - ) (resp interface{}, err error) { - spanContext, err := extractSpanContext(ctx, tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { - return handler(ctx, req) - } - serverSpan := tracer.StartSpan( - info.FullMethod, - ext.RPCServerOption(spanContext), - gRPCComponentTag, - ) - defer serverSpan.Finish() - - ctx = opentracing.ContextWithSpan(ctx, serverSpan) - if otgrpcOpts.logPayloads { - serverSpan.LogFields(log.Object("gRPC request", req)) - } - resp, err = handler(ctx, req) - if err == nil { - if otgrpcOpts.logPayloads { - serverSpan.LogFields(log.Object("gRPC response", resp)) - } - } else { - SetSpanTags(serverSpan, err, false) - serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err) - } - return resp, err - } -} - -// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable -// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by -// creating a single span to correspond to the lifetime of the RPC's stream. -// -// For example: -// -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) -// -// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -// metadata; if found, the server span will act as the ChildOf that RPC -// SpanContext. -// -// Root or not, the server Span will be embedded in the context.Context for the -// application-specific gRPC handler(s) to access. 
-func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - spanContext, err := extractSpanContext(ss.Context(), tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { - return handler(srv, ss) - } - - serverSpan := tracer.StartSpan( - info.FullMethod, - ext.RPCServerOption(spanContext), - gRPCComponentTag, - ) - defer serverSpan.Finish() - ss = &openTracingServerStream{ - ServerStream: ss, - ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), - } - err = handler(srv, ss) - if err != nil { - SetSpanTags(serverSpan, err, false) - serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) - } - return err - } -} - -type openTracingServerStream struct { - grpc.ServerStream - ctx context.Context -} - -func (ss *openTracingServerStream) Context() context.Context { - return ss.ctx -} - -func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - md = metadata.New(nil) - } - return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go deleted file mode 100644 index 9abd5eaa629..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go +++ /dev/null @@ -1,42 +0,0 @@ -package otgrpc - -import ( - "strings" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/metadata" -) - -var ( - // Morally a const: - gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} -) - -// metadataReaderWriter satisfies both the opentracing.TextMapReader and -// opentracing.TextMapWriter interfaces. -type metadataReaderWriter struct { - metadata.MD -} - -func (w metadataReaderWriter) Set(key, val string) { - // The GRPC HPACK implementation rejects any uppercase keys here. - // - // As such, since the HTTP_HEADERS format is case-insensitive anyway, we - // blindly lowercase the key (which is guaranteed to work in the - // Inject/Extract sense per the OpenTracing spec). 
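// Editorial aside, not part of the diff: a hypothetical OpenTelemetry counterpart to
// the metadataReaderWriter carrier and extractSpanContext helpers removed in this
// package — a propagation.TextMapCarrier over gRPC metadata, used to pull the remote
// span context out of an incoming request. Illustrative only; an otelgrpc stats
// handler normally performs this extraction automatically.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"google.golang.org/grpc/metadata"
)

// mdCarrier adapts metadata.MD to the propagation.TextMapCarrier interface.
type mdCarrier metadata.MD

func (c mdCarrier) Get(key string) string {
	if vals := metadata.MD(c).Get(key); len(vals) > 0 {
		return vals[0]
	}
	return ""
}

func (c mdCarrier) Set(key, value string) { metadata.MD(c).Set(key, value) }

func (c mdCarrier) Keys() []string {
	keys := make([]string, 0, len(c))
	for k := range c {
		keys = append(keys, k)
	}
	return keys
}

// extractTraceContext returns a context carrying any span context found in the
// incoming gRPC metadata (W3C trace-context headers with the default propagator).
func extractTraceContext(ctx context.Context) context.Context {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		md = metadata.New(nil)
	}
	return otel.GetTextMapPropagator().Extract(ctx, mdCarrier(md))
}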
- key = strings.ToLower(key) - w.MD[key] = append(w.MD[key], val) -} - -func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { - for k, vals := range w.MD { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md deleted file mode 100644 index 439bf79a90f..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# OpenTelemetry-Go Jaeger Exporter - -[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger) - -> **Deprecated:** This module is no longer supported. -> OpenTelemetry dropped support for Jaeger exporter in July 2023. -> Jaeger officially accepts and recommends using OTLP. -> Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp) -> or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) instead. - -[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md) implementation. - -## Installation - -``` -go get -u go.opentelemetry.io/otel/exporters/jaeger -``` - -## Example - -See [../../example/jaeger](../../example/jaeger). - -## Configuration - -The exporter can be used to send spans to: - -- Jaeger agent using `jaeger.thrift` over compact thrift protocol via - [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option. -- Jaeger collector using `jaeger.thrift` over HTTP via - [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option. - -### Environment Variables - -The following environment variables can be used -(instead of options objects) to override the default configuration. 
- -| Environment variable | Option | Default value | -| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- | -| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost` | -| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831` | -| `OTEL_EXPORTER_JAEGER_ENDPOINT` | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint) | `http://localhost:14268/api/traces` | -| `OTEL_EXPORTER_JAEGER_USER` | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername) | | -| `OTEL_EXPORTER_JAEGER_PASSWORD` | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword) | | - -Configuration using options have precedence over the environment variables. - -## Contributing - -This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path. -When re-generating Thrift code in the future, please adapt import paths as necessary. - -## References - -- [Jaeger](https://www.jaegertracing.io/) -- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md) -- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/sdk-environment-variables.md#jaeger-exporter) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go deleted file mode 100644 index a050020bb47..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "context" - "fmt" - "io" - "net" - "strings" - "time" - - "github.com/go-logr/logr" - - genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent" - gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -const ( - // udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent. - udpPacketMaxLength = 65000 - // emitBatchOverhead is the additional overhead bytes used for enveloping the datagram, - // synced with jaeger-agent https://github.com/jaegertracing/jaeger-client-go/blob/master/transport_udp.go#L37 - emitBatchOverhead = 70 -) - -// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. 
-type agentClientUDP struct { - genAgent.Agent - io.Closer - - connUDP udpConn - client *genAgent.AgentClient - maxPacketSize int // max size of datagram in bytes - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span - thriftProtocol thrift.TProtocol -} - -type udpConn interface { - Write([]byte) (int, error) - SetWriteBuffer(int) error - Close() error -} - -type agentClientUDPParams struct { - Host string - Port string - MaxPacketSize int - Logger logr.Logger - AttemptReconnecting bool - AttemptReconnectInterval time.Duration -} - -// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. -func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) { - hostPort := net.JoinHostPort(params.Host, params.Port) - // validate hostport - if _, _, err := net.SplitHostPort(hostPort); err != nil { - return nil, err - } - - if params.MaxPacketSize <= 0 || params.MaxPacketSize > udpPacketMaxLength { - params.MaxPacketSize = udpPacketMaxLength - } - - if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 { - params.AttemptReconnectInterval = time.Second * 30 - } - - thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) - protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{}) - thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory) - - var connUDP udpConn - var err error - - if params.AttemptReconnecting { - // host is hostname, setup resolver loop in case host record changes during operation - connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) - if err != nil { - return nil, err - } - } else { - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } - - connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err - } - } - - if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { - return nil, err - } - - return &agentClientUDP{ - connUDP: connUDP, - client: client, - maxPacketSize: params.MaxPacketSize, - thriftBuffer: thriftBuffer, - thriftProtocol: thriftProtocol, - }, nil -} - -// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent. -func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error { - var errs []error - processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process) - if err != nil { - // drop the batch if serialization of process fails. - return err - } - - maxPacketSize := a.maxPacketSize - if maxPacketSize > udpPacketMaxLength-emitBatchOverhead { - maxPacketSize = udpPacketMaxLength - emitBatchOverhead - } - totalSize := processSize - var spans []*gen.Span - for _, span := range batch.Spans { - spanSize, err := a.calcSizeOfSerializedThrift(ctx, span) - if err != nil { - errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span)) - continue - } - if spanSize+processSize >= maxPacketSize { - // drop the span that exceeds the limit. 
- errs = append(errs, fmt.Errorf("span too large to send: %v", span)) - continue - } - if totalSize+spanSize >= maxPacketSize { - if err := a.flush(ctx, &gen.Batch{ - Process: batch.Process, - Spans: spans, - }); err != nil { - errs = append(errs, err) - } - spans = spans[:0] - totalSize = processSize - } - totalSize += spanSize - spans = append(spans, span) - } - - if len(spans) > 0 { - if err := a.flush(ctx, &gen.Batch{ - Process: batch.Process, - Spans: spans, - }); err != nil { - errs = append(errs, err) - } - } - - if len(errs) == 1 { - return errs[0] - } else if len(errs) > 1 { - joined := a.makeJoinedErrorString(errs) - return fmt.Errorf("multiple errors during transform: %s", joined) - } - return nil -} - -// makeJoinedErrorString join all the errors to one error message. -func (a *agentClientUDP) makeJoinedErrorString(errs []error) string { - var errMsgs []string - for _, err := range errs { - errMsgs = append(errMsgs, err.Error()) - } - return strings.Join(errMsgs, ", ") -} - -// flush will send the batch of spans to the agent. -func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error { - a.thriftBuffer.Reset() - if err := a.client.EmitBatch(ctx, batch); err != nil { - return err - } - if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", - a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) - } - _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) - return err -} - -// calcSizeOfSerializedThrift calculate the serialized thrift packet size. -func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) { - a.thriftBuffer.Reset() - err := thriftStruct.Write(ctx, a.thriftProtocol) - return a.thriftBuffer.Len(), err -} - -// Close implements Close() of io.Closer and closes the underlying UDP connection. -func (a *agentClientUDP) Close() error { - return a.connUDP.Close() -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go deleted file mode 100644 index a7359654110..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger. -// -// Deprecated: This module is no longer supported. -// OpenTelemetry dropped support for Jaeger exporter in July 2023. -// Jaeger officially accepts and recommends using OTLP. -// Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp] -// or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc] instead. 
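// Editorial aside, not part of the diff: the deprecation notice above recommends the
// OTLP trace exporters as the replacement for this vendored Jaeger exporter. A
// minimal, hypothetical setup with the OTLP/gRPC exporter might look like the
// following; the endpoint, service name, and semconv version are placeholders.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func setupTracing(ctx context.Context) (*sdktrace.TracerProvider, error) {
	// Jaeger accepts OTLP natively, so spans can be sent straight to an OTLP
	// endpoint (4317 is the default OTLP/gRPC port).
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		return nil, err
	}
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceNameKey.String("tempo-query"),
		)),
	)
	otel.SetTracerProvider(tp)
	return tp, nil
}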
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go deleted file mode 100644 index 460fb5e1352..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "os" -) - -// Environment variable names. -const ( - // Hostname for the Jaeger agent, part of address where exporter sends spans - // i.e. "localhost". - envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST" - // Port for the Jaeger agent, part of address where exporter sends spans - // i.e. 6831. - envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT" - // The HTTP endpoint for sending spans directly to a collector, - // i.e. http://jaeger-collector:14268/api/traces. - envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT" - // Username to send as part of "Basic" authentication to the collector endpoint. - envUser = "OTEL_EXPORTER_JAEGER_USER" - // Password to send as part of "Basic" authentication to the collector endpoint. - envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD" -) - -// envOr returns an env variable's value if it is exists or the default if not. -func envOr(key, defaultValue string) string { - if v := os.Getenv(key); v != "" { - return v - } - return defaultValue -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go deleted file mode 100644 index 54cd3b0867a..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -var GoUnusedProtection__ int; - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go deleted file mode 100644 index 3b96e3222ee..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -func init() { -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go deleted file mode 100644 index c7c8e9ca3e6..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go +++ /dev/null @@ -1,412 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -type Agent interface { - // Parameters: - // - Spans - EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) - // Parameters: - // - Batch - EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) -} - -type AgentClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewAgentClient(c thrift.TClient) *AgentClient { - return &AgentClient{ - c: c, - } -} - -func (p *AgentClient) Client_() thrift.TClient { - return p.c -} - -func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Spans -func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { - var _args0 AgentEmitZipkinBatchArgs - _args0.Spans = spans - p.SetLastResponseMeta_(thrift.ResponseMeta{}) - if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { - return err - } - return nil -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) { - var _args1 AgentEmitBatchArgs - _args1.Batch = batch - p.SetLastResponseMeta_(thrift.ResponseMeta{}) - if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { - return err - } - return nil -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self2 := &AgentProcessor{handler: handler, processorMap: 
make(map[string]thrift.TProcessorFunction)} - self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} - self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self2 -} - -func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { - return false, thrift.WrapTException(err2) - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x3.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x3 - -} - -type agentProcessorEmitZipkinBatch struct { - handler Agent -} - -func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitZipkinBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - _ = tickerCancel - - if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { - tickerCancel() - return true, thrift.WrapTException(err2) - } - tickerCancel() - return true, nil -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - _ = tickerCancel - - if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { - tickerCancel() - return true, thrift.WrapTException(err2) - } - tickerCancel() - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type AgentEmitZipkinBatchArgs struct { - Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { - return &AgentEmitZipkinBatchArgs{} -} - -func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { - return p.Spans -} -func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p 
*AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*zipkincore.Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem4 := &zipkincore.Span{} - if err := _elem4.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Spans = append(p.Spans, _elem4) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *AgentEmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch - -func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := 
iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.Batch = &jaeger.Batch{} - if err := p.Batch.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go deleted file mode 100644 index fe45a9f9ad2..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package jaeger - -var GoUnusedProtection__ int; - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go deleted file mode 100644 index 10162857fbb..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package jaeger - -import ( - "bytes" - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go deleted file mode 100644 index b1fe26c57d9..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go +++ /dev/null @@ -1,3022 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
- -package jaeger - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -type TagType int64 - -const ( - TagType_STRING TagType = 0 - TagType_DOUBLE TagType = 1 - TagType_BOOL TagType = 2 - TagType_LONG TagType = 3 - TagType_BINARY TagType = 4 -) - -func (p TagType) String() string { - switch p { - case TagType_STRING: - return "STRING" - case TagType_DOUBLE: - return "DOUBLE" - case TagType_BOOL: - return "BOOL" - case TagType_LONG: - return "LONG" - case TagType_BINARY: - return "BINARY" - } - return "" -} - -func TagTypeFromString(s string) (TagType, error) { - switch s { - case "STRING": - return TagType_STRING, nil - case "DOUBLE": - return TagType_DOUBLE, nil - case "BOOL": - return TagType_BOOL, nil - case "LONG": - return TagType_LONG, nil - case "BINARY": - return TagType_BINARY, nil - } - return TagType(0), fmt.Errorf("not a valid TagType string") -} - -func TagTypePtr(v TagType) *TagType { return &v } - -func (p TagType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *TagType) UnmarshalText(text []byte) error { - q, err := TagTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *TagType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = TagType(v) - return nil -} - -func (p *TagType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type SpanRefType int64 - -const ( - SpanRefType_CHILD_OF SpanRefType = 0 - SpanRefType_FOLLOWS_FROM SpanRefType = 1 -) - -func (p SpanRefType) String() string { - switch p { - case SpanRefType_CHILD_OF: - return "CHILD_OF" - case SpanRefType_FOLLOWS_FROM: - return "FOLLOWS_FROM" - } - return "" -} - -func SpanRefTypeFromString(s string) (SpanRefType, error) { - switch s { - case "CHILD_OF": - return SpanRefType_CHILD_OF, nil - case "FOLLOWS_FROM": - return SpanRefType_FOLLOWS_FROM, nil - } - return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") -} - -func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } - -func (p SpanRefType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SpanRefType) UnmarshalText(text []byte) error { - q, err := SpanRefTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *SpanRefType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = SpanRefType(v) - return nil -} - -func (p *SpanRefType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Attributes: -// - Key -// - VType -// - VStr -// - VDouble -// - VBool -// - VLong -// - VBinary -type Tag struct { - Key string `thrift:"key,1,required" db:"key" json:"key"` - VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` - VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` - VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` - VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` - VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` - VBinary []byte `thrift:"vBinary,7" db:"vBinary" 
json:"vBinary,omitempty"` -} - -func NewTag() *Tag { - return &Tag{} -} - -func (p *Tag) GetKey() string { - return p.Key -} - -func (p *Tag) GetVType() TagType { - return p.VType -} - -var Tag_VStr_DEFAULT string - -func (p *Tag) GetVStr() string { - if !p.IsSetVStr() { - return Tag_VStr_DEFAULT - } - return *p.VStr -} - -var Tag_VDouble_DEFAULT float64 - -func (p *Tag) GetVDouble() float64 { - if !p.IsSetVDouble() { - return Tag_VDouble_DEFAULT - } - return *p.VDouble -} - -var Tag_VBool_DEFAULT bool - -func (p *Tag) GetVBool() bool { - if !p.IsSetVBool() { - return Tag_VBool_DEFAULT - } - return *p.VBool -} - -var Tag_VLong_DEFAULT int64 - -func (p *Tag) GetVLong() int64 { - if !p.IsSetVLong() { - return Tag_VLong_DEFAULT - } - return *p.VLong -} - -var Tag_VBinary_DEFAULT []byte - -func (p *Tag) GetVBinary() []byte { - return p.VBinary -} -func (p *Tag) IsSetVStr() bool { - return p.VStr != nil -} - -func (p *Tag) IsSetVDouble() bool { - return p.VDouble != nil -} - -func (p *Tag) IsSetVBool() bool { - return p.VBool != nil -} - -func (p *Tag) IsSetVLong() bool { - return p.VLong != nil -} - -func (p *Tag) IsSetVBinary() bool { - return p.VBinary != nil -} - -func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetKey bool = false - var issetVType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetKey = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetVType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) - } - if 
!issetVType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) - } - return nil -} - -func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := TagType(v) - p.VType = temp - } - return nil -} - -func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.VStr = &v - } - return nil -} - -func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.VDouble = &v - } - return nil -} - -func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.VBool = &v - } - return nil -} - -func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.VLong = &v - } - return nil -} - -func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.VBinary = v - } - return nil -} - -func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - if err := p.writeField5(ctx, oprot); err != nil { - return err - } - if err := p.writeField6(ctx, oprot); err != nil { - return err - } - if err := p.writeField7(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) - } - return err -} - -func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVStr() { - if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) - } - if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) - } - } - return err -} - -func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) - } - if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) - } - } - return err -} - -func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVBool() { - if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) - } - } - return err -} - -func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVLong() { - if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) - } - } - return err -} - -func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) - } - if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) - } - } - return err -} - -func (p *Tag) Equals(other *Tag) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - 
if p.Key != other.Key { - return false - } - if p.VType != other.VType { - return false - } - if p.VStr != other.VStr { - if p.VStr == nil || other.VStr == nil { - return false - } - if (*p.VStr) != (*other.VStr) { - return false - } - } - if p.VDouble != other.VDouble { - if p.VDouble == nil || other.VDouble == nil { - return false - } - if (*p.VDouble) != (*other.VDouble) { - return false - } - } - if p.VBool != other.VBool { - if p.VBool == nil || other.VBool == nil { - return false - } - if (*p.VBool) != (*other.VBool) { - return false - } - } - if p.VLong != other.VLong { - if p.VLong == nil || other.VLong == nil { - return false - } - if (*p.VLong) != (*other.VLong) { - return false - } - } - if bytes.Compare(p.VBinary, other.VBinary) != 0 { - return false - } - return true -} - -func (p *Tag) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Tag(%+v)", *p) -} - -// Attributes: -// - Timestamp -// - Fields -type Log struct { - Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` - Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` -} - -func NewLog() *Log { - return &Log{} -} - -func (p *Log) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Log) GetFields() []*Tag { - return p.Fields -} -func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTimestamp bool = false - var issetFields bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetTimestamp = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetFields = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTimestamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) - } - if !issetFields { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) - } - return nil -} - -func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Fields = tSlice - for i := 0; i < size; i++ { - _elem0 := &Tag{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Fields = 
append(p.Fields, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Log"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) - } - return err -} - -func (p *Log) Equals(other *Log) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Timestamp != other.Timestamp { - return false - } - if len(p.Fields) != len(other.Fields) { - return false - } - for i, _tgt := range p.Fields { - _src1 := other.Fields[i] - if !_tgt.Equals(_src1) { - return false - } - } - return true -} - -func (p *Log) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Log(%+v)", *p) -} - -// Attributes: -// - RefType -// - TraceIdLow -// - TraceIdHigh -// - SpanId -type SpanRef struct { - RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` - TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` -} - -func NewSpanRef() *SpanRef { - return &SpanRef{} -} - -func (p *SpanRef) GetRefType() SpanRefType { - return p.RefType -} - -func (p *SpanRef) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *SpanRef) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *SpanRef) GetSpanId() int64 { - return p.SpanId -} -func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) 
error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRefType bool = false - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetRefType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTraceIdLow = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetTraceIdHigh = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRefType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - return nil -} - -func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SpanRefType(v) - p.RefType = temp - } - return nil -} - -func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := 
p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) - } - return err -} - -func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) - } - return err -} - -func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) - } - return err -} - -func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) - } - return err -} - -func (p *SpanRef) Equals(other *SpanRef) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.RefType != other.RefType { - return false - } - if p.TraceIdLow != other.TraceIdLow { - return false - } - if p.TraceIdHigh != other.TraceIdHigh { - return false - } - if p.SpanId != other.SpanId { - return false - } - return true -} - -func (p *SpanRef) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SpanRef(%+v)", *p) -} - -// Attributes: -// - TraceIdLow -// - TraceIdHigh -// - SpanId -// - ParentSpanId -// - OperationName -// - References -// - Flags -// - StartTime -// - Duration -// - 
Tags -// - Logs -type Span struct { - TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"` - ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` - OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` - References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` - Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` - StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` - Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` - Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` - Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *Span) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *Span) GetSpanId() int64 { - return p.SpanId -} - -func (p *Span) GetParentSpanId() int64 { - return p.ParentSpanId -} - -func (p *Span) GetOperationName() string { - return p.OperationName -} - -var Span_References_DEFAULT []*SpanRef - -func (p *Span) GetReferences() []*SpanRef { - return p.References -} - -func (p *Span) GetFlags() int32 { - return p.Flags -} - -func (p *Span) GetStartTime() int64 { - return p.StartTime -} - -func (p *Span) GetDuration() int64 { - return p.Duration -} - -var Span_Tags_DEFAULT []*Tag - -func (p *Span) GetTags() []*Tag { - return p.Tags -} - -var Span_Logs_DEFAULT []*Log - -func (p *Span) GetLogs() []*Log { - return p.Logs -} -func (p *Span) IsSetReferences() bool { - return p.References != nil -} - -func (p *Span) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Span) IsSetLogs() bool { - return p.Logs != nil -} - -func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - var issetParentSpanId bool = false - var issetOperationName bool = false - var issetFlags bool = false - var issetStartTime bool = false - var issetDuration bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetTraceIdLow = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTraceIdHigh = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetParentSpanId = true - } else { - if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - issetOperationName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - issetFlags = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - issetStartTime = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - issetDuration = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.LIST { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.LIST { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - if !issetParentSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) - } - if !issetOperationName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) - } - if !issetFlags { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) - } - if !issetStartTime { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) - } - if !issetDuration { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) - } - return nil -} - -func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *Span) ReadField3(ctx context.Context, iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ParentSpanId = v - } - return nil -} - -func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.OperationName = v - } - return nil -} - -func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SpanRef, 0, size) - p.References = tSlice - for i := 0; i < size; i++ { - _elem2 := &SpanRef{} - if err := _elem2.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.References = append(p.References, _elem2) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.Flags = v - } - return nil -} - -func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.StartTime = v - } - return nil -} - -func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Duration = v - } - return nil -} - -func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem3 := &Tag{} - if err := _elem3.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Tags = append(p.Tags, _elem3) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Log, 0, size) - p.Logs = tSlice - for i := 0; i < size; i++ { - _elem4 := &Log{} - if err := _elem4.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Logs = append(p.Logs, _elem4) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); 
err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - if err := p.writeField5(ctx, oprot); err != nil { - return err - } - if err := p.writeField6(ctx, oprot); err != nil { - return err - } - if err := p.writeField7(ctx, oprot); err != nil { - return err - } - if err := p.writeField8(ctx, oprot); err != nil { - return err - } - if err := p.writeField9(ctx, oprot); err != nil { - return err - } - if err := p.writeField10(ctx, oprot); err != nil { - return err - } - if err := p.writeField11(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) - } - return err -} - -func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) - } - return err -} - -func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) - } - return err -} - -func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) - } - return err -} - -func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) - } - return err -} - -func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetReferences() { - if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.References { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) - } - } - return err -} - -func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) - } - return err -} - -func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) - } - return err -} - -func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) - } - return err -} - -func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err 
:= v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetLogs() { - if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Logs { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) - } - } - return err -} - -func (p *Span) Equals(other *Span) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.TraceIdLow != other.TraceIdLow { - return false - } - if p.TraceIdHigh != other.TraceIdHigh { - return false - } - if p.SpanId != other.SpanId { - return false - } - if p.ParentSpanId != other.ParentSpanId { - return false - } - if p.OperationName != other.OperationName { - return false - } - if len(p.References) != len(other.References) { - return false - } - for i, _tgt := range p.References { - _src5 := other.References[i] - if !_tgt.Equals(_src5) { - return false - } - } - if p.Flags != other.Flags { - return false - } - if p.StartTime != other.StartTime { - return false - } - if p.Duration != other.Duration { - return false - } - if len(p.Tags) != len(other.Tags) { - return false - } - for i, _tgt := range p.Tags { - _src6 := other.Tags[i] - if !_tgt.Equals(_src6) { - return false - } - } - if len(p.Logs) != len(other.Logs) { - return false - } - for i, _tgt := range p.Logs { - _src7 := other.Logs[i] - if !_tgt.Equals(_src7) { - return false - } - } - return true -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - ServiceName -// - Tags -type Process struct { - ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` - Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` -} - -func NewProcess() *Process { - return &Process{} -} - -func (p *Process) GetServiceName() string { - return p.ServiceName -} - -var Process_Tags_DEFAULT []*Tag - -func (p *Process) GetTags() []*Tag { - return p.Tags -} -func (p *Process) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetServiceName bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == 
thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetServiceName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetServiceName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) - } - return nil -} - -func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem8 := &Tag{} - if err := _elem8.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) - } - p.Tags = append(p.Tags, _elem8) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) - } - } - return err -} - -func (p *Process) Equals(other *Process) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.ServiceName != other.ServiceName { - return false - } - if len(p.Tags) != len(other.Tags) { - return false - } - for i, _tgt := range p.Tags { - _src9 := other.Tags[i] - if !_tgt.Equals(_src9) { - return false - } - } - return true -} - -func (p *Process) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Process(%+v)", *p) -} - -// Attributes: -// - FullQueueDroppedSpans -// - TooLargeDroppedSpans -// - FailedToEmitSpans -type ClientStats struct { - FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` - TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` - FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` -} - -func NewClientStats() *ClientStats { - return &ClientStats{} -} - -func (p *ClientStats) GetFullQueueDroppedSpans() int64 { - return p.FullQueueDroppedSpans -} - -func (p *ClientStats) GetTooLargeDroppedSpans() int64 { - return p.TooLargeDroppedSpans -} - -func (p *ClientStats) GetFailedToEmitSpans() int64 { - return p.FailedToEmitSpans -} -func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetFullQueueDroppedSpans bool = false - var issetTooLargeDroppedSpans bool = false - var issetFailedToEmitSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetFullQueueDroppedSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTooLargeDroppedSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetFailedToEmitSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetFullQueueDroppedSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set")) - } - if !issetTooLargeDroppedSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")) - } - if 
!issetFailedToEmitSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")) - } - return nil -} - -func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.FullQueueDroppedSpans = v - } - return nil -} - -func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TooLargeDroppedSpans = v - } - return nil -} - -func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.FailedToEmitSpans = v - } - return nil -} - -func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) - } - return err -} - -func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) - } - return err -} - -func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end 
error 3:failedToEmitSpans: ", p), err) - } - return err -} - -func (p *ClientStats) Equals(other *ClientStats) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { - return false - } - if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { - return false - } - if p.FailedToEmitSpans != other.FailedToEmitSpans { - return false - } - return true -} - -func (p *ClientStats) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ClientStats(%+v)", *p) -} - -// Attributes: -// - Process -// - Spans -// - SeqNo -// - Stats -type Batch struct { - Process *Process `thrift:"process,1,required" db:"process" json:"process"` - Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` - SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` - Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` -} - -func NewBatch() *Batch { - return &Batch{} -} - -var Batch_Process_DEFAULT *Process - -func (p *Batch) GetProcess() *Process { - if !p.IsSetProcess() { - return Batch_Process_DEFAULT - } - return p.Process -} - -func (p *Batch) GetSpans() []*Span { - return p.Spans -} - -var Batch_SeqNo_DEFAULT int64 - -func (p *Batch) GetSeqNo() int64 { - if !p.IsSetSeqNo() { - return Batch_SeqNo_DEFAULT - } - return *p.SeqNo -} - -var Batch_Stats_DEFAULT *ClientStats - -func (p *Batch) GetStats() *ClientStats { - if !p.IsSetStats() { - return Batch_Stats_DEFAULT - } - return p.Stats -} -func (p *Batch) IsSetProcess() bool { - return p.Process != nil -} - -func (p *Batch) IsSetSeqNo() bool { - return p.SeqNo != nil -} - -func (p *Batch) IsSetStats() bool { - return p.Stats != nil -} - -func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetProcess bool = false - var issetSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetProcess = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetProcess { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) - } - if !issetSpans { - 
return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) - } - return nil -} - -func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.Process = &Process{} - if err := p.Process.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) - } - return nil -} - -func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem10 := &Span{} - if err := _elem10.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Spans = append(p.Spans, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SeqNo = &v - } - return nil -} - -func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.Stats = &ClientStats{} - if err := p.Stats.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) - } - return nil -} - -func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) - } - if err := p.Process.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) - } - return err -} - -func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing 
list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) - } - return err -} - -func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSeqNo() { - if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) - } - } - return err -} - -func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetStats() { - if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) - } - if err := p.Stats.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) - } - } - return err -} - -func (p *Batch) Equals(other *Batch) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.Process.Equals(other.Process) { - return false - } - if len(p.Spans) != len(other.Spans) { - return false - } - for i, _tgt := range p.Spans { - _src11 := other.Spans[i] - if !_tgt.Equals(_src11) { - return false - } - } - if p.SeqNo != other.SeqNo { - if p.SeqNo == nil || other.SeqNo == nil { - return false - } - if (*p.SeqNo) != (*other.SeqNo) { - return false - } - } - if !p.Stats.Equals(other.Stats) { - return false - } - return true -} - -func (p *Batch) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Batch(%+v)", *p) -} - -// Attributes: -// - Ok -type BatchSubmitResponse struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewBatchSubmitResponse() *BatchSubmitResponse { - return &BatchSubmitResponse{} -} - -func (p *BatchSubmitResponse) GetOk() bool { - return p.Ok -} -func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOk = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *BatchSubmitResponse) ReadField1(ctx 
context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ok != other.Ok { - return false - } - return true -} - -func (p *BatchSubmitResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) -} - -type Collector interface { - // Parameters: - // - Batches - SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) -} - -type CollectorClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewCollectorClient(c thrift.TClient) *CollectorClient { - return &CollectorClient{ - c: c, - } -} - -func (p *CollectorClient) Client_() thrift.TClient { - return p.c -} - -func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Batches -func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { - var _args12 CollectorSubmitBatchesArgs - _args12.Batches = batches - var _result14 CollectorSubmitBatchesResult - var _meta13 thrift.ResponseMeta - _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14) - p.SetLastResponseMeta_(_meta13) - if _err != nil { - return - } - return _result14.GetSuccess(), nil -} - -type CollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Collector -} - -func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *CollectorProcessor) GetProcessorFunction(key string) 
(processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewCollectorProcessor(handler Collector) *CollectorProcessor { - - self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} - return self15 -} - -func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { - return false, thrift.WrapTException(err2) - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x16.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x16 - -} - -type collectorProcessorSubmitBatches struct { - handler Collector -} - -func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := CollectorSubmitBatchesArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := CollectorSubmitBatchesResult{} - var retval []*BatchSubmitResponse - if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error()) - oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batches -type CollectorSubmitBatchesArgs struct { - Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` -} - -func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { - return &CollectorSubmitBatchesArgs{} -} - -func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { - return p.Batches -} -func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Batch, 0, size) - p.Batches = tSlice - for i := 0; i < size; i++ { - _elem17 := &Batch{} - if err := _elem17.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) - } - p.Batches = 
append(p.Batches, _elem17) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Batches { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) - } - return err -} - -func (p *CollectorSubmitBatchesArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) -} - -// Attributes: -// - Success -type CollectorSubmitBatchesResult struct { - Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult { - return &CollectorSubmitBatchesResult{} -} - -var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse - -func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { - return p.Success -} -func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list 
begin: ", err)
-    }
-    tSlice := make([]*BatchSubmitResponse, 0, size)
-    p.Success = tSlice
-    for i := 0; i < size; i++ {
-        _elem18 := &BatchSubmitResponse{}
-        if err := _elem18.Read(ctx, iprot); err != nil {
-            return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
-        }
-        p.Success = append(p.Success, _elem18)
-    }
-    if err := iprot.ReadListEnd(ctx); err != nil {
-        return thrift.PrependError("error reading list end: ", err)
-    }
-    return nil
-}
-
-func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
-    if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-    }
-    if p != nil {
-        if err := p.writeField0(ctx, oprot); err != nil {
-            return err
-        }
-    }
-    if err := oprot.WriteFieldStop(ctx); err != nil {
-        return thrift.PrependError("write field stop error: ", err)
-    }
-    if err := oprot.WriteStructEnd(ctx); err != nil {
-        return thrift.PrependError("write struct stop error: ", err)
-    }
-    return nil
-}
-
-func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
-    if p.IsSetSuccess() {
-        if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
-            return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
-        }
-        if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
-            return thrift.PrependError("error writing list begin: ", err)
-        }
-        for _, v := range p.Success {
-            if err := v.Write(ctx, oprot); err != nil {
-                return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-            }
-        }
-        if err := oprot.WriteListEnd(ctx); err != nil {
-            return thrift.PrependError("error writing list end: ", err)
-        }
-        if err := oprot.WriteFieldEnd(ctx); err != nil {
-            return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
-        }
-    }
-    return err
-}
-
-func (p *CollectorSubmitBatchesResult) String() string {
-    if p == nil {
-        return ""
-    }
-    return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
deleted file mode 100644
index ebf43018fe7..00000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
deleted file mode 100644
index 043ecba9626..00000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import (
-    "bytes"
-    "context"
-    "fmt"
-    "time"
-
-    "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-const CLIENT_SEND = "cs"
-const CLIENT_RECV = "cr"
-const SERVER_SEND = "ss"
-const SERVER_RECV = "sr"
-const MESSAGE_SEND = "ms"
-const MESSAGE_RECV = "mr"
-const WIRE_SEND = "ws"
-const WIRE_RECV = "wr"
-const CLIENT_SEND_FRAGMENT = "csf"
-const CLIENT_RECV_FRAGMENT = "crf"
-const SERVER_SEND_FRAGMENT = "ssf"
-const SERVER_RECV_FRAGMENT = "srf"
-const LOCAL_COMPONENT = "lc"
-const CLIENT_ADDR = "ca"
-const SERVER_ADDR = "sa"
-const MESSAGE_ADDR = "ma"
-
-func init() {
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
deleted file mode 100644
index 7f46810e0d3..00000000000
--- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
+++ /dev/null
@@ -1,2067 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import (
-    "bytes"
-    "context"
-    "database/sql/driver"
-    "errors"
-    "fmt"
-    "time"
-
-    "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type AnnotationType int64
-
-const (
-    AnnotationType_BOOL AnnotationType = 0
-    AnnotationType_BYTES AnnotationType = 1
-    AnnotationType_I16 AnnotationType = 2
-    AnnotationType_I32 AnnotationType = 3
-    AnnotationType_I64 AnnotationType = 4
-    AnnotationType_DOUBLE AnnotationType = 5
-    AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
-    switch p {
-    case AnnotationType_BOOL:
-        return "BOOL"
-    case AnnotationType_BYTES:
-        return "BYTES"
-    case AnnotationType_I16:
-        return "I16"
-    case AnnotationType_I32:
-        return "I32"
-    case AnnotationType_I64:
-        return "I64"
-    case AnnotationType_DOUBLE:
-        return "DOUBLE"
-    case AnnotationType_STRING:
-        return "STRING"
-    }
-    return ""
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
-    switch s {
-    case "BOOL":
-        return AnnotationType_BOOL, nil
-    case "BYTES":
-        return AnnotationType_BYTES, nil
-    case "I16":
-        return AnnotationType_I16, nil
-    case "I32":
-        return AnnotationType_I32, nil
-    case "I64":
-        return AnnotationType_I64, nil
-    case "DOUBLE":
-        return AnnotationType_DOUBLE, nil
-    case "STRING":
-        return AnnotationType_STRING, nil
-    }
-    return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
-    return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
-    q, err := AnnotationTypeFromString(string(text))
-    if err != nil {
-        return err
-    }
-    *p = q
-    return nil
-}
-
-func (p *AnnotationType) Scan(value interface{}) error {
-    v, ok := value.(int64)
-    if !ok {
-        return errors.New("Scan value is not int64")
-    }
-    *p = AnnotationType(v)
-    return nil
-}
-
-func (p *AnnotationType) Value() (driver.Value, error) {
-    if p == nil {
-        return nil, nil
-    }
-    return int64(*p), nil
-}
-
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC.
This exception -// allows zipkin to display network context of uninstrumented services, or -// clients such as web browsers. -// -// Attributes: -// - Ipv4: IPv4 host address packed into 4 bytes. -// -// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 -// - Port: IPv4 port -// -// Note: this is to be treated as an unsigned integer, so watch for negatives. -// -// Conventionally, when the port isn't known, port = 0. -// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" -// -// Conventionally, when the service name isn't known, service_name = "unknown". -// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() -type Endpoint struct { - Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` - Port int16 `thrift:"port,2" db:"port" json:"port"` - ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` - Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` -} - -func NewEndpoint() *Endpoint { - return &Endpoint{} -} - -func (p *Endpoint) GetIpv4() int32 { - return p.Ipv4 -} - -func (p *Endpoint) GetPort() int16 { - return p.Port -} - -func (p *Endpoint) GetServiceName() string { - return p.ServiceName -} - -var Endpoint_Ipv6_DEFAULT []byte - -func (p *Endpoint) GetIpv6() []byte { - return p.Ipv6 -} -func (p *Endpoint) IsSetIpv6() bool { - return p.Ipv6 != nil -} - -func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I16 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ipv4 = v - } - return nil -} - -func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Port = v - } - return nil -} - -func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil 
{ - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Ipv6 = v - } - return nil -} - -func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) - } - return err -} - -func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) - } - if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) - } - return err -} - -func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) - } - return err -} - -func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetIpv6() { - if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) - } - if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) - } - } - return err -} - -func (p *Endpoint) Equals(other *Endpoint) bool { - if p == 
other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ipv4 != other.Ipv4 { - return false - } - if p.Port != other.Port { - return false - } - if p.ServiceName != other.ServiceName { - return false - } - if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { - return false - } - return true -} - -func (p *Endpoint) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Endpoint(%+v)", *p) -} - -// An annotation is similar to a log statement. It includes a host field which -// allows these events to be attributed properly, and also aggregatable. -// -// Attributes: -// - Timestamp: Microseconds from epoch. -// -// This value should use the most precise value possible. For example, -// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. -// - Value -// - Host: Always the host that recorded the event. By specifying the host you allow -// rollup of all events (such as client requests to a service) by IP address. -type Annotation struct { - Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` - Value string `thrift:"value,2" db:"value" json:"value"` - Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` -} - -func NewAnnotation() *Annotation { - return &Annotation{} -} - -func (p *Annotation) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Annotation) GetValue() string { - return p.Value -} - -var Annotation_Host_DEFAULT *Endpoint - -func (p *Annotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return Annotation_Host_DEFAULT - } - return p.Host -} -func (p *Annotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *Annotation) ReadField3(ctx context.Context, iprot 
thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) - } - if err := p.Host.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) - } - } - return err -} - -func (p *Annotation) Equals(other *Annotation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Timestamp != other.Timestamp { - return false - } - if p.Value != other.Value { - return false - } - if !p.Host.Equals(other.Host) { - return false - } - return true -} - -func (p *Annotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Annotation(%+v)", *p) -} - -// Binary annotations are tags applied to a Span to give it context. For -// example, a binary annotation of "http.uri" could the path to a resource in a -// RPC call. -// -// Binary annotations of type STRING are always queryable, though more a -// historical implementation detail than a structural concern. -// -// Binary annotations can repeat, and vary on the host. 
Similar to Annotation, -// the host indicates who logged the event. This allows you to tell the -// difference between the client and server side of the same key. For example, -// the key "http.uri" might be different on the client and server side due to -// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, -// you can see the different points of view, which often help in debugging. -// -// Attributes: -// - Key -// - Value -// - AnnotationType -// - Host: The host that recorded tag, which allows you to differentiate between -// multiple tags with the same key. There are two exceptions to this. -// -// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or -// destination of an RPC. This exception allows zipkin to display network -// context of uninstrumented services, or clients such as web browsers. -type BinaryAnnotation struct { - Key string `thrift:"key,1" db:"key" json:"key"` - Value []byte `thrift:"value,2" db:"value" json:"value"` - AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` - Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` -} - -func NewBinaryAnnotation() *BinaryAnnotation { - return &BinaryAnnotation{} -} - -func (p *BinaryAnnotation) GetKey() string { - return p.Key -} - -func (p *BinaryAnnotation) GetValue() []byte { - return p.Value -} - -func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { - return p.AnnotationType -} - -var BinaryAnnotation_Host_DEFAULT *Endpoint - -func (p *BinaryAnnotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return BinaryAnnotation_Host_DEFAULT - } - return p.Host -} -func (p *BinaryAnnotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return 
nil -} - -func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := AnnotationType(v) - p.AnnotationType = temp - } - return nil -} - -func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField2(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteBinary(ctx, p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) - } - if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := 
oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) - } - if err := p.Host.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) - } - } - return err -} - -func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Key != other.Key { - return false - } - if bytes.Compare(p.Value, other.Value) != 0 { - return false - } - if p.AnnotationType != other.AnnotationType { - return false - } - if !p.Host.Equals(other.Host) { - return false - } - return true -} - -func (p *BinaryAnnotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BinaryAnnotation(%+v)", *p) -} - -// A trace is a series of spans (often RPC calls) which form a latency tree. -// -// The root span is where trace_id = id and parent_id = Nil. The root span is -// usually the longest interval in the trace, starting with a SERVER_RECV -// annotation and ending with a SERVER_SEND. -// -// Attributes: -// - TraceID -// - Name: Span name in lowercase, rpc method for example -// -// Conventionally, when the span name isn't known, name = "unknown". -// - ID -// - ParentID -// - Annotations -// - BinaryAnnotations -// - Debug -// - Timestamp: Microseconds from epoch of the creation of this span. -// -// This value should be set directly by instrumentation, using the most -// precise value possible. For example, gettimeofday or syncing nanoTime -// against a tick of currentTimeMillis. -// -// For compatibility with instrumentation that precede this field, collectors -// or span stores can derive this via Annotation.timestamp. -// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. -// -// This field is optional for compatibility with old data: first-party span -// stores are expected to support this at time of introduction. -// - Duration: Measurement of duration in microseconds, used to support queries. -// -// This value should be set directly, where possible. Doing so encourages -// precise measurement decoupled from problems of clocks, such as skew or NTP -// updates causing time to move backwards. -// -// For compatibility with instrumentation that precede this field, collectors -// or span stores can derive this by subtracting Annotation.timestamp. -// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. -// -// If this field is persisted as unset, zipkin will continue to work, except -// duration query support will be implementation-specific. Similarly, setting -// this field non-atomically is implementation-specific. -// -// This field is i64 vs i32 to support spans longer than 35 minutes. -// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this -// means the trace uses 128 bit traceIds instead of 64 bit. 
-type Span struct { - TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` - // unused field # 2 - Name string `thrift:"name,3" db:"name" json:"name"` - ID int64 `thrift:"id,4" db:"id" json:"id"` - ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` - Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` - // unused field # 7 - BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` - Debug bool `thrift:"debug,9" db:"debug" json:"debug"` - Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` - Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` - TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceID() int64 { - return p.TraceID -} - -func (p *Span) GetName() string { - return p.Name -} - -func (p *Span) GetID() int64 { - return p.ID -} - -var Span_ParentID_DEFAULT int64 - -func (p *Span) GetParentID() int64 { - if !p.IsSetParentID() { - return Span_ParentID_DEFAULT - } - return *p.ParentID -} - -func (p *Span) GetAnnotations() []*Annotation { - return p.Annotations -} - -func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { - return p.BinaryAnnotations -} - -var Span_Debug_DEFAULT bool = false - -func (p *Span) GetDebug() bool { - return p.Debug -} - -var Span_Timestamp_DEFAULT int64 - -func (p *Span) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Span_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var Span_Duration_DEFAULT int64 - -func (p *Span) GetDuration() int64 { - if !p.IsSetDuration() { - return Span_Duration_DEFAULT - } - return *p.Duration -} - -var Span_TraceIDHigh_DEFAULT int64 - -func (p *Span) GetTraceIDHigh() int64 { - if !p.IsSetTraceIDHigh() { - return Span_TraceIDHigh_DEFAULT - } - return *p.TraceIDHigh -} -func (p *Span) IsSetParentID() bool { - return p.ParentID != nil -} - -func (p *Span) IsSetDebug() bool { - return p.Debug != Span_Debug_DEFAULT -} - -func (p *Span) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Span) IsSetDuration() bool { - return p.Duration != nil -} - -func (p *Span) IsSetTraceIDHigh() bool { - return p.TraceIDHigh != nil -} - -func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.LIST { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.I64 { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 12: - if fieldTypeId == thrift.I64 { - if err := p.ReadField12(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceID = v - } - return nil -} - -func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Name = v - } - return nil -} - -func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ID = v - } - return nil -} - -func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.ParentID = &v - } - return nil -} - -func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Annotation, 0, size) - p.Annotations = tSlice - for i := 0; i < size; i++ { - _elem0 := &Annotation{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Annotations = append(p.Annotations, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BinaryAnnotation, 0, size) - p.BinaryAnnotations = tSlice - for i := 0; i < size; i++ { - _elem1 := &BinaryAnnotation{} - if err := _elem1.Read(ctx, 
iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Debug = v - } - return nil -} - -func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 11: ", err) - } else { - p.Duration = &v - } - return nil -} - -func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 12: ", err) - } else { - p.TraceIDHigh = &v - } - return nil -} - -func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - if err := p.writeField3(ctx, oprot); err != nil { - return err - } - if err := p.writeField4(ctx, oprot); err != nil { - return err - } - if err := p.writeField5(ctx, oprot); err != nil { - return err - } - if err := p.writeField6(ctx, oprot); err != nil { - return err - } - if err := p.writeField8(ctx, oprot); err != nil { - return err - } - if err := p.writeField9(ctx, oprot); err != nil { - return err - } - if err := p.writeField10(ctx, oprot); err != nil { - return err - } - if err := p.writeField11(ctx, oprot); err != nil { - return err - } - if err := p.writeField12(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) - } - return err -} - -func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) - } - if err := oprot.WriteString(ctx, string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) - } - return err -} - -func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) - } - return err -} - -func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetParentID() { - if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) - } - } - return err -} - -func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Annotations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) - } - return err -} - -func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BinaryAnnotations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) - } - return err -} - -func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDebug() { - if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) - } - } - return err -} - -func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDuration() { - if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) - } - } - return err -} - -func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTraceIDHigh() { - if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) - } - if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) - } - } - return err -} - -func (p *Span) Equals(other *Span) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.TraceID != other.TraceID { - return false - } - if p.Name != other.Name { - return false - } - if p.ID != other.ID { - return false - } - if p.ParentID != other.ParentID { - if p.ParentID == nil || other.ParentID == nil { - return false - } - if (*p.ParentID) != (*other.ParentID) { - return false - } - } - if len(p.Annotations) != len(other.Annotations) { - return false - } - for i, _tgt := range p.Annotations { - _src2 := other.Annotations[i] - if !_tgt.Equals(_src2) { - return false - } - } - if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { - return false - } - for i, _tgt := range p.BinaryAnnotations { - _src3 := other.BinaryAnnotations[i] - if !_tgt.Equals(_src3) { - return false - } - } - if p.Debug != other.Debug { - return false - } - if p.Timestamp != other.Timestamp { - if p.Timestamp == nil || other.Timestamp == nil { - return false - } - if (*p.Timestamp) != (*other.Timestamp) { - return false - } - } - if p.Duration != other.Duration { - if p.Duration == nil || other.Duration == nil { - return false - } - if (*p.Duration) != (*other.Duration) { - return false - } - } - if p.TraceIDHigh != other.TraceIDHigh { - if p.TraceIDHigh == nil || other.TraceIDHigh == nil { - return false - } - if (*p.TraceIDHigh) != (*other.TraceIDHigh) { - return false - } - } - return true -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - Ok -type Response struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewResponse() *Response { - return &Response{} -} - -func (p *Response) GetOk() bool { - return p.Ok -} -func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOk = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *Response) Equals(other *Response) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ok != other.Ok { - return false - } - return true -} - -func (p *Response) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Response(%+v)", *p) -} - -type ZipkinCollector interface { - // Parameters: - // - Spans - SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) -} - -type ZipkinCollectorClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func 
NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: c, - } -} - -func (p *ZipkinCollectorClient) Client_() thrift.TClient { - return p.c -} - -func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Spans -func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) { - var _args4 ZipkinCollectorSubmitZipkinBatchArgs - _args4.Spans = spans - var _result6 ZipkinCollectorSubmitZipkinBatchResult - var _meta5 thrift.ResponseMeta - _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) - p.SetLastResponseMeta_(_meta5) - if _err != nil { - return - } - return _result6.GetSuccess(), nil -} - -type ZipkinCollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler ZipkinCollector -} - -func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { - - self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} - return self7 -} - -func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { - return false, thrift.WrapTException(err2) - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x8.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x8 - -} - -type zipkinCollectorProcessorSubmitZipkinBatch struct { - handler ZipkinCollector -} - -func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := ZipkinCollectorSubmitZipkinBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := ZipkinCollectorSubmitZipkinBatchResult{} - var retval []*Response - if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) - oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type ZipkinCollectorSubmitZipkinBatchArgs struct { - Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { - return &ZipkinCollectorSubmitZipkinBatchArgs{} -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { - return p.Spans -} -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem9 := &Span{} - if err := _elem9.Read(ctx, iprot); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) - } - p.Spans = append(p.Spans, _elem9) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Success -type ZipkinCollectorSubmitZipkinBatchResult struct { - Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { - return &ZipkinCollectorSubmitZipkinBatchResult{} -} - -var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response - -func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { - return p.Success -} -func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p 
*ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Response, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem10 := &Response{} - if err := _elem10.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Success = append(p.Success, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE deleted file mode 100644 index 2bc6fbbf65c..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE +++ /dev/null @@ -1,306 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) - --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp - -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ - - --------------------------------------------------- -For lib/py/compat/win32/stdint.h - -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// -/////////////////////////////////////////////////////////////////////////////// - - --------------------------------------------------- -Codegen template in t_html_generator.h - -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. - ---------------------------------------------------- -For t_cl_generator.cc - - * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook - ---------------------------------------------------- diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE deleted file mode 100644 index 37824e7fb66..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Thrift -Copyright (C) 2006 - 2019, The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go deleted file mode 100644 index 32d5b0147a2..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" -) - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 - INVALID_TRANSFORM = 8 - INVALID_PROTOCOL = 9 - UNSUPPORTED_CLIENT_TYPE = 10 -) - -var defaultApplicationExceptionMessage = map[int32]string{ - UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", - UNKNOWN_METHOD: "unknown method", - INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", - WRONG_METHOD_NAME: "wrong method name", - BAD_SEQUENCE_ID: "bad sequence ID", - MISSING_RESULT: "missing result", - INTERNAL_ERROR: "unknown internal error", - PROTOCOL_ERROR: "unknown protocol error", - INVALID_TRANSFORM: "Invalid transform", - INVALID_PROTOCOL: "Invalid protocol", - UNSUPPORTED_CLIENT_TYPE: "Unsupported client type", -} - -// Application level Thrift exception -type TApplicationException interface { - TException - TypeId() int32 - Read(ctx context.Context, iprot TProtocol) error - Write(ctx context.Context, oprot TProtocol) error -} - -type tApplicationException struct { - message string - type_ int32 -} - -var _ TApplicationException = (*tApplicationException)(nil) - -func (tApplicationException) TExceptionType() TExceptionType { - return TExceptionTypeApplication -} - -func (e tApplicationException) Error() string { - if e.message != "" { - return e.message - } - return defaultApplicationExceptionMessage[e.type_] -} - -func NewTApplicationException(type_ int32, message string) TApplicationException { - return &tApplicationException{message, type_} -} - -func (p *tApplicationException) TypeId() int32 { - return p.type_ -} - -func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { - // TODO: this should really be generated by the compiler - _, err := iprot.ReadStructBegin(ctx) - if err != nil { - return err - } - - message := "" - type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(ctx); err != nil { - return err - } - } else { - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - case 2: - if ttype == I32 { - if type_, err = iprot.ReadI32(ctx); err != nil { - return err - } - } else { - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - default: - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - if err = iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return err - } - - p.message = message - p.type_ = type_ - - return nil -} - -func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { - err = oprot.WriteStructBegin(ctx, "TApplicationException") - if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(ctx, p.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd(ctx) - if err != nil { - return - } - } - err = oprot.WriteFieldBegin(ctx, "type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(ctx, p.type_) - if err != nil { - return - } - err = oprot.WriteFieldEnd(ctx) - if err != nil { - return - } - err = oprot.WriteFieldStop(ctx) - if err != nil { - return - } - err = 
oprot.WriteStructEnd(ctx) - return -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go deleted file mode 100644 index 45c880d32f8..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,555 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - cfg *TConfiguration - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - cfg *TConfiguration -} - -// Deprecated: Use NewTBinaryProtocolConf instead. -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocolConf(t, &TConfiguration{ - noPropagation: true, - }) -} - -// Deprecated: Use NewTBinaryProtocolConf instead. -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - return NewTBinaryProtocolConf(t, &TConfiguration{ - TBinaryStrictRead: &strictRead, - TBinaryStrictWrite: &strictWrite, - - noPropagation: true, - }) -} - -func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { - PropagateTConfiguration(t, conf) - p := &TBinaryProtocol{ - origTransport: t, - cfg: conf, - } - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - return p -} - -// Deprecated: Use NewTBinaryProtocolFactoryConf instead. -func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -// Deprecated: Use NewTBinaryProtocolFactoryConf instead. 
-func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return NewTBinaryProtocolFactoryConf(&TConfiguration{ - TBinaryStrictRead: &strictRead, - TBinaryStrictWrite: &strictWrite, - - noPropagation: true, - }) -} - -func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{ - cfg: conf, - } -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocolConf(t, p.cfg) -} - -func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - if p.cfg.GetTBinaryStrictWrite() { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(ctx, int32(version)) - if e != nil { - return e - } - e = p.WriteString(ctx, name) - if e != nil { - return e - } - e = p.WriteI32(ctx, seqId) - return e - } else { - e := p.WriteString(ctx, name) - if e != nil { - return e - } - e = p.WriteByte(ctx, int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(ctx, seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - e := p.WriteByte(ctx, int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(ctx, id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { - e := p.WriteByte(ctx, STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - e := p.WriteByte(ctx, int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(ctx, int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - e := p.WriteByte(ctx, int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - e := p.WriteByte(ctx, int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { - if value { - return p.WriteByte(ctx, 1) - } - return p.WriteByte(ctx, 0) -} - -func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { - e := p.trans.WriteByte(byte(value)) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { - v := p.buffer[0:2] - binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { - v := p.buffer[0:4] - 
binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.trans.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { - return p.WriteI64(ctx, int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { - e := p.WriteI32(ctx, int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { - e := p.WriteI32(ctx, int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32(ctx) - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString(ctx) - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32(ctx) - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.cfg.GetTBinaryStrictRead() { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte(ctx) - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32(ctx) - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte(ctx) - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16(ctx) - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { - return nil -} - -var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { - k, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error 
{ - return nil -} - -func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - b, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - b, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { - b, e := p.ReadByte(ctx) - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(ctx, buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(ctx, buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(ctx, buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(ctx, buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { - size, e := p.ReadI32(ctx) - if e != nil { - return "", e - } - err = checkSizeForProtocol(size, p.cfg) - if err != nil { - return - } - if size < 0 { - err = invalidDataLength - return - } - if size == 0 { - return "", nil - } - if size < int32(len(p.buffer)) { - // Avoid allocation on small reads - buf := p.buffer[:size] - read, e := io.ReadFull(p.trans, buf) - return string(buf[:read]), NewTProtocolException(e) - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - size, e := p.ReadI32(ctx) - if e != nil { - return nil, e - } - if err := checkSizeForProtocol(size, p.cfg); err != nil { - return nil, err - } - - buf, err := safeReadBytes(size, p.trans) - return buf, NewTProtocolException(err) -} - -func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { - var read int - _, deadlineSet := ctx.Deadline() - for { - read, err = io.ReadFull(p.trans, buf) - if deadlineSet && read == 0 && 
isTimeoutError(err) && ctx.Err() == nil { - // This is I/O timeout without anything read, - // and we still have time left, keep retrying. - continue - } - // For anything else, don't retry - break - } - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - buf, err := safeReadBytes(size, p.trans) - return string(buf), NewTProtocolException(err) -} - -func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - PropagateTConfiguration(p.origTransport, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) - _ TConfigurationSetter = (*TBinaryProtocol)(nil) -) - -// This function is shared between TBinaryProtocol and TCompactProtocol. -// -// It tries to read size bytes from trans, in a way that prevents large -// allocations when size is insanely large (mostly caused by malformed message). -func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { - if size < 0 { - return nil, nil - } - - buf := new(bytes.Buffer) - _, err := io.CopyN(buf, trans, int64(size)) - return buf.Bytes(), err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go deleted file mode 100644 index aa551b4ab37..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "context" -) - -type TBufferedTransportFactory struct { - size int -} - -type TBufferedTransport struct { - bufio.ReadWriter - tp TTransport -} - -func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return NewTBufferedTransport(trans, p.size), nil -} - -func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { - return &TBufferedTransportFactory{size: bufferSize} -} - -func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { - return &TBufferedTransport{ - ReadWriter: bufio.ReadWriter{ - Reader: bufio.NewReaderSize(trans, bufferSize), - Writer: bufio.NewWriterSize(trans, bufferSize), - }, - tp: trans, - } -} - -func (p *TBufferedTransport) IsOpen() bool { - return p.tp.IsOpen() -} - -func (p *TBufferedTransport) Open() (err error) { - return p.tp.Open() -} - -func (p *TBufferedTransport) Close() (err error) { - return p.tp.Close() -} - -func (p *TBufferedTransport) Read(b []byte) (int, error) { - n, err := p.ReadWriter.Read(b) - if err != nil { - p.ReadWriter.Reader.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Write(b []byte) (int, error) { - n, err := p.ReadWriter.Write(b) - if err != nil { - p.ReadWriter.Writer.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Flush(ctx context.Context) error { - if err := p.ReadWriter.Flush(); err != nil { - p.ReadWriter.Writer.Reset(p.tp) - return err - } - return p.tp.Flush(ctx) -} - -func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { - return p.tp.RemainingBytes() -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.tp, conf) -} - -var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go deleted file mode 100644 index ea2c01fdadb..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go +++ /dev/null @@ -1,109 +0,0 @@ -package thrift - -import ( - "context" - "fmt" -) - -// ResponseMeta represents the metadata attached to the response. -type ResponseMeta struct { - // The headers in the response, if any. - // If the underlying transport/protocol is not THeader, this will always be nil. - Headers THeaderMap -} - -type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) -} - -type TStandardClient struct { - seqId int32 - iprot, oprot TProtocol -} - -// TStandardClient implements TClient, and uses the standard message format for Thrift. -// It is not safe for concurrent use. 
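The Recv method below is where the wire contract of a Thrift RPC call is enforced: the reply must echo the method name and sequence id of the request, an EXCEPTION message is decoded into an application exception, and any message type other than REPLY is rejected. A dependency-free sketch of that header check, using the standard TMessageType values (checkReplyHeader itself is an illustrative name, not part of the library):

package main

import (
	"errors"
	"fmt"
)

// Thrift message types involved in the request/response handshake.
const (
	msgCall      = 1
	msgReply     = 2
	msgException = 3
)

// checkReplyHeader mirrors the validation Recv performs before decoding a
// result: the reply must carry the same method name and sequence id as the
// call, and only REPLY (or EXCEPTION) message types are acceptable.
func checkReplyHeader(method string, seqID int32, rMethod string, rTypeID int, rSeqID int32) error {
	switch {
	case method != rMethod:
		return fmt.Errorf("%s: wrong method name", method)
	case seqID != rSeqID:
		return fmt.Errorf("%s: out of order sequence response", method)
	case rTypeID == msgException:
		return errors.New("server returned an application exception")
	case rTypeID != msgReply:
		return fmt.Errorf("%s: invalid message type", method)
	}
	return nil
}

func main() {
	fmt.Println(checkReplyHeader("getTrace", 7, "getTrace", 7, msgReply))    // <nil>
	fmt.Println(checkReplyHeader("getTrace", 7, "getTrace", 8, msgReply))    // out of order sequence response
	fmt.Println(checkReplyHeader("getTrace", 7, "getServices", 7, msgReply)) // wrong method name
}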
-func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { - return &TStandardClient{ - iprot: inputProtocol, - oprot: outputProtocol, - } -} - -func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { - // Set headers from context object on THeaderProtocol - if headerProt, ok := oprot.(*THeaderProtocol); ok { - headerProt.ClearWriteHeaders() - for _, key := range GetWriteHeaderList(ctx) { - if value, ok := GetHeader(ctx, key); ok { - headerProt.SetWriteHeader(key, value) - } - } - } - - if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { - return err - } - if err := args.Write(ctx, oprot); err != nil { - return err - } - if err := oprot.WriteMessageEnd(ctx); err != nil { - return err - } - return oprot.Flush(ctx) -} - -func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { - rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) - if err != nil { - return err - } - - if method != rMethod { - return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) - } else if seqId != rSeqId { - return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) - } else if rTypeId == EXCEPTION { - var exception tApplicationException - if err := exception.Read(ctx, iprot); err != nil { - return err - } - - if err := iprot.ReadMessageEnd(ctx); err != nil { - return err - } - - return &exception - } else if rTypeId != REPLY { - return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) - } - - if err := result.Read(ctx, iprot); err != nil { - return err - } - - return iprot.ReadMessageEnd(ctx) -} - -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { - p.seqId++ - seqId := p.seqId - - if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { - return ResponseMeta{}, err - } - - // method is oneway - if result == nil { - return ResponseMeta{}, nil - } - - err := p.Recv(ctx, p.iprot, seqId, method, result) - var headers THeaderMap - if hp, ok := p.iprot.(*THeaderProtocol); ok { - headers = hp.transport.readHeaders - } - return ResponseMeta{ - Headers: headers, - }, err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go deleted file mode 100644 index a49225dabfb..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go +++ /dev/null @@ -1,865 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 1 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type tCompactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C -) - -var ( - ttypeToCompactType map[TType]tCompactType -) - -func init() { - ttypeToCompactType = map[TType]tCompactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type TCompactProtocolFactory struct { - cfg *TConfiguration -} - -// Deprecated: Use NewTCompactProtocolFactoryConf instead. -func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return NewTCompactProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { - return &TCompactProtocolFactory{ - cfg: conf, - } -} - -func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocolConf(trans, p.cfg) -} - -func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -type TCompactProtocol struct { - trans TRichTransport - origTransport TTransport - - cfg *TConfiguration - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the TField here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. - boolValue bool - boolValueIsNotNull bool - buffer [64]byte -} - -// Deprecated: Use NewTCompactProtocolConf instead. -func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - return NewTCompactProtocolConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { - PropagateTConfiguration(trans, conf) - p := &TCompactProtocol{ - origTransport: trans, - cfg: conf, - } - if et, ok := trans.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(trans) - } - - return p -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. 
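For reference while reading the WriteMessageBegin/ReadMessageBegin pair that follows: a compact-protocol message header is one protocol-id byte (0x82), one byte carrying the version in its low five bits and the message type in its top three, the sequence id as a plain (non-zigzag) varint, and the method name as a varint-length-prefixed string. A minimal stdlib-only sketch of the encoding (encodeCompactHeader is an illustrative helper, not a Thrift API):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeCompactHeader builds the message header written by the compact
// protocol: protocol id, version/type byte, varint seq id, then the
// varint-length-prefixed method name.
func encodeCompactHeader(name string, typeID byte, seqID int32) []byte {
	var buf bytes.Buffer
	buf.WriteByte(0x82) // COMPACT_PROTOCOL_ID

	// Version 1 in the low 5 bits, message type in the top 3 bits.
	buf.WriteByte(byte(1&0x1f) | (typeID<<5)&0xe0)

	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(uint32(seqID))) // seq id is a plain varint, no zigzag
	buf.Write(tmp[:n])

	n = binary.PutUvarint(tmp[:], uint64(len(name))) // string length is also a plain varint
	buf.Write(tmp[:n])
	buf.WriteString(name)
	return buf.Bytes()
}

func main() {
	// 1 == CALL in Thrift's TMessageType enumeration.
	fmt.Printf("% x\n", encodeCompactHeader("getTrace", 1, 42))
}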
-func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewTProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewTProtocolException(err) - } - e := p.WriteString(ctx, name) - return e - -} - -func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. -func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { - if len(p.lastField) <= 0 { - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) - } - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) - return NewTProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. 
- var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(ctx, id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - return written, nil -} - -func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } - -func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { - err := p.writeByteDirect(STOP) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return NewTProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } - -// Write a list header. -func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } - -// Write a set header. -func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } - -func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { - v := byte(COMPACT_BOOLEAN_FALSE) - if value { - v = byte(COMPACT_BOOLEAN_TRUE) - } - if p.booleanFieldPending { - // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) - p.booleanFieldPending = false - return NewTProtocolException(err) - } - // we're not part of a field, so just write the value. - err := p.writeByteDirect(v) - return NewTProtocolException(err) -} - -// Write a byte. Nothing to see here! -func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { - err := p.writeByteDirect(byte(value)) - return NewTProtocolException(err) -} - -// Write an I16 as a zigzag varint. -func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { - _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) - return NewTProtocolException(err) -} - -// Write an i32 as a zigzag varint. -func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { - _, err := p.writeVarint32(p.int32ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write an i64 as a zigzag varint. -func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { - _, err := p.writeVarint64(p.int64ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write a double to the wire as 8 bytes. 
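The field-header logic in writeFieldBeginInternal above is the densest part of the compact protocol: when a field id is between 1 and 15 greater than the previous one, the delta and the compact type share a single byte (delta in the high nibble, type in the low nibble); otherwise a bare type byte is followed by the full field id as a zigzag varint, and boolean fields additionally fold their value into the type nibble. A small self-contained sketch of the short form and its read side (packFieldHeader/unpackFieldHeader are illustrative names):

package main

import "fmt"

// packFieldHeader returns the single-byte short form used when the new field
// id is 1..15 greater than the previous one: delta in the high nibble,
// compact type in the low nibble.
func packFieldHeader(lastFieldID, fieldID int, compactType byte) (byte, bool) {
	delta := fieldID - lastFieldID
	if delta <= 0 || delta > 15 {
		return 0, false // long form needed: type byte followed by a zigzag varint field id
	}
	return byte(delta)<<4 | compactType, true
}

// unpackFieldHeader is the matching read side for the short form.
func unpackFieldHeader(lastFieldID int, header byte) (fieldID int, compactType byte) {
	return lastFieldID + int(header>>4), header & 0x0f
}

func main() {
	const compactI32 = 0x05 // COMPACT_I32
	b, ok := packFieldHeader(1, 3, compactI32)
	fmt.Printf("short form: %#02x ok=%v\n", b, ok)

	id, typ := unpackFieldHeader(1, b)
	fmt.Printf("decoded: field id=%d type=%#02x\n", id, typ)
}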
-func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { - buf := p.buffer[0:8] - binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) - _, err := p.trans.Write(buf) - return NewTProtocolException(err) -} - -// Write a string to the wire with a varint size preceding. -func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { - _, e := p.writeVarint32(int32(len(value))) - if e != nil { - return NewTProtocolException(e) - } - if len(value) > 0 { - } - _, e = p.trans.WriteString(value) - return e -} - -// Write a byte array, using a varint for the size. -func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { - _, e := p.writeVarint32(int32(len(bin))) - if e != nil { - return NewTProtocolException(e) - } - if len(bin) > 0 { - _, e = p.trans.Write(bin) - return NewTProtocolException(e) - } - return nil -} - -// -// Reading methods. -// - -// Read a message header. -func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - var protocolId byte - - _, deadlineSet := ctx.Deadline() - for { - protocolId, err = p.readByteDirect() - if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { - // keep retrying I/O timeout errors since we still have - // time left - continue - } - // For anything else, don't retry - break - } - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version != COMPACT_VERSION { - e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) - err = NewTProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - name, err = p.ReadString(ctx) - return -} - -func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { - // consume the last field we read off the wire. - if len(p.lastField) <= 0 { - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) - } - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. - if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. 
- modifier := int16((t & 0xf0) >> 4) - if modifier == 0 { - // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16(ctx) - if err != nil { - return - } - } else { - // has a delta. add the delta to the last read field id. - id = int16(p.lastFieldId) + modifier - } - typeId, e := p.getTType(tCompactType(t & 0x0f)) - if e != nil { - err = NewTProtocolException(e) - return - } - - // if this happens to be a boolean field, the value is encoded in the type - if p.isBoolType(t) { - // save the boolean value in a special instance variable. - p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) - p.boolValueIsNotNull = true - } - - // push the new field onto the field stack so we can keep the deltas going. - p.lastFieldId = int(id) - return -} - -func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil } - -// Read a map header off the wire. If the size is zero, skip reading the key -// and value type. This means that 0-length maps will yield TMaps without the -// "correct" types. -func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { - size32, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - keyAndValueType := byte(STOP) - if size != 0 { - keyAndValueType, err = p.readByteDirect() - if err != nil { - return - } - } - keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) - valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) - return -} - -func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil } - -// Read a list header off the wire. If the list size is 0-14, the size will -// be packed into the element type header. If it's a longer list, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - size_and_type, err := p.readByteDirect() - if err != nil { - return - } - size = int((size_and_type >> 4) & 0x0f) - if size == 15 { - size2, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size2 < 0 { - err = invalidDataLength - return - } - size = int(size2) - } - elemType, e := p.getTType(tCompactType(size_and_type)) - if e != nil { - err = NewTProtocolException(e) - return - } - return -} - -func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil } - -// Read a set header off the wire. If the set size is 0-14, the size will -// be packed into the element type header. If it's a longer set, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.ReadListBegin(ctx) -} - -func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil } - -// Read a boolean off the wire. If this is a boolean field, the value should -// already have been read during readFieldBegin, so we'll just consume the -// pre-stored value. Otherwise, read a byte. -func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) { - if p.boolValueIsNotNull { - p.boolValueIsNotNull = false - return p.boolValue, nil - } - v, err := p.readByteDirect() - return v == COMPACT_BOOLEAN_TRUE, err -} - -// Read a single byte off the wire. Nothing interesting here. 
-func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewTProtocolException(err) - } - return int8(v), err -} - -// Read an i16 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { - v, err := p.ReadI32(ctx) - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. -func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewTProtocolException(e) - } - return math.Float64frombits(p.bytesToUint64(longBits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewTProtocolException(e) - } - err = checkSizeForProtocol(length, p.cfg) - if err != nil { - return - } - if length == 0 { - return "", nil - } - if length < int32(len(p.buffer)) { - // Avoid allocation on small reads - buf := p.buffer[:length] - read, e := io.ReadFull(p.trans, buf) - return string(buf[:read]), NewTProtocolException(e) - } - - buf, e := safeReadBytes(length, p.trans) - return string(buf), NewTProtocolException(e) -} - -// Read a []byte from the wire. -func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewTProtocolException(e) - } - err = checkSizeForProtocol(length, p.cfg) - if err != nil { - return - } - if length == 0 { - return []byte{}, nil - } - - buf, e := safeReadBytes(length, p.trans) - return buf, NewTProtocolException(e) -} - -func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TCompactProtocol) Transport() TTransport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? 
-func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. -func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { - binary.LittleEndian.PutUint64(buf, n) -} - -func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { - binary.LittleEndian.PutUint64(buf, uint64(n)) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *TCompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// Writes a byte without any possibility of all that field header nonsense. -func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { - return 1, p.writeByteDirect(byte(n)) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *TCompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. -func (p *TCompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *TCompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. 
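The zigzag and varint helpers above implement the same encoding the Go standard library exposes as encoding/binary.PutVarint: zigzag folds the sign into the least significant bit so small negative numbers stay short, and the result is written as little-endian base-128 with a continuation bit. That overlap gives a convenient stdlib-only way to sanity-check the scheme:

package main

import (
	"encoding/binary"
	"fmt"
)

// zigzag folds the sign into the least significant bit, mirroring
// int64ToZigzag above, so small negative numbers encode to few bytes.
func zigzag(n int64) uint64 {
	return uint64((n << 1) ^ (n >> 63))
}

func main() {
	for _, n := range []int64{0, -1, 1, -64, 300, -123456789} {
		// Hand-rolled: zigzag, then unsigned LEB128.
		var hand [binary.MaxVarintLen64]byte
		handLen := binary.PutUvarint(hand[:], zigzag(n))

		// Standard library: PutVarint performs the same zigzag + varint steps.
		var std [binary.MaxVarintLen64]byte
		stdLen := binary.PutVarint(std[:], n)

		fmt.Printf("%12d  hand=% x  std=% x  equal=%v\n",
			n, hand[:handLen], std[:stdLen],
			string(hand[:handLen]) == string(std[:stdLen]))
	}
}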
-func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(b)) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *TCompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a tCompactType constant, convert it to its corresponding -// TType value. -func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { - switch byte(t) & 0x0f { - case STOP: - return STOP, nil - case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: - return BOOL, nil - case COMPACT_BYTE: - return BYTE, nil - case COMPACT_I16: - return I16, nil - case COMPACT_I32: - return I32, nil - case COMPACT_I64: - return I64, nil - case COMPACT_DOUBLE: - return DOUBLE, nil - case COMPACT_BINARY: - return STRING, nil - case COMPACT_LIST: - return LIST, nil - case COMPACT_SET: - return SET, nil - case COMPACT_MAP: - return MAP, nil - case COMPACT_STRUCT: - return STRUCT, nil - } - return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) -} - -// Given a TType value, find the appropriate TCompactProtocol.Types constant. -func (p *TCompactProtocol) getCompactType(t TType) tCompactType { - return ttypeToCompactType[t] -} - -func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - PropagateTConfiguration(p.origTransport, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) - _ TConfigurationSetter = (*TCompactProtocol)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go deleted file mode 100644 index 454d9f37748..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "fmt" - "time" -) - -// Default TConfiguration values. 
-const ( - DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 - DEFAULT_MAX_FRAME_SIZE = 16384000 - - DEFAULT_TBINARY_STRICT_READ = false - DEFAULT_TBINARY_STRICT_WRITE = true - - DEFAULT_CONNECT_TIMEOUT = 0 - DEFAULT_SOCKET_TIMEOUT = 0 -) - -// TConfiguration defines some configurations shared between TTransport, -// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. -// -// When constructing TConfiguration, you only need to specify the non-default -// fields. All zero values have sane default values. -// -// Not all configurations defined are applicable to all implementations. -// Implementations are free to ignore the configurations not applicable to them. -// -// All functions attached to this type are nil-safe. -// -// See [1] for spec. -// -// NOTE: When using TConfiguration, fill in all the configurations you want to -// set across the stack, not only the ones you want to set in the immediate -// TTransport/TProtocol. -// -// For example, say you want to migrate this old code into using TConfiguration: -// -// sccket := thrift.NewTSocketTimeout("host:port", time.Second) -// transFactory := thrift.NewTFramedTransportFactoryMaxLength( -// thrift.NewTTransportFactory(), -// 1024 * 1024 * 256, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) -// -// This is the wrong way to do it because in the end the TConfiguration used by -// socket and transFactory will be overwritten by the one used by protoFactory -// because of TConfiguration propagation: -// -// // bad example, DO NOT USE -// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// }) -// transFactory := thrift.NewTFramedTransportFactoryConf( -// thrift.NewTTransportFactory(), -// &thrift.TConfiguration{ -// MaxFrameSize: 1024 * 1024 * 256, -// }, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// }) -// -// This is the correct way to do it: -// -// conf := &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// -// MaxFrameSize: 1024 * 1024 * 256, -// -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// } -// sccket := thrift.NewTSocketConf("host:port", conf) -// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) -// -// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md -type TConfiguration struct { - // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. - MaxMessageSize int32 - - // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. - // - // Also if MaxMessageSize < MaxFrameSize, - // MaxMessageSize will be used instead. - MaxFrameSize int32 - - // Connect and socket timeouts to be used by TSocket and TSSLSocket. - // - // 0 means no timeout. - // - // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be - // used. - ConnectTimeout time.Duration - SocketTimeout time.Duration - - // TLS config to be used by TSSLSocket. - TLSConfig *tls.Config - - // Strict read/write configurations for TBinaryProtocol. - // - // BoolPtr helper function is available to use literal values. - TBinaryStrictRead *bool - TBinaryStrictWrite *bool - - // The wrapped protocol id to be used in THeader transport/protocol. 
- // - // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions - // are provided to help filling this value. - THeaderProtocolID *THeaderProtocolID - - // Used internally by deprecated constructors, to avoid overriding - // underlying TTransport/TProtocol's cfg by accidental propagations. - // - // For external users this is always false. - noPropagation bool -} - -// GetMaxMessageSize returns the max message size an implementation should -// follow. -// -// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil. -func (tc *TConfiguration) GetMaxMessageSize() int32 { - if tc == nil || tc.MaxMessageSize <= 0 { - return DEFAULT_MAX_MESSAGE_SIZE - } - return tc.MaxMessageSize -} - -// GetMaxFrameSize returns the max frame size an implementation should follow. -// -// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil. -// -// If the configured max message size is smaller than the configured max frame -// size, the smaller one will be returned instead. -func (tc *TConfiguration) GetMaxFrameSize() int32 { - if tc == nil { - return DEFAULT_MAX_FRAME_SIZE - } - maxFrameSize := tc.MaxFrameSize - if maxFrameSize <= 0 { - maxFrameSize = DEFAULT_MAX_FRAME_SIZE - } - if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize { - return maxMessageSize - } - return maxFrameSize -} - -// GetConnectTimeout returns the connect timeout should be used by TSocket and -// TSSLSocket. -// -// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead. -func (tc *TConfiguration) GetConnectTimeout() time.Duration { - if tc == nil || tc.ConnectTimeout < 0 { - return DEFAULT_CONNECT_TIMEOUT - } - return tc.ConnectTimeout -} - -// GetSocketTimeout returns the socket timeout should be used by TSocket and -// TSSLSocket. -// -// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead. -func (tc *TConfiguration) GetSocketTimeout() time.Duration { - if tc == nil || tc.SocketTimeout < 0 { - return DEFAULT_SOCKET_TIMEOUT - } - return tc.SocketTimeout -} - -// GetTLSConfig returns the tls config should be used by TSSLSocket. -// -// It's nil-safe. If tc is nil, nil will be returned instead. -func (tc *TConfiguration) GetTLSConfig() *tls.Config { - if tc == nil { - return nil - } - return tc.TLSConfig -} - -// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol -// should follow. -// -// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or -// tc.TBinaryStrictRead is nil. -func (tc *TConfiguration) GetTBinaryStrictRead() bool { - if tc == nil || tc.TBinaryStrictRead == nil { - return DEFAULT_TBINARY_STRICT_READ - } - return *tc.TBinaryStrictRead -} - -// GetTBinaryStrictWrite returns the strict read configuration TBinaryProtocol -// should follow. -// -// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or -// tc.TBinaryStrictWrite is nil. -func (tc *TConfiguration) GetTBinaryStrictWrite() bool { - if tc == nil || tc.TBinaryStrictWrite == nil { - return DEFAULT_TBINARY_STRICT_WRITE - } - return *tc.TBinaryStrictWrite -} - -// GetTHeaderProtocolID returns the THeaderProtocolID should be used by -// THeaderProtocol clients (for servers, they always use the same one as the -// client instead). -// -// It's nil-safe. If either tc or tc.THeaderProtocolID is nil, -// THeaderProtocolDefault will be returned instead. -// THeaderProtocolDefault will also be returned if configured value is invalid. 
-func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { - if tc == nil || tc.THeaderProtocolID == nil { - return THeaderProtocolDefault - } - protoID := *tc.THeaderProtocolID - if err := protoID.Validate(); err != nil { - return THeaderProtocolDefault - } - return protoID -} - -// THeaderProtocolIDPtr validates and returns the pointer to id. -// -// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault -// and the validation error will be returned. -func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) { - err := id.Validate() - if err != nil { - id = THeaderProtocolDefault - } - return &id, err -} - -// THeaderProtocolIDPtrMust validates and returns the pointer to id. -// -// It's similar to THeaderProtocolIDPtr, but it panics on validation errors -// instead of returning them. -func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID { - ptr, err := THeaderProtocolIDPtr(id) - if err != nil { - panic(err) - } - return ptr -} - -// TConfigurationSetter is an optional interface TProtocol, TTransport, -// TProtocolFactory, TTransportFactory, and other implementations can implement. -// -// It's intended to be called during intializations. -// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the -// middle of a message is undefined: -// It may or may not change the behavior of the current processing message, -// and it may even cause the current message to fail. -// -// Note for implementations: SetTConfiguration might be called multiple times -// with the same value in quick successions due to the implementation of the -// propagation. Implementations should make SetTConfiguration as simple as -// possible (usually just overwrite the stored configuration and propagate it to -// the wrapped TTransports/TProtocols). -type TConfigurationSetter interface { - SetTConfiguration(*TConfiguration) -} - -// PropagateTConfiguration propagates cfg to impl if impl implements -// TConfigurationSetter and cfg is non-nil, otherwise it does nothing. -// -// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration -// with everything being default value, use &TConfiguration{} explicitly instead. -func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { - if cfg == nil || cfg.noPropagation { - return - } - - if setter, ok := impl.(TConfigurationSetter); ok { - setter.SetTConfiguration(cfg) - } -} - -func checkSizeForProtocol(size int32, cfg *TConfiguration) error { - if size < 0 { - return NewTProtocolExceptionWithType( - NEGATIVE_SIZE, - fmt.Errorf("negative size: %d", size), - ) - } - if size > cfg.GetMaxMessageSize() { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - fmt.Errorf("size exceeded max allowed: %d", size), - ) - } - return nil -} - -type tTransportFactoryConf struct { - delegate TTransportFactory - cfg *TConfiguration -} - -func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { - trans, err := f.delegate.GetTransport(orig) - if err == nil { - PropagateTConfiguration(orig, f.cfg) - PropagateTConfiguration(trans, f.cfg) - } - return trans, err -} - -func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.delegate, f.cfg) - f.cfg = cfg -} - -// TTransportFactoryConf wraps a TTransportFactory to propagate -// TConfiguration on the factory's GetTransport calls. 
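checkSizeForProtocol above, together with the safeReadBytes helper from binary_protocol.go earlier in this diff, follows one defensive pattern: validate a declared length against a configured ceiling first, then read via io.CopyN into a growable buffer so a malformed length prefix can never force a huge up-front allocation. A minimal stdlib-only sketch of that pattern (readSizePrefixed and the 1 MiB ceiling are illustrative choices, not Thrift constants):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

const maxMessageSize = 1 << 20 // illustrative ceiling; the Thrift default is 100 MiB

// readSizePrefixed reads exactly size bytes from r. The buffer grows only as
// data actually arrives, so a bogus multi-gigabyte size prefix fails cheaply
// instead of triggering a huge allocation.
func readSizePrefixed(r io.Reader, size int64) ([]byte, error) {
	if size < 0 {
		return nil, errors.New("negative size")
	}
	if size > maxMessageSize {
		return nil, fmt.Errorf("size %d exceeds max allowed %d", size, maxMessageSize)
	}
	var buf bytes.Buffer
	if _, err := io.CopyN(&buf, r, size); err != nil {
		return nil, err // a short read (io.EOF) is an error here
	}
	return buf.Bytes(), nil
}

func main() {
	data, err := readSizePrefixed(strings.NewReader("hello thrift"), 5)
	fmt.Printf("%q %v\n", data, err)

	_, err = readSizePrefixed(strings.NewReader("x"), 1<<40)
	fmt.Println(err)
}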
-func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { - return &tTransportFactoryConf{ - delegate: delegate, - cfg: conf, - } -} - -type tProtocolFactoryConf struct { - delegate TProtocolFactory - cfg *TConfiguration -} - -func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { - proto := f.delegate.GetProtocol(trans) - PropagateTConfiguration(trans, f.cfg) - PropagateTConfiguration(proto, f.cfg) - return proto -} - -func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.delegate, f.cfg) - f.cfg = cfg -} - -// TProtocolFactoryConf wraps a TProtocolFactory to propagate -// TConfiguration on the factory's GetProtocol calls. -func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { - return &tProtocolFactoryConf{ - delegate: delegate, - cfg: conf, - } -} - -var ( - _ TConfigurationSetter = (*tTransportFactoryConf)(nil) - _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go deleted file mode 100644 index d15c1bcf894..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -var defaultCtx = context.Background() diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go deleted file mode 100644 index fdf9bfec15e..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" -) - -type TDebugProtocol struct { - // Required. The actual TProtocol to do the read/write. - Delegate TProtocol - - // Optional. The logger and prefix to log all the args/return values - // from Delegate TProtocol calls. - // - // If Logger is nil, StdLogger using stdlib log package with os.Stderr - // will be used. If disable logging is desired, set Logger to NopLogger - // explicitly instead of leaving it as nil/unset. - Logger Logger - LogPrefix string - - // Optional. An TProtocol to duplicate everything read/written from Delegate. - // - // A typical use case of this is to use TSimpleJSONProtocol wrapping - // TMemoryBuffer in a middleware to json logging requests/responses. - // - // This feature is not available from TDebugProtocolFactory. In order to - // use it you have to construct TDebugProtocol directly, or set DuplicateTo - // field after getting a TDebugProtocol from the factory. - DuplicateTo TProtocol -} - -type TDebugProtocolFactory struct { - Underlying TProtocolFactory - LogPrefix string - Logger Logger -} - -// NewTDebugProtocolFactory creates a TDebugProtocolFactory. -// -// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct -// itself instead. This version will use the default logger from standard -// library. -func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - Logger: StdLogger(nil), - } -} - -// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory. -func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - Logger: logger, - } -} - -func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return &TDebugProtocol{ - Delegate: t.Underlying.GetProtocol(trans), - LogPrefix: t.LogPrefix, - Logger: fallbackLogger(t.Logger), - } -} - -func (tdp *TDebugProtocol) logf(format string, v ...interface{}) { - fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...)) -} - -func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) - tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) - } - return err -} -func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { - err := tdp.Delegate.WriteMessageEnd(ctx) - tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error { - err := tdp.Delegate.WriteStructBegin(ctx, name) - tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructBegin(ctx, name) - } - return err -} -func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { - err := tdp.Delegate.WriteStructEnd(ctx) - tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructEnd(ctx) - } - 
return err -} -func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) - tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { - err := tdp.Delegate.WriteFieldEnd(ctx) - tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { - err := tdp.Delegate.WriteFieldStop(ctx) - tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldStop(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) - tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { - err := tdp.Delegate.WriteMapEnd(ctx) - tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - err := tdp.Delegate.WriteListBegin(ctx, elemType, size) - tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { - err := tdp.Delegate.WriteListEnd(ctx) - tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) - tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { - err := tdp.Delegate.WriteSetEnd(ctx) - tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { - err := tdp.Delegate.WriteBool(ctx, value) - tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBool(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { - err := tdp.Delegate.WriteByte(ctx, value) - tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteByte(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { - err := tdp.Delegate.WriteI16(ctx, value) - tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, 
err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI16(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { - err := tdp.Delegate.WriteI32(ctx, value) - tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI32(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { - err := tdp.Delegate.WriteI64(ctx, value) - tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI64(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { - err := tdp.Delegate.WriteDouble(ctx, value) - tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteDouble(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { - err := tdp.Delegate.WriteString(ctx, value) - tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteString(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { - err := tdp.Delegate.WriteBinary(ctx, value) - tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBinary(ctx, value) - } - return err -} - -func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { - name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) - tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) - } - return -} -func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadMessageEnd(ctx) - tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - name, err = tdp.Delegate.ReadStructBegin(ctx) - tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructBegin(ctx, name) - } - return -} -func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadStructEnd(ctx) - tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { - name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) - tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) - } - return -} -func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadFieldEnd(ctx) - tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err 
error) { - keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) - tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) - } - return -} -func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadMapEnd(ctx) - tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadListBegin(ctx) - tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) - } - return -} -func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadListEnd(ctx) - tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) - tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) - } - return -} -func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadSetEnd(ctx) - tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { - value, err = tdp.Delegate.ReadBool(ctx) - tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBool(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { - value, err = tdp.Delegate.ReadByte(ctx) - tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteByte(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) { - value, err = tdp.Delegate.ReadI16(ctx) - tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI16(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { - value, err = tdp.Delegate.ReadI32(ctx) - tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI32(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { - value, err = tdp.Delegate.ReadI64(ctx) - tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI64(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - value, err = tdp.Delegate.ReadDouble(ctx) - tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteDouble(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadString(ctx 
context.Context) (value string, err error) { - value, err = tdp.Delegate.ReadString(ctx) - tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteString(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary(ctx) - tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBinary(ctx, value) - } - return -} -func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - err = tdp.Delegate.Skip(ctx, fieldType) - tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.Skip(ctx, fieldType) - } - return -} -func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { - err = tdp.Delegate.Flush(ctx) - tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.Flush(ctx) - } - return -} - -func (tdp *TDebugProtocol) Transport() TTransport { - return tdp.Delegate.Transport() -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(tdp.Delegate, conf) - PropagateTConfiguration(tdp.DuplicateTo, conf) -} - -var _ TConfigurationSetter = (*TDebugProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go deleted file mode 100644 index cefc7ecda5d..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
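For reference, the TDebugProtocol removed above only delegates each TProtocol call, logs the call and its result, and optionally mirrors it onto DuplicateTo. A minimal sketch of wiring it up against the upstream Apache Thrift package this copy was vendored from (github.com/apache/thrift/lib/go/thrift); the in-memory buffer, binary protocol, and custom Logger func are stand-ins, not anything this diff prescribes:

package main

import (
	"context"
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	ctx := context.Background()

	// In-memory transport keeps the example self-contained.
	buf := thrift.NewTMemoryBuffer()

	// Wrap the real protocol; every delegated call is logged with the prefix.
	proto := &thrift.TDebugProtocol{
		Delegate:  thrift.NewTBinaryProtocolTransport(buf),
		LogPrefix: "client: ",
		Logger:    func(msg string) { log.Println(msg) },
	}

	// Each call below is forwarded to the delegate and logged as name/args/result.
	// Errors are ignored here only to keep the sketch short.
	_ = proto.WriteStructBegin(ctx, "Ping")
	_ = proto.WriteFieldStop(ctx)
	_ = proto.WriteStructEnd(ctx)
	_ = proto.Flush(ctx)
}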
- */ - -package thrift - -import ( - "context" - "sync" -) - -type TDeserializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -func NewTDeserializer() *TDeserializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolTransport(transport) - - return &TDeserializer{ - Transport: transport, - Protocol: protocol, - } -} - -func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) { - t.Transport.Reset() - - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(ctx, t.Protocol); err != nil { - return - } - return -} - -func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) { - t.Transport.Reset() - - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(ctx, t.Protocol); err != nil { - return - } - return -} - -// TDeserializerPool is the thread-safe version of TDeserializer, -// it uses resource pool of TDeserializer under the hood. -// -// It must be initialized with either NewTDeserializerPool or -// NewTDeserializerPoolSizeFactory. -type TDeserializerPool struct { - pool sync.Pool -} - -// NewTDeserializerPool creates a new TDeserializerPool. -// -// NewTDeserializer can be used as the arg here. -func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool { - return &TDeserializerPool{ - pool: sync.Pool{ - New: func() interface{} { - return f() - }, - }, - } -} - -// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with -// the given size and protocol factory. -// -// Note that the size is not the limit. The TMemoryBuffer underneath can grow -// larger than that. It just dictates the initial size. -func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool { - return &TDeserializerPool{ - pool: sync.Pool{ - New: func() interface{} { - transport := NewTMemoryBufferLen(size) - protocol := factory.GetProtocol(transport) - - return &TDeserializer{ - Transport: transport, - Protocol: protocol, - } - }, - }, - } -} - -func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error { - d := t.pool.Get().(*TDeserializer) - defer t.pool.Put(d) - return d.ReadString(ctx, msg, s) -} - -func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error { - d := t.pool.Get().(*TDeserializer) - defer t.pool.Put(d) - return d.Read(ctx, msg, b) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go deleted file mode 100644 index 630b938f004..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
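The TDeserializerPool deleted above is a sync.Pool of TDeserializer instances, so concurrent callers never share the underlying TMemoryBuffer. A rough usage sketch against the upstream package (same constructors); the decode helper and its caller-supplied TStruct are illustrative only:

package example

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

// Pool of deserializers: 1 KiB initial buffers, binary protocol.
var deserializerPool = thrift.NewTDeserializerPoolSizeFactory(
	1024,
	thrift.NewTBinaryProtocolFactoryConf(nil),
)

// decode unmarshals wire bytes into any thrift-compiler-generated struct.
// Get/Put of the pooled TDeserializer happens inside Read.
func decode(ctx context.Context, payload []byte, out thrift.TStruct) error {
	return deserializerPool.Read(ctx, out, payload)
}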
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" -) - -// Generic Thrift exception -type TException interface { - error - - TExceptionType() TExceptionType -} - -// Prepends additional information to an error without losing the Thrift exception interface -func PrependError(prepend string, err error) error { - msg := prepend + err.Error() - - var te TException - if errors.As(err, &te) { - switch te.TExceptionType() { - case TExceptionTypeTransport: - if t, ok := err.(TTransportException); ok { - return prependTTransportException(prepend, t) - } - case TExceptionTypeProtocol: - if t, ok := err.(TProtocolException); ok { - return prependTProtocolException(prepend, t) - } - case TExceptionTypeApplication: - var t TApplicationException - if errors.As(err, &t) { - return NewTApplicationException(t.TypeId(), msg) - } - } - - return wrappedTException{ - err: err, - msg: msg, - tExceptionType: te.TExceptionType(), - } - } - - return errors.New(msg) -} - -// TExceptionType is an enum type to categorize different "subclasses" of TExceptions. -type TExceptionType byte - -// TExceptionType values -const ( - TExceptionTypeUnknown TExceptionType = iota - TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler - TExceptionTypeApplication // TApplicationExceptions - TExceptionTypeProtocol // TProtocolExceptions - TExceptionTypeTransport // TTransportExceptions -) - -// WrapTException wraps an error into TException. -// -// If err is nil or already TException, it's returned as-is. -// Otherwise it will be wrapped into TException with TExceptionType() returning -// TExceptionTypeUnknown, and Unwrap() returning the original error. -func WrapTException(err error) TException { - if err == nil { - return nil - } - - if te, ok := err.(TException); ok { - return te - } - - return wrappedTException{ - err: err, - msg: err.Error(), - tExceptionType: TExceptionTypeUnknown, - } -} - -type wrappedTException struct { - err error - msg string - tExceptionType TExceptionType -} - -func (w wrappedTException) Error() string { - return w.msg -} - -func (w wrappedTException) TExceptionType() TExceptionType { - return w.tExceptionType -} - -func (w wrappedTException) Unwrap() error { - return w.err -} - -var _ TException = wrappedTException{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go deleted file mode 100644 index f683e7f544b..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
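PrependError and WrapTException in the deleted exception.go preserve the original TExceptionType while adding context, which is what lets callers keep switching on the exception category after wrapping. A short sketch of both sides, using the same API from the upstream package (function names other than the thrift ones are made up for the example):

package example

import (
	"errors"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

// wrapReadError adds context to a low-level error but keeps it a TException.
// Non-thrift errors become TExceptionTypeUnknown; thrift errors keep their type.
func wrapReadError(raw error) error {
	if raw == nil {
		return nil
	}
	return thrift.PrependError("reading frame: ", thrift.WrapTException(raw))
}

// classify shows how a caller can still branch on the exception category.
func classify(err error) string {
	var te thrift.TException
	if errors.As(err, &te) {
		switch te.TExceptionType() {
		case thrift.TExceptionTypeTransport:
			return "transport"
		case thrift.TExceptionTypeProtocol:
			return "protocol"
		default:
			return fmt.Sprintf("other (%d)", te.TExceptionType())
		}
	}
	return "not a thrift exception"
}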
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "io" -) - -// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. -const DEFAULT_MAX_LENGTH = 16384000 - -type TFramedTransport struct { - transport TTransport - - cfg *TConfiguration - - writeBuf bytes.Buffer - - reader *bufio.Reader - readBuf bytes.Buffer - - buffer [4]byte -} - -type tFramedTransportFactory struct { - factory TTransportFactory - cfg *TConfiguration -} - -// Deprecated: Use NewTFramedTransportFactoryConf instead. -func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { - return NewTFramedTransportFactoryConf(factory, &TConfiguration{ - MaxFrameSize: DEFAULT_MAX_LENGTH, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTFramedTransportFactoryConf instead. -func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { - return NewTFramedTransportFactoryConf(factory, &TConfiguration{ - MaxFrameSize: int32(maxLength), - - noPropagation: true, - }) -} - -func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { - PropagateTConfiguration(factory, conf) - return &tFramedTransportFactory{ - factory: factory, - cfg: conf, - } -} - -func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { - PropagateTConfiguration(base, p.cfg) - tt, err := p.factory.GetTransport(base) - if err != nil { - return nil, err - } - return NewTFramedTransportConf(tt, p.cfg), nil -} - -func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.factory, cfg) - p.cfg = cfg -} - -// Deprecated: Use NewTFramedTransportConf instead. -func NewTFramedTransport(transport TTransport) *TFramedTransport { - return NewTFramedTransportConf(transport, &TConfiguration{ - MaxFrameSize: DEFAULT_MAX_LENGTH, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTFramedTransportConf instead. -func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { - return NewTFramedTransportConf(transport, &TConfiguration{ - MaxFrameSize: int32(maxLength), - - noPropagation: true, - }) -} - -func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport { - PropagateTConfiguration(transport, conf) - return &TFramedTransport{ - transport: transport, - reader: bufio.NewReader(transport), - cfg: conf, - } -} - -func (p *TFramedTransport) Open() error { - return p.transport.Open() -} - -func (p *TFramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *TFramedTransport) Close() error { - return p.transport.Close() -} - -func (p *TFramedTransport) Read(buf []byte) (read int, err error) { - read, err = p.readBuf.Read(buf) - if err != io.EOF { - return - } - - // For bytes.Buffer.Read, EOF would only happen when read is zero, - // but still, do a sanity check, - // in case that behavior is changed in a future version of go stdlib. - // When that happens, just return nil error, - // and let the caller call Read again to read the next frame. 
- if read > 0 { - return read, nil - } - - // Reaching here means that the last Read finished the last frame, - // so we need to read the next frame into readBuf now. - if err = p.readFrame(); err != nil { - return read, err - } - newRead, err := p.Read(buf[read:]) - return read + newRead, err -} - -func (p *TFramedTransport) ReadByte() (c byte, err error) { - buf := p.buffer[:1] - _, err = p.Read(buf) - if err != nil { - return - } - c = buf[0] - return -} - -func (p *TFramedTransport) Write(buf []byte) (int, error) { - n, err := p.writeBuf.Write(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) WriteByte(c byte) error { - return p.writeBuf.WriteByte(c) -} - -func (p *TFramedTransport) WriteString(s string) (n int, err error) { - return p.writeBuf.WriteString(s) -} - -func (p *TFramedTransport) Flush(ctx context.Context) error { - size := p.writeBuf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - p.writeBuf.Reset() - return NewTTransportExceptionFromError(err) - } - if size > 0 { - if _, err := io.Copy(p.transport, &p.writeBuf); err != nil { - p.writeBuf.Reset() - return NewTTransportExceptionFromError(err) - } - } - err = p.transport.Flush(ctx) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) readFrame() error { - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return err - } - size := binary.BigEndian.Uint32(buf) - if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) { - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - _, err := io.CopyN(&p.readBuf, p.reader, int64(size)) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { - return uint64(p.readBuf.Len()) -} - -// SetTConfiguration implements TConfigurationSetter. -func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.transport, cfg) - p.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*tFramedTransportFactory)(nil) - _ TConfigurationSetter = (*TFramedTransport)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go deleted file mode 100644 index ac9bd4882be..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
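The TFramedTransport removed above buffers writes and emits them as one length-prefixed frame on Flush, while readFrame enforces MaxFrameSize on the way back in. A minimal round trip over an in-memory transport, assuming the upstream package with the same constructors:

package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	ctx := context.Background()

	// In-memory buffer stands in for a socket.
	inner := thrift.NewTMemoryBuffer()

	// Frame every write; reject frames above 1 MiB on read.
	framed := thrift.NewTFramedTransportConf(inner, &thrift.TConfiguration{
		MaxFrameSize: 1024 * 1024,
	})

	// Writes land in the frame buffer; Flush prepends the 4-byte length.
	if _, err := framed.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := framed.Flush(ctx); err != nil {
		panic(err)
	}

	// Reading pulls the next frame (length header + payload) off the wire.
	buf := make([]byte, 5)
	if _, err := framed.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", buf)
}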
- */ - -package thrift - -import ( - "context" -) - -// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. -type ( - headerKey string - headerKeyList int -) - -// Values for headerKeyList. -const ( - headerKeyListRead headerKeyList = iota - headerKeyListWrite -) - -// SetHeader sets a header in the context. -func SetHeader(ctx context.Context, key, value string) context.Context { - return context.WithValue( - ctx, - headerKey(key), - value, - ) -} - -// UnsetHeader unsets a previously set header in the context. -func UnsetHeader(ctx context.Context, key string) context.Context { - return context.WithValue( - ctx, - headerKey(key), - nil, - ) -} - -// GetHeader returns a value of the given header from the context. -func GetHeader(ctx context.Context, key string) (value string, ok bool) { - if v := ctx.Value(headerKey(key)); v != nil { - value, ok = v.(string) - } - return -} - -// SetReadHeaderList sets the key list of read THeaders in the context. -func SetReadHeaderList(ctx context.Context, keys []string) context.Context { - return context.WithValue( - ctx, - headerKeyListRead, - keys, - ) -} - -// GetReadHeaderList returns the key list of read THeaders from the context. -func GetReadHeaderList(ctx context.Context) []string { - if v := ctx.Value(headerKeyListRead); v != nil { - if value, ok := v.([]string); ok { - return value - } - } - return nil -} - -// SetWriteHeaderList sets the key list of THeaders to write in the context. -func SetWriteHeaderList(ctx context.Context, keys []string) context.Context { - return context.WithValue( - ctx, - headerKeyListWrite, - keys, - ) -} - -// GetWriteHeaderList returns the key list of THeaders to write from the context. -func GetWriteHeaderList(ctx context.Context) []string { - if v := ctx.Value(headerKeyListWrite); v != nil { - if value, ok := v.([]string); ok { - return value - } - } - return nil -} - -// AddReadTHeaderToContext adds the whole THeader headers into context. -func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context { - keys := make([]string, 0, len(headers)) - for key, value := range headers { - ctx = SetHeader(ctx, key, value) - keys = append(keys, key) - } - return SetReadHeaderList(ctx, keys) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go deleted file mode 100644 index 878041f8df1..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
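The header_context.go helpers deleted above are plain context.WithValue wrappers keyed by unexported types, so THeader key/value pairs can travel through ordinary context plumbing. A short sketch of staging headers for a write and reading them back, with the same helpers from the upstream package (header names are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	ctx := context.Background()

	// Attach two headers to the context and mark them for writing.
	ctx = thrift.SetHeader(ctx, "tenant", "team-a")
	ctx = thrift.SetHeader(ctx, "request-id", "42")
	ctx = thrift.SetWriteHeaderList(ctx, []string{"tenant", "request-id"})

	// A THeader-aware client would send these with the next message.
	for _, key := range thrift.GetWriteHeaderList(ctx) {
		if value, ok := thrift.GetHeader(ctx, key); ok {
			fmt.Printf("%s=%s\n", key, value)
		}
	}
}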
- */ - -package thrift - -import ( - "context" - "errors" -) - -// THeaderProtocol is a thrift protocol that implements THeader: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -// -// It supports either binary or compact protocol as the wrapped protocol. -// -// Most of the THeader handlings are happening inside THeaderTransport. -type THeaderProtocol struct { - transport *THeaderTransport - - // Will be initialized on first read/write. - protocol TProtocol - - cfg *TConfiguration -} - -// Deprecated: Use NewTHeaderProtocolConf instead. -func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { - return newTHeaderProtocolConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying -// transport with given TConfiguration. -// -// The passed in transport will be wrapped with THeaderTransport. -// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol { - return newTHeaderProtocolConf(trans, conf) -} - -func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol { - t := NewTHeaderTransportConf(trans, cfg) - p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t) - PropagateTConfiguration(p, cfg) - return &THeaderProtocol{ - transport: t, - protocol: p, - cfg: cfg, - } -} - -type tHeaderProtocolFactory struct { - cfg *TConfiguration -} - -func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return newTHeaderProtocolConf(trans, f.cfg) -} - -func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) { - f.cfg = cfg -} - -// Deprecated: Use NewTHeaderProtocolFactoryConf instead. -func NewTHeaderProtocolFactory() TProtocolFactory { - return NewTHeaderProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderProtocolFactoryConf creates a factory for THeader with given -// TConfiguration. -func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory { - return tHeaderProtocolFactory{ - cfg: conf, - } -} - -// Transport returns the underlying transport. -// -// It's guaranteed to be of type *THeaderTransport. -func (p *THeaderProtocol) Transport() TTransport { - return p.transport -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (p *THeaderProtocol) GetReadHeaders() THeaderMap { - return p.transport.GetReadHeaders() -} - -// SetWriteHeader sets a header for write. -func (p *THeaderProtocol) SetWriteHeader(key, value string) { - p.transport.SetWriteHeader(key, value) -} - -// ClearWriteHeaders clears all write headers previously set. -func (p *THeaderProtocol) ClearWriteHeaders() { - p.transport.ClearWriteHeaders() -} - -// AddTransform add a transform for writing. 
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { - return p.transport.AddTransform(transform) -} - -func (p *THeaderProtocol) Flush(ctx context.Context) error { - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { - newProto, err := p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - return err - } - PropagateTConfiguration(newProto, p.cfg) - p.protocol = newProto - p.transport.SequenceID = seqID - return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) -} - -func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { - if err := p.protocol.WriteMessageEnd(ctx); err != nil { - return err - } - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { - return p.protocol.WriteStructBegin(ctx, name) -} - -func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { - return p.protocol.WriteStructEnd(ctx) -} - -func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { - return p.protocol.WriteFieldBegin(ctx, name, typeID, id) -} - -func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { - return p.protocol.WriteFieldEnd(ctx) -} - -func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { - return p.protocol.WriteFieldStop(ctx) -} - -func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) -} - -func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { - return p.protocol.WriteMapEnd(ctx) -} - -func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.protocol.WriteListBegin(ctx, elemType, size) -} - -func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { - return p.protocol.WriteListEnd(ctx) -} - -func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.protocol.WriteSetBegin(ctx, elemType, size) -} - -func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { - return p.protocol.WriteSetEnd(ctx) -} - -func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { - return p.protocol.WriteBool(ctx, value) -} - -func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error { - return p.protocol.WriteByte(ctx, value) -} - -func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { - return p.protocol.WriteI16(ctx, value) -} - -func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { - return p.protocol.WriteI32(ctx, value) -} - -func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { - return p.protocol.WriteI64(ctx, value) -} - -func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { - return p.protocol.WriteDouble(ctx, value) -} - -func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { - return p.protocol.WriteString(ctx, value) -} - -func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { - return p.protocol.WriteBinary(ctx, value) -} - -// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
-func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { - return p.transport.ReadFrame(ctx) -} - -func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { - if err = p.transport.ReadFrame(ctx); err != nil { - return - } - - var newProto TProtocol - newProto, err = p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - var tAppExc TApplicationException - if !errors.As(err, &tAppExc) { - return - } - if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { - return - } - if e := tAppExc.Write(ctx, p.protocol); e != nil { - return - } - if e := p.protocol.WriteMessageEnd(ctx); e != nil { - return - } - if e := p.transport.Flush(ctx); e != nil { - return - } - return - } - PropagateTConfiguration(newProto, p.cfg) - p.protocol = newProto - - return p.protocol.ReadMessageBegin(ctx) -} - -func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { - return p.protocol.ReadMessageEnd(ctx) -} - -func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - return p.protocol.ReadStructBegin(ctx) -} - -func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { - return p.protocol.ReadStructEnd(ctx) -} - -func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { - return p.protocol.ReadFieldBegin(ctx) -} - -func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { - return p.protocol.ReadFieldEnd(ctx) -} - -func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { - return p.protocol.ReadMapBegin(ctx) -} - -func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { - return p.protocol.ReadMapEnd(ctx) -} - -func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.protocol.ReadListBegin(ctx) -} - -func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { - return p.protocol.ReadListEnd(ctx) -} - -func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.protocol.ReadSetBegin(ctx) -} - -func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { - return p.protocol.ReadSetEnd(ctx) -} - -func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { - return p.protocol.ReadBool(ctx) -} - -func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { - return p.protocol.ReadByte(ctx) -} - -func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { - return p.protocol.ReadI16(ctx) -} - -func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { - return p.protocol.ReadI32(ctx) -} - -func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { - return p.protocol.ReadI64(ctx) -} - -func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - return p.protocol.ReadDouble(ctx) -} - -func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { - return p.protocol.ReadString(ctx) -} - -func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - return p.protocol.ReadBinary(ctx) -} - -func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { - return p.protocol.Skip(ctx, fieldType) -} - -// SetTConfiguration implements TConfigurationSetter. 
-func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.transport, cfg) - PropagateTConfiguration(p.protocol, cfg) - p.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) - _ TConfigurationSetter = (*THeaderProtocol)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go deleted file mode 100644 index 6a99535a459..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go +++ /dev/null @@ -1,809 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "compress/zlib" - "context" - "encoding/binary" - "errors" - "fmt" - "io" -) - -// Size in bytes for 32-bit ints. -const size32 = 4 - -type headerMeta struct { - MagicFlags uint32 - SequenceID int32 - HeaderLength uint16 -} - -const headerMetaSize = 10 - -type clientType int - -const ( - clientUnknown clientType = iota - clientHeaders - clientFramedBinary - clientUnframedBinary - clientFramedCompact - clientUnframedCompact -) - -// Constants defined in THeader format: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -const ( - THeaderHeaderMagic uint32 = 0x0fff0000 - THeaderHeaderMask uint32 = 0xffff0000 - THeaderFlagsMask uint32 = 0x0000ffff - THeaderMaxFrameSize uint32 = 0x3fffffff -) - -// THeaderMap is the type of the header map in THeader transport. -type THeaderMap map[string]string - -// THeaderProtocolID is the wrapped protocol id used in THeader. -type THeaderProtocolID int32 - -// Supported THeaderProtocolID values. -const ( - THeaderProtocolBinary THeaderProtocolID = 0x00 - THeaderProtocolCompact THeaderProtocolID = 0x02 - THeaderProtocolDefault = THeaderProtocolBinary -) - -// Declared globally to avoid repetitive allocations, not really used. -var globalMemoryBuffer = NewTMemoryBuffer() - -// Validate checks whether the THeaderProtocolID is a valid/supported one. -func (id THeaderProtocolID) Validate() error { - _, err := id.GetProtocol(globalMemoryBuffer) - return err -} - -// GetProtocol gets the corresponding TProtocol from the wrapped protocol id. 
-func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { - switch id { - default: - return nil, NewTApplicationException( - INVALID_PROTOCOL, - fmt.Sprintf("THeader protocol id %d not supported", id), - ) - case THeaderProtocolBinary: - return NewTBinaryProtocolTransport(trans), nil - case THeaderProtocolCompact: - return NewTCompactProtocol(trans), nil - } -} - -// THeaderTransformID defines the numeric id of the transform used. -type THeaderTransformID int32 - -// THeaderTransformID values. -// -// Values not defined here are not currently supported, namely HMAC and Snappy. -const ( - TransformNone THeaderTransformID = iota // 0, no special handling - TransformZlib // 1, zlib -) - -var supportedTransformIDs = map[THeaderTransformID]bool{ - TransformNone: true, - TransformZlib: true, -} - -// TransformReader is an io.ReadCloser that handles transforms reading. -type TransformReader struct { - io.Reader - - closers []io.Closer -} - -var _ io.ReadCloser = (*TransformReader)(nil) - -// NewTransformReaderWithCapacity initializes a TransformReader with expected -// closers capacity. -// -// If you don't know the closers capacity beforehand, just use -// -// &TransformReader{Reader: baseReader} -// -// instead would be sufficient. -func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { - return &TransformReader{ - Reader: baseReader, - closers: make([]io.Closer, 0, capacity), - } -} - -// Close calls the underlying closers in appropriate order, -// stops at and returns the first error encountered. -func (tr *TransformReader) Close() error { - // Call closers in reversed order - for i := len(tr.closers) - 1; i >= 0; i-- { - if err := tr.closers[i].Close(); err != nil { - return err - } - } - return nil -} - -// AddTransform adds a transform. -func (tr *TransformReader) AddTransform(id THeaderTransformID) error { - switch id { - default: - return NewTApplicationException( - INVALID_TRANSFORM, - fmt.Sprintf("THeaderTransformID %d not supported", id), - ) - case TransformNone: - // no-op - case TransformZlib: - readCloser, err := zlib.NewReader(tr.Reader) - if err != nil { - return err - } - tr.Reader = readCloser - tr.closers = append(tr.closers, readCloser) - } - return nil -} - -// TransformWriter is an io.WriteCloser that handles transforms writing. -type TransformWriter struct { - io.Writer - - closers []io.Closer -} - -var _ io.WriteCloser = (*TransformWriter)(nil) - -// NewTransformWriter creates a new TransformWriter with base writer and transforms. -func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) { - writer := &TransformWriter{ - Writer: baseWriter, - closers: make([]io.Closer, 0, len(transforms)), - } - for _, id := range transforms { - if err := writer.AddTransform(id); err != nil { - return nil, err - } - } - return writer, nil -} - -// Close calls the underlying closers in appropriate order, -// stops at and returns the first error encountered. -func (tw *TransformWriter) Close() error { - // Call closers in reversed order - for i := len(tw.closers) - 1; i >= 0; i-- { - if err := tw.closers[i].Close(); err != nil { - return err - } - } - return nil -} - -// AddTransform adds a transform. 
-func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { - switch id { - default: - return NewTApplicationException( - INVALID_TRANSFORM, - fmt.Sprintf("THeaderTransformID %d not supported", id), - ) - case TransformNone: - // no-op - case TransformZlib: - writeCloser := zlib.NewWriter(tw.Writer) - tw.Writer = writeCloser - tw.closers = append(tw.closers, writeCloser) - } - return nil -} - -// THeaderInfoType is the type id of the info headers. -type THeaderInfoType int32 - -// Supported THeaderInfoType values. -const ( - _ THeaderInfoType = iota // Skip 0 - InfoKeyValue // 1 - // Rest of the info types are not supported. -) - -// THeaderTransport is a Transport mode that implements THeader. -// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -type THeaderTransport struct { - SequenceID int32 - Flags uint32 - - transport TTransport - - // THeaderMap for read and write - readHeaders THeaderMap - writeHeaders THeaderMap - - // Reading related variables. - reader *bufio.Reader - // When frame is detected, we read the frame fully into frameBuffer. - frameBuffer bytes.Buffer - // When it's non-nil, Read should read from frameReader instead of - // reader, and EOF error indicates end of frame instead of end of all - // transport. - frameReader io.ReadCloser - - // Writing related variables - writeBuffer bytes.Buffer - writeTransforms []THeaderTransformID - - clientType clientType - protocolID THeaderProtocolID - cfg *TConfiguration - - // buffer is used in the following scenarios to avoid repetitive - // allocations, while 4 is big enough for all those scenarios: - // - // * header padding (max size 4) - // * write the frame size (size 4) - buffer [4]byte -} - -var _ TTransport = (*THeaderTransport)(nil) - -// Deprecated: Use NewTHeaderTransportConf instead. -func NewTHeaderTransport(trans TTransport) *THeaderTransport { - return NewTHeaderTransportConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderTransportConf creates THeaderTransport from the -// underlying transport, with given TConfiguration attached. -// -// If trans is already a *THeaderTransport, it will be returned as is, -// but with TConfiguration overridden by the value passed in. -// -// The protocol ID in TConfiguration is only useful for client transports. -// For servers, -// the protocol ID will be overridden again to the one set by the client, -// to ensure that servers always speak the same dialect as the client. -func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { - if ht, ok := trans.(*THeaderTransport); ok { - ht.SetTConfiguration(conf) - return ht - } - PropagateTConfiguration(trans, conf) - return &THeaderTransport{ - transport: trans, - reader: bufio.NewReader(trans), - writeHeaders: make(THeaderMap), - protocolID: conf.GetTHeaderProtocolID(), - cfg: conf, - } -} - -// Open calls the underlying transport's Open function. -func (t *THeaderTransport) Open() error { - return t.transport.Open() -} - -// IsOpen calls the underlying transport's IsOpen function. -func (t *THeaderTransport) IsOpen() bool { - return t.transport.IsOpen() -} - -// ReadFrame tries to read the frame header, guess the client type, and handle -// unframed clients. -func (t *THeaderTransport) ReadFrame(ctx context.Context) error { - if !t.needReadFrame() { - // No need to read frame, skipping. 
- return nil - } - - // Peek and handle the first 32 bits. - // They could either be the length field of a framed message, - // or the first bytes of an unframed message. - var buf []byte - var err error - // This is also usually the first read from a connection, - // so handle retries around socket timeouts. - _, deadlineSet := ctx.Deadline() - for { - buf, err = t.reader.Peek(size32) - if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { - // This is I/O timeout and we still have time, - // continue trying - continue - } - // For anything else, do not retry - break - } - if err != nil { - return err - } - - frameSize := binary.BigEndian.Uint32(buf) - if frameSize&VERSION_MASK == VERSION_1 { - t.clientType = clientUnframedBinary - return nil - } - if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { - t.clientType = clientUnframedCompact - return nil - } - - // At this point it should be a framed message, - // sanity check on frameSize then discard the peeked part. - if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - errors.New("frame too large"), - ) - } - t.reader.Discard(size32) - - // Read the frame fully into frameBuffer. - _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize)) - if err != nil { - return err - } - t.frameReader = io.NopCloser(&t.frameBuffer) - - // Peek and handle the next 32 bits. - buf = t.frameBuffer.Bytes()[:size32] - version := binary.BigEndian.Uint32(buf) - if version&THeaderHeaderMask == THeaderHeaderMagic { - t.clientType = clientHeaders - return t.parseHeaders(ctx, frameSize) - } - if version&VERSION_MASK == VERSION_1 { - t.clientType = clientFramedBinary - return nil - } - if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { - t.clientType = clientFramedCompact - return nil - } - if err := t.endOfFrame(); err != nil { - return err - } - return NewTProtocolExceptionWithType( - NOT_IMPLEMENTED, - errors.New("unsupported client transport type"), - ) -} - -// endOfFrame does end of frame handling. -// -// It closes frameReader, and also resets frame related states. -func (t *THeaderTransport) endOfFrame() error { - defer func() { - t.frameBuffer.Reset() - t.frameReader = nil - }() - return t.frameReader.Close() -} - -func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { - if t.clientType != clientHeaders { - return nil - } - - var err error - var meta headerMeta - if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil { - return err - } - frameSize -= headerMetaSize - t.Flags = meta.MagicFlags & THeaderFlagsMask - t.SequenceID = meta.SequenceID - headerLength := int64(meta.HeaderLength) * 4 - if int64(frameSize) < headerLength { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - errors.New("header size is larger than the whole frame"), - ) - } - headerBuf := NewTMemoryBuffer() - _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength) - if err != nil { - return err - } - hp := NewTCompactProtocol(headerBuf) - hp.SetTConfiguration(t.cfg) - - // At this point the header is already read into headerBuf, - // and t.frameBuffer starts from the actual payload. 
- protoID, err := hp.readVarint32() - if err != nil { - return err - } - t.protocolID = THeaderProtocolID(protoID) - - var transformCount int32 - transformCount, err = hp.readVarint32() - if err != nil { - return err - } - if transformCount > 0 { - reader := NewTransformReaderWithCapacity( - &t.frameBuffer, - int(transformCount), - ) - t.frameReader = reader - transformIDs := make([]THeaderTransformID, transformCount) - for i := 0; i < int(transformCount); i++ { - id, err := hp.readVarint32() - if err != nil { - return err - } - transformIDs[i] = THeaderTransformID(id) - } - // The transform IDs on the wire was added based on the order of - // writing, so on the reading side we need to reverse the order. - for i := transformCount - 1; i >= 0; i-- { - id := transformIDs[i] - if err := reader.AddTransform(id); err != nil { - return err - } - } - } - - // The info part does not use the transforms yet, so it's - // important to continue using headerBuf. - headers := make(THeaderMap) - for { - infoType, err := hp.readVarint32() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return err - } - if THeaderInfoType(infoType) == InfoKeyValue { - count, err := hp.readVarint32() - if err != nil { - return err - } - for i := 0; i < int(count); i++ { - key, err := hp.ReadString(ctx) - if err != nil { - return err - } - value, err := hp.ReadString(ctx) - if err != nil { - return err - } - headers[key] = value - } - } else { - // Skip reading info section on the first - // unsupported info type. - break - } - } - t.readHeaders = headers - - return nil -} - -func (t *THeaderTransport) needReadFrame() bool { - if t.clientType == clientUnknown { - // This is a new connection that's never read before. - return true - } - if t.isFramed() && t.frameReader == nil { - // We just finished the last frame. - return true - } - return false -} - -func (t *THeaderTransport) Read(p []byte) (read int, err error) { - // Here using context.Background instead of a context passed in is safe. - // First is that there's no way to pass context into this function. - // Then, 99% of the case when calling this Read frame is already read - // into frameReader. ReadFrame here is more of preventing bugs that - // didn't call ReadFrame before calling Read. - err = t.ReadFrame(context.Background()) - if err != nil { - return - } - if t.frameReader != nil { - read, err = t.frameReader.Read(p) - if err == nil && t.frameBuffer.Len() <= 0 { - // the last Read finished the frame, do endOfFrame - // handling here. - err = t.endOfFrame() - } else if err == io.EOF { - err = t.endOfFrame() - if err != nil { - return - } - if read == 0 { - // Try to read the next frame when we hit EOF - // (end of frame) immediately. - // When we got here, it means the last read - // finished the previous frame, but didn't - // do endOfFrame handling yet. - // We have to read the next frame here, - // as otherwise we would return 0 and nil, - // which is a case not handled well by most - // protocol implementations. - return t.Read(p) - } - } - return - } - return t.reader.Read(p) -} - -// Write writes data to the write buffer. -// -// You need to call Flush to actually write them to the transport. -func (t *THeaderTransport) Write(p []byte) (int, error) { - return t.writeBuffer.Write(p) -} - -// Flush writes the appropriate header and the write buffer to the underlying transport. 
-func (t *THeaderTransport) Flush(ctx context.Context) error { - if t.writeBuffer.Len() == 0 { - return nil - } - - defer t.writeBuffer.Reset() - - switch t.clientType { - default: - fallthrough - case clientUnknown: - t.clientType = clientHeaders - fallthrough - case clientHeaders: - headers := NewTMemoryBuffer() - hp := NewTCompactProtocol(headers) - hp.SetTConfiguration(t.cfg) - if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil { - return NewTTransportExceptionFromError(err) - } - for _, transform := range t.writeTransforms { - if _, err := hp.writeVarint32(int32(transform)); err != nil { - return NewTTransportExceptionFromError(err) - } - } - if len(t.writeHeaders) > 0 { - if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil { - return NewTTransportExceptionFromError(err) - } - for key, value := range t.writeHeaders { - if err := hp.WriteString(ctx, key); err != nil { - return NewTTransportExceptionFromError(err) - } - if err := hp.WriteString(ctx, value); err != nil { - return NewTTransportExceptionFromError(err) - } - } - } - padding := 4 - headers.Len()%4 - if padding < 4 { - buf := t.buffer[:padding] - for i := range buf { - buf[i] = 0 - } - if _, err := headers.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - } - - var payload bytes.Buffer - meta := headerMeta{ - MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask, - SequenceID: t.SequenceID, - HeaderLength: uint16(headers.Len() / 4), - } - if err := binary.Write(&payload, binary.BigEndian, meta); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := io.Copy(&payload, headers); err != nil { - return NewTTransportExceptionFromError(err) - } - - writer, err := NewTransformWriter(&payload, t.writeTransforms) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := io.Copy(writer, &t.writeBuffer); err != nil { - return NewTTransportExceptionFromError(err) - } - if err := writer.Close(); err != nil { - return NewTTransportExceptionFromError(err) - } - - // First write frame length - buf := t.buffer[:size32] - binary.BigEndian.PutUint32(buf, uint32(payload.Len())) - if _, err := t.transport.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - // Then write the payload - if _, err := io.Copy(t.transport, &payload); err != nil { - return NewTTransportExceptionFromError(err) - } - - case clientFramedBinary, clientFramedCompact: - buf := t.buffer[:size32] - binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len())) - if _, err := t.transport.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - fallthrough - case clientUnframedBinary, clientUnframedCompact: - if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil { - return NewTTransportExceptionFromError(err) - } - } - - select { - default: - case <-ctx.Done(): - return NewTTransportExceptionFromError(ctx.Err()) - } - - return t.transport.Flush(ctx) -} - -// Close closes the transport, along with its underlying transport. -func (t *THeaderTransport) Close() error { - if err := t.Flush(context.Background()); err != nil { - return err - } - return t.transport.Close() -} - -// RemainingBytes calls underlying transport's RemainingBytes. 
-// -// Even in framed cases, because of all the possible compression transforms -// involved, the remaining frame size is likely to be different from the actual -// remaining readable bytes, so we don't bother to keep tracking the remaining -// frame size by ourselves and just use the underlying transport's -// RemainingBytes directly. -func (t *THeaderTransport) RemainingBytes() uint64 { - return t.transport.RemainingBytes() -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (t *THeaderTransport) GetReadHeaders() THeaderMap { - return t.readHeaders -} - -// SetWriteHeader sets a header for write. -func (t *THeaderTransport) SetWriteHeader(key, value string) { - t.writeHeaders[key] = value -} - -// ClearWriteHeaders clears all write headers previously set. -func (t *THeaderTransport) ClearWriteHeaders() { - t.writeHeaders = make(THeaderMap) -} - -// AddTransform add a transform for writing. -func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { - if !supportedTransformIDs[transform] { - return NewTProtocolExceptionWithType( - NOT_IMPLEMENTED, - fmt.Errorf("THeaderTransformID %d not supported", transform), - ) - } - t.writeTransforms = append(t.writeTransforms, transform) - return nil -} - -// Protocol returns the wrapped protocol id used in this THeaderTransport. -func (t *THeaderTransport) Protocol() THeaderProtocolID { - switch t.clientType { - default: - return t.protocolID - case clientFramedBinary, clientUnframedBinary: - return THeaderProtocolBinary - case clientFramedCompact, clientUnframedCompact: - return THeaderProtocolCompact - } -} - -func (t *THeaderTransport) isFramed() bool { - switch t.clientType { - default: - return false - case clientHeaders, clientFramedBinary, clientFramedCompact: - return true - } -} - -// SetTConfiguration implements TConfigurationSetter. -func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(t.transport, cfg) - t.cfg = cfg -} - -// THeaderTransportFactory is a TTransportFactory implementation to create -// THeaderTransport. -// -// It also implements TConfigurationSetter. -type THeaderTransportFactory struct { - // The underlying factory, could be nil. - Factory TTransportFactory - - cfg *TConfiguration -} - -// Deprecated: Use NewTHeaderTransportFactoryConf instead. -func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { - return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with -// the given *TConfiguration. -func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { - return &THeaderTransportFactory{ - Factory: factory, - - cfg: conf, - } -} - -// GetTransport implements TTransportFactory. -func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if f.Factory != nil { - t, err := f.Factory.GetTransport(trans) - if err != nil { - return nil, err - } - return NewTHeaderTransportConf(t, f.cfg), nil - } - return NewTHeaderTransportConf(trans, f.cfg), nil -} - -// SetTConfiguration implements TConfigurationSetter. 
-func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.Factory, f.cfg) - f.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*THeaderTransportFactory)(nil) - _ TConfigurationSetter = (*THeaderTransport)(nil) -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go deleted file mode 100644 index 9a2cc98cc76..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "errors" - "io" - "net/http" - "net/url" - "strconv" -) - -// Default to using the shared http client. Library users are -// free to change this global client or specify one through -// THttpClientOptions. -var DefaultHttpClient *http.Client = http.DefaultClient - -type THttpClient struct { - client *http.Client - response *http.Response - url *url.URL - requestBuffer *bytes.Buffer - header http.Header - nsecConnectTimeout int64 - nsecReadTimeout int64 -} - -type THttpClientTransportFactory struct { - options THttpClientOptions - url string -} - -func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*THttpClient) - if ok && t.url != nil { - return NewTHttpClientWithOptions(t.url.String(), p.options) - } - } - return NewTHttpClientWithOptions(p.url, p.options) -} - -type THttpClientOptions struct { - // If nil, DefaultHttpClient is used - Client *http.Client -} - -func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return &THttpClientTransportFactory{url: url, options: options} -} - -func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - client := options.Client - if client == nil { - client = DefaultHttpClient - } - httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} - return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil -} - -func NewTHttpClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} - -// Set the HTTP Header for this specific Thrift Transport -// It is 
important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") -func (p *THttpClient) SetHeader(key string, value string) { - p.header.Add(key, value) -} - -// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// hdrValue := httpTrans.GetHeader("User-Agent") -func (p *THttpClient) GetHeader(key string) string { - return p.header.Get(key) -} - -// Deletes the HTTP Header given a Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.DelHeader("User-Agent") -func (p *THttpClient) DelHeader(key string) { - p.header.Del(key) -} - -func (p *THttpClient) Open() error { - // do nothing - return nil -} - -func (p *THttpClient) IsOpen() bool { - return p.response != nil || p.requestBuffer != nil -} - -func (p *THttpClient) closeResponse() error { - var err error - if p.response != nil && p.response.Body != nil { - // The docs specify that if keepalive is enabled and the response body is not - // read to completion the connection will never be returned to the pool and - // reused. Errors are being ignored here because if the connection is invalid - // and this fails for some reason, the Close() method will do any remaining - // cleanup. - io.Copy(io.Discard, p.response.Body) - - err = p.response.Body.Close() - } - - p.response = nil - return err -} - -func (p *THttpClient) Close() error { - if p.requestBuffer != nil { - p.requestBuffer.Reset() - p.requestBuffer = nil - } - return p.closeResponse() -} - -func (p *THttpClient) Read(buf []byte) (int, error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - n, err := p.response.Body.Read(buf) - if n > 0 && (err == nil || errors.Is(err, io.EOF)) { - return n, nil - } - return n, NewTTransportExceptionFromError(err) -} - -func (p *THttpClient) ReadByte() (c byte, err error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - return readByte(p.response.Body) -} - -func (p *THttpClient) Write(buf []byte) (int, error) { - if p.requestBuffer == nil { - return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.Write(buf) -} - -func (p *THttpClient) WriteByte(c byte) error { - if p.requestBuffer == nil { - return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.WriteByte(c) -} - -func (p *THttpClient) WriteString(s string) (n int, err error) { - if p.requestBuffer == nil { - return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.WriteString(s) -} - -func (p *THttpClient) Flush(ctx context.Context) error { - // Close any previous response body to avoid leaking connections. - p.closeResponse() - - // Give up the ownership of the current request buffer to http request, - // and create a new buffer for the next request. 
- buf := p.requestBuffer - p.requestBuffer = new(bytes.Buffer) - req, err := http.NewRequest("POST", p.url.String(), buf) - if err != nil { - return NewTTransportExceptionFromError(err) - } - req.Header = p.header - if ctx != nil { - req = req.WithContext(ctx) - } - response, err := p.client.Do(req) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if response.StatusCode != http.StatusOK { - // Close the response to avoid leaking file descriptors. closeResponse does - // more than just call Close(), so temporarily assign it and reuse the logic. - p.response = response - p.closeResponse() - - // TODO(pomack) log bad response - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) - } - p.response = response - return nil -} - -func (p *THttpClient) RemainingBytes() (num_bytes uint64) { - len := p.response.ContentLength - if len >= 0 { - return uint64(len) - } - - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -// Deprecated: Use NewTHttpClientTransportFactory instead. -func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. -func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, options) -} - -// Deprecated: Use NewTHttpClientWithOptions instead. -func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, options) -} - -// Deprecated: Use NewTHttpClient instead. -func NewTHttpPostClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go deleted file mode 100644 index bc6922762ad..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "compress/gzip" - "io" - "net/http" - "strings" - "sync" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return gz(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - }) -} - -// gz transparently compresses the HTTP response if the client supports it. -func gz(handler http.HandlerFunc) http.HandlerFunc { - sp := &sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, - } - - return func(w http.ResponseWriter, r *http.Request) { - if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { - handler(w, r) - return - } - w.Header().Set("Content-Encoding", "gzip") - gz := sp.Get().(*gzip.Writer) - gz.Reset(w) - defer func() { - _ = gz.Close() - sp.Put(gz) - }() - gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} - handler(gzw, r) - } -} - -type gzipResponseWriter struct { - io.Writer - http.ResponseWriter -} - -func (w gzipResponseWriter) Write(b []byte) (int, error) { - return w.Writer.Write(b) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go deleted file mode 100644 index 1c477990fea..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "context" - "io" -) - -// StreamTransport is a Transport made of an io.Reader and/or an io.Writer -type StreamTransport struct { - io.Reader - io.Writer - isReadWriter bool - closed bool -} - -type StreamTransportFactory struct { - Reader io.Reader - Writer io.Writer - isReadWriter bool -} - -func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*StreamTransport) - if ok { - if t.isReadWriter { - return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil - } - if t.Reader != nil && t.Writer != nil { - return NewStreamTransport(t.Reader, t.Writer), nil - } - if t.Reader != nil && t.Writer == nil { - return NewStreamTransportR(t.Reader), nil - } - if t.Reader == nil && t.Writer != nil { - return NewStreamTransportW(t.Writer), nil - } - return &StreamTransport{}, nil - } - } - if p.isReadWriter { - return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil - } - if p.Reader != nil && p.Writer != nil { - return NewStreamTransport(p.Reader, p.Writer), nil - } - if p.Reader != nil && p.Writer == nil { - return NewStreamTransportR(p.Reader), nil - } - if p.Reader == nil && p.Writer != nil { - return NewStreamTransportW(p.Writer), nil - } - return &StreamTransport{}, nil -} - -func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { - return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} -} - -func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportR(r io.Reader) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r)} -} - -func NewStreamTransportW(w io.Writer) *StreamTransport { - return &StreamTransport{Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { - bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) - return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} -} - -func (p *StreamTransport) IsOpen() bool { - return !p.closed -} - -// implicitly opened on creation, can't be reopened once closed -func (p *StreamTransport) Open() error { - if !p.closed { - return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") - } else { - return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") - } -} - -// Closes both the input and output streams. -func (p *StreamTransport) Close() error { - if p.closed { - return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") - } - p.closed = true - closedReader := false - if p.Reader != nil { - c, ok := p.Reader.(io.Closer) - if ok { - e := c.Close() - closedReader = true - if e != nil { - return e - } - } - p.Reader = nil - } - if p.Writer != nil && (!closedReader || !p.isReadWriter) { - c, ok := p.Writer.(io.Closer) - if ok { - e := c.Close() - if e != nil { - return e - } - } - p.Writer = nil - } - return nil -} - -// Flushes the underlying output stream if not null. 
-func (p *StreamTransport) Flush(ctx context.Context) error { - if p.Writer == nil { - return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") - } - f, ok := p.Writer.(Flusher) - if ok { - err := f.Flush() - if err != nil { - return NewTTransportExceptionFromError(err) - } - } - return nil -} - -func (p *StreamTransport) Read(c []byte) (n int, err error) { - n, err = p.Reader.Read(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) ReadByte() (c byte, err error) { - f, ok := p.Reader.(io.ByteReader) - if ok { - c, err = f.ReadByte() - } else { - c, err = readByte(p.Reader) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) Write(c []byte) (n int, err error) { - n, err = p.Writer.Write(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteByte(c byte) (err error) { - f, ok := p.Writer.(io.ByteWriter) - if ok { - err = f.WriteByte(c) - } else { - err = writeByte(p.Writer, c) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteString(s string) (n int, err error) { - f, ok := p.Writer.(stringWriter) - if ok { - n, err = f.WriteString(s) - } else { - n, err = p.Writer.Write([]byte(s)) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.Reader, conf) - PropagateTConfiguration(p.Writer, conf) -} - -var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go deleted file mode 100644 index 8e59d16cfda..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go +++ /dev/null @@ -1,591 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "encoding/base64" - "fmt" -) - -const ( - THRIFT_JSON_PROTOCOL_VERSION = 1 -) - -// for references to _ParseContext see tsimplejson_protocol.go - -// JSON protocol implementation for thrift. 
-// Utilizes Simple JSON protocol -// -type TJSONProtocol struct { - *TSimpleJSONProtocol -} - -// Constructor -func NewTJSONProtocol(t TTransport) *TJSONProtocol { - v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} - v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) - v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) - return v -} - -// Factory -type TJSONProtocolFactory struct{} - -func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTJSONProtocol(trans) -} - -func NewTJSONProtocolFactory() *TJSONProtocolFactory { - return &TJSONProtocolFactory{} -} - -func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { - return e - } - if e := p.WriteString(ctx, name); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(ctx, seqId); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if e := p.WriteI16(ctx, id); e != nil { - return e - } - if e := p.OutputObjectBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(typeId) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } - -func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(keyType) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - s, e1 = p.TypeIdToString(valueType) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - if e := p.WriteI64(ctx, int64(size)); e != nil { - return e - } - return p.OutputObjectBegin() -} - -func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { - if e := p.OutputObjectEnd(); e != nil { - return e - } - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { - if b { - return p.WriteI32(ctx, 1) - } - return p.WriteI32(ctx, 0) -} - -func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { - return p.WriteI32(ctx, int32(b)) -} - -func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { - return p.WriteI32(ctx, 
int32(v)) -} - -func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { - return p.OutputF64(v) -} - -func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { - return p.OutputString(v) -} - -func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - version, err := p.ReadI32(ctx) - if err != nil { - return name, typeId, seqId, err - } - if version != THRIFT_JSON_PROTOCOL_VERSION { - e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) - return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) - - } - if name, err = p.ReadString(ctx); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte(ctx) - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(ctx); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { - err := p.ParseListEnd() - return err -} - -func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { - b, _ := p.reader.Peek(1) - if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { - return "", STOP, -1, nil - } - fieldId, err := p.ReadI16(ctx) - if err != nil { - return "", STOP, fieldId, err - } - if _, err = p.ParseObjectStart(); err != nil { - return "", STOP, fieldId, err - } - sType, err := p.ReadString(ctx) - if err != nil { - return "", STOP, fieldId, err - } - fType, err := p.StringToTypeId(sType) - return "", fType, fieldId, err -} - -func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - sKeyType, e := p.ReadString(ctx) - if e != nil { - return keyType, 
valueType, size, e - } - keyType, e = p.StringToTypeId(sKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - sValueType, e := p.ReadString(ctx) - if e != nil { - return keyType, valueType, size, e - } - valueType, e = p.StringToTypeId(sValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, e := p.ReadI64(ctx) - if e != nil { - return keyType, valueType, size, e - } - size = int(iSize) - - _, e = p.ParseObjectStart() - return keyType, valueType, size, e -} - -func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error { - e := p.ParseObjectEnd() - if e != nil { - return e - } - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { - value, err := p.ReadI32(ctx) - return (value != 0), err -} - -func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.ReadI64(ctx) - return int8(v), err -} - -func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { - v, err := p.ReadI64(ctx) - return int16(v), err -} - -func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { - v, err := p.ReadI64(ctx) - return int32(v), err -} - -func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a 
JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { - err = p.writer.Flush() - if err == nil { - err = p.trans.Flush(ctx) - } - return NewTProtocolException(err) -} - -func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - // We don't really use the ctx in ReadString implementation, - // so this is safe for now. - // We might want to add context to ParseElemListBegin if we start to use - // ctx in ReadString implementation in the future. - sElemType, err := p.ReadString(context.Background()) - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err2 := p.ParseI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - // We don't really use the ctx in ReadString implementation, - // so this is safe for now. - // We might want to add context to ParseElemListBegin if we start to use - // ctx in ReadString implementation in the future. 
- sElemType, err := p.ReadString(context.Background()) - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err2 := p.ParseI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { - switch byte(fieldType) { - case BOOL: - return "tf", nil - case BYTE: - return "i8", nil - case I16: - return "i16", nil - case I32: - return "i32", nil - case I64: - return "i64", nil - case DOUBLE: - return "dbl", nil - case STRING: - return "str", nil - case STRUCT: - return "rec", nil - case MAP: - return "map", nil - case SET: - return "set", nil - case LIST: - return "lst", nil - } - - e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { - switch fieldType { - case "tf": - return TType(BOOL), nil - case "i8": - return TType(BYTE), nil - case "i16": - return TType(I16), nil - case "i32": - return TType(I32), nil - case "i64": - return TType(I64), nil - case "dbl": - return TType(DOUBLE), nil - case "str": - return TType(STRING), nil - case "rec": - return TType(STRUCT), nil - case "map": - return TType(MAP), nil - case "set": - return TType(SET), nil - case "lst": - return TType(LIST), nil - } - - e := fmt.Errorf("Unknown type identifier: %s", fieldType) - return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go deleted file mode 100644 index c42aac998b7..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "log" - "os" - "testing" -) - -// Logger is a simple wrapper of a logging function. -// -// In reality the users might actually use different logging libraries, and they -// are not always compatible with each other. -// -// Logger is meant to be a simple common ground that it's easy to wrap whatever -// logging library they use into. 
-// -// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design -// discussion behind it. -type Logger func(msg string) - -// NopLogger is a Logger implementation that does nothing. -func NopLogger(msg string) {} - -// StdLogger wraps stdlib log package into a Logger. -// -// If logger passed in is nil, it will fallback to use stderr and default flags. -func StdLogger(logger *log.Logger) Logger { - if logger == nil { - logger = log.New(os.Stderr, "", log.LstdFlags) - } - return func(msg string) { - logger.Print(msg) - } -} - -// TestLogger is a Logger implementation can be used in test codes. -// -// It fails the test when being called. -func TestLogger(tb testing.TB) Logger { - return func(msg string) { - tb.Errorf("logger called with msg: %q", msg) - } -} - -func fallbackLogger(logger Logger) Logger { - if logger == nil { - return StdLogger(nil) - } - return logger -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go deleted file mode 100644 index 5936d273037..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" -) - -// Memory buffer-based implementation of the TTransport interface. 
-type TMemoryBuffer struct { - *bytes.Buffer - size int -} - -type TMemoryBufferTransportFactory struct { - size int -} - -func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*TMemoryBuffer) - if ok && t.size > 0 { - return NewTMemoryBufferLen(t.size), nil - } - } - return NewTMemoryBufferLen(p.size), nil -} - -func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { - return &TMemoryBufferTransportFactory{size: size} -} - -func NewTMemoryBuffer() *TMemoryBuffer { - return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} -} - -func NewTMemoryBufferLen(size int) *TMemoryBuffer { - buf := make([]byte, 0, size) - return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} -} - -func (p *TMemoryBuffer) IsOpen() bool { - return true -} - -func (p *TMemoryBuffer) Open() error { - return nil -} - -func (p *TMemoryBuffer) Close() error { - p.Buffer.Reset() - return nil -} - -// Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush(ctx context.Context) error { - return nil -} - -func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { - return uint64(p.Buffer.Len()) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go deleted file mode 100644 index 25ab2e98a25..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Message type constants in the Thrift protocol. -type TMessageType int32 - -const ( - INVALID_TMESSAGE_TYPE TMessageType = 0 - CALL TMessageType = 1 - REPLY TMessageType = 2 - EXCEPTION TMessageType = 3 - ONEWAY TMessageType = 4 -) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go deleted file mode 100644 index 8a788df02be..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the -// TProcessorFunctions for that TProcessor. -// -// Middlewares are passed in the name of the function as set in the processor -// map of the TProcessor. -type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction - -// WrapProcessor takes an existing TProcessor and wraps each of its inner -// TProcessorFunctions with the middlewares passed in and returns it. -// -// Middlewares will be called in the order that they are defined: -// -// 1. Middlewares[0] -// 2. Middlewares[1] -// ... -// N. Middlewares[n] -func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { - for name, processorFunc := range processor.ProcessorMap() { - wrapped := processorFunc - // Add middlewares in reverse so the first in the list is the outermost. - for i := len(middlewares) - 1; i >= 0; i-- { - wrapped = middlewares[i](name, wrapped) - } - processor.AddToProcessorMap(name, wrapped) - } - return processor -} - -// WrappedTProcessorFunction is a convenience struct that implements the -// TProcessorFunction interface that can be used when implementing custom -// Middleware. -type WrappedTProcessorFunction struct { - // Wrapped is called by WrappedTProcessorFunction.Process and should be a - // "wrapped" call to a base TProcessorFunc.Process call. - Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// Process implements the TProcessorFunction interface using p.Wrapped. -func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { - return p.Wrapped(ctx, seqID, in, out) -} - -// verify that WrappedTProcessorFunction implements TProcessorFunction -var ( - _ TProcessorFunction = WrappedTProcessorFunction{} - _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) -) - -// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls -// with custom middleware. -type ClientMiddleware func(TClient) TClient - -// WrappedTClient is a convenience struct that implements the TClient interface -// using inner Wrapped function. -// -// This is provided to aid in developing ClientMiddleware. -type WrappedTClient struct { - Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) -} - -// Call implements the TClient interface by calling and returning c.Wrapped. -func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { - return c.Wrapped(ctx, method, args, result) -} - -// verify that WrappedTClient implements TClient -var ( - _ TClient = WrappedTClient{} - _ TClient = (*WrappedTClient)(nil) -) - -// WrapClient wraps the given TClient in the given middlewares. -// -// Middlewares will be called in the order that they are defined: -// -// 1. Middlewares[0] -// 2. Middlewares[1] -// ... -// N. 
Middlewares[n] -func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { - // Add middlewares in reverse so the first in the list is the outermost. - for i := len(middlewares) - 1; i >= 0; i-- { - client = middlewares[i](client) - } - return client -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go deleted file mode 100644 index d542b23a998..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" - "strings" -) - -/* -TMultiplexedProtocol is a protocol-independent concrete decorator -that allows a Thrift client to communicate with a multiplexing Thrift server, -by prepending the service name to the function name during function calls. - -NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request -from a multiplexing client. - -This example uses a single socket transport to invoke two services: - -socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) -transport := thrift.NewTFramedTransport(socket) -protocol := thrift.NewTBinaryProtocolTransport(transport) - -mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") -service := Calculator.NewCalculatorClient(mp) - -mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") -service2 := WeatherReport.NewWeatherReportClient(mp2) - -err := transport.Open() -if err != nil { - t.Fatal("Unable to open client socket", err) -} - -fmt.Println(service.Add(2,2)) -fmt.Println(service2.GetTemperature()) -*/ - -type TMultiplexedProtocol struct { - TProtocol - serviceName string -} - -const MULTIPLEXED_SEPARATOR = ":" - -func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { - return &TMultiplexedProtocol{ - TProtocol: protocol, - serviceName: serviceName, - } -} - -func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - if typeId == CALL || typeId == ONEWAY { - return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) - } else { - return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) - } -} - -/* -TMultiplexedProcessor is a TProcessor allowing -a single TServer to provide multiple services. 
- -To do so, you instantiate the processor and then register additional -processors with it, as shown in the following example: - -var processor = thrift.NewTMultiplexedProcessor() - -firstProcessor := -processor.RegisterProcessor("FirstService", firstProcessor) - -processor.registerProcessor( - "Calculator", - Calculator.NewCalculatorProcessor(&CalculatorHandler{}), -) - -processor.registerProcessor( - "WeatherReport", - WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), -) - -serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) -if err != nil { - t.Fatal("Unable to create server socket", err) -} -server := thrift.NewTSimpleServer2(processor, serverTransport) -server.Serve(); -*/ - -type TMultiplexedProcessor struct { - serviceProcessorMap map[string]TProcessor - DefaultProcessor TProcessor -} - -func NewTMultiplexedProcessor() *TMultiplexedProcessor { - return &TMultiplexedProcessor{ - serviceProcessorMap: make(map[string]TProcessor), - } -} - -// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" -// to TProcessorFunction for any registered processors. If there is also a -// DefaultProcessor, the keys for the methods on that processor will simply be -// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and -// other registered processors, then the keys will be a mix of both formats. -// -// The implementation differs with other TProcessors in that the map returned is -// a new map, while most TProcessors just return their internal mapping directly. -// This means that edits to the map returned by this implementation of ProcessorMap -// will not affect the underlying mapping within the TMultiplexedProcessor. -func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { - processorFuncMap := make(map[string]TProcessorFunction) - for name, processor := range t.serviceProcessorMap { - for method, processorFunc := range processor.ProcessorMap() { - processorFuncName := name + MULTIPLEXED_SEPARATOR + method - processorFuncMap[processorFuncName] = processorFunc - } - } - if t.DefaultProcessor != nil { - for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { - processorFuncMap[method] = processorFunc - } - } - return processorFuncMap -} - -// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on -// the format of "name". -// -// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", -// then it sets the given TProcessorFunction on the inner TProcessor with the -// ProcessorName component using the FunctionName component. -// -// If "name" is just in the format "{FunctionName}", that is to say there is no -// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor -// configured, then it will set the given TProcessorFunction on the DefaultProcessor -// using the given name. -// -// If there is not a TProcessor available for the given name, then this function -// does nothing. This can happen when there is no TProcessor registered for -// the given ProcessorName or if all that is given is the FunctionName and there -// is no DefaultProcessor set. 
-func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) { - processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR) - if !found { - if t.DefaultProcessor != nil { - t.DefaultProcessor.AddToProcessorMap(processorName, processorFunc) - } - return - } - if processor, ok := t.serviceProcessorMap[processorName]; ok { - processor.AddToProcessorMap(funcName, processorFunc) - } - -} - -// verify that TMultiplexedProcessor implements TProcessor -var _ TProcessor = (*TMultiplexedProcessor)(nil) - -func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { - t.DefaultProcessor = processor -} - -func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { - if t.serviceProcessorMap == nil { - t.serviceProcessorMap = make(map[string]TProcessor) - } - t.serviceProcessorMap[name] = processor -} - -func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin(ctx) - if err != nil { - return false, NewTProtocolException(err) - } - if typeId != CALL && typeId != ONEWAY { - return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId)) - } - // extract the service name - processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR) - if !found { - if t.DefaultProcessor != nil { - smb := NewStoredMessageProtocol(in, name, typeId, seqid) - return t.DefaultProcessor.Process(ctx, smb, out) - } - return false, NewTProtocolException(fmt.Errorf( - "Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", - name, - )) - } - actualProcessor, ok := t.serviceProcessorMap[processorName] - if !ok { - return false, NewTProtocolException(fmt.Errorf( - "Service name not found: %s. Did you forget to call registerProcessor()?", - processorName, - )) - } - smb := NewStoredMessageProtocol(in, funcName, typeId, seqid) - return actualProcessor.Process(ctx, smb, out) -} - -// Protocol that use stored message for ReadMessageBegin -type storedMessageProtocol struct { - TProtocol - name string - typeId TMessageType - seqid int32 -} - -func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { - return &storedMessageProtocol{protocol, name, typeId, seqid} -} - -func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { - return s.name, s.typeId, s.seqid, nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go deleted file mode 100644 index e4512d204c0..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric - NEGATIVE_INFINITY Numeric - NAN Numeric - ZERO Numeric - NUMERIC_NULL Numeric -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(iValue, 10) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(int64(iValue), 10) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} - -func init() { - INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - 
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go deleted file mode 100644 index fb564ea8193..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -/////////////////////////////////////////////////////////////////////////////// -// This file is home to helpers that convert from various base types to -// respective pointer types. This is necessary because Go does not permit -// references to constants, nor can a pointer type to base type be allocated -// and initialized in a single expression. -// -// E.g., this is not allowed: -// -// var ip *int = &5 -// -// But this *is* allowed: -// -// func IntPtr(i int) *int { return &i } -// var ip *int = IntPtr(5) -// -// Since pointers to base types are commonplace as [optional] fields in -// exported thrift structs, we factor such helpers here. -/////////////////////////////////////////////////////////////////////////////// - -func Float32Ptr(v float32) *float32 { return &v } -func Float64Ptr(v float64) *float64 { return &v } -func IntPtr(v int) *int { return &v } -func Int8Ptr(v int8) *int8 { return &v } -func Int16Ptr(v int16) *int16 { return &v } -func Int32Ptr(v int32) *int32 { return &v } -func Int64Ptr(v int64) *int64 { return &v } -func StringPtr(v string) *string { return &v } -func Uint32Ptr(v uint32) *uint32 { return &v } -func Uint64Ptr(v uint64) *uint64 { return &v } -func BoolPtr(v bool) *bool { return &v } -func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go deleted file mode 100644 index 245a3ccfc98..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// A processor is a generic object which operates upon an input stream and -// writes to some output stream. -type TProcessor interface { - Process(ctx context.Context, in, out TProtocol) (bool, TException) - - // ProcessorMap returns a map of thrift method names to TProcessorFunctions. - ProcessorMap() map[string]TProcessorFunction - - // AddToProcessorMap adds the given TProcessorFunction to the internal - // processor map at the given key. - // - // If one is already set at the given key, it will be replaced with the new - // TProcessorFunction. - AddToProcessorMap(string, TProcessorFunction) -} - -type TProcessorFunction interface { - Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// The default processor factory just returns a singleton -// instance. -type TProcessorFactory interface { - GetProcessor(trans TTransport) TProcessor -} - -type tProcessorFactory struct { - processor TProcessor -} - -func NewTProcessorFactory(p TProcessor) TProcessorFactory { - return &tProcessorFactory{processor: p} -} - -func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { - return p.processor -} - -/** - * The default processor factory just returns a singleton - * instance. - */ -type TProcessorFunctionFactory interface { - GetProcessorFunction(trans TTransport) TProcessorFunction -} - -type tProcessorFunctionFactory struct { - processor TProcessorFunction -} - -func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { - return &tProcessorFunctionFactory{processor: p} -} - -func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { - return p.processor -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go deleted file mode 100644 index 0a69bd4162d..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "errors" - "fmt" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error - WriteMessageEnd(ctx context.Context) error - WriteStructBegin(ctx context.Context, name string) error - WriteStructEnd(ctx context.Context) error - WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error - WriteFieldEnd(ctx context.Context) error - WriteFieldStop(ctx context.Context) error - WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error - WriteMapEnd(ctx context.Context) error - WriteListBegin(ctx context.Context, elemType TType, size int) error - WriteListEnd(ctx context.Context) error - WriteSetBegin(ctx context.Context, elemType TType, size int) error - WriteSetEnd(ctx context.Context) error - WriteBool(ctx context.Context, value bool) error - WriteByte(ctx context.Context, value int8) error - WriteI16(ctx context.Context, value int16) error - WriteI32(ctx context.Context, value int32) error - WriteI64(ctx context.Context, value int64) error - WriteDouble(ctx context.Context, value float64) error - WriteString(ctx context.Context, value string) error - WriteBinary(ctx context.Context, value []byte) error - - ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd(ctx context.Context) error - ReadStructBegin(ctx context.Context) (name string, err error) - ReadStructEnd(ctx context.Context) error - ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) - ReadFieldEnd(ctx context.Context) error - ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) - ReadMapEnd(ctx context.Context) error - ReadListBegin(ctx context.Context) (elemType TType, size int, err error) - ReadListEnd(ctx context.Context) error - ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) - ReadSetEnd(ctx context.Context) error - ReadBool(ctx context.Context) (value bool, err error) - ReadByte(ctx context.Context) (value int8, err error) - ReadI16(ctx context.Context) (value int16, err error) - ReadI32(ctx context.Context) (value int32, err error) - ReadI64(ctx context.Context) (value int64, err error) - ReadDouble(ctx context.Context) (value float64, err error) - ReadString(ctx context.Context) (value string, err error) - ReadBinary(ctx context.Context) (value []byte, err error) - - Skip(ctx context.Context, fieldType TType) (err error) - Flush(ctx context.Context) (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. -func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { - return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. 
-func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case BOOL: - _, err = self.ReadBool(ctx) - return - case BYTE: - _, err = self.ReadByte(ctx) - return - case I16: - _, err = self.ReadI16(ctx) - return - case I32: - _, err = self.ReadI32(ctx) - return - case I64: - _, err = self.ReadI64(ctx) - return - case DOUBLE: - _, err = self.ReadDouble(ctx) - return - case STRING: - _, err = self.ReadString(ctx) - return - case STRUCT: - if _, err = self.ReadStructBegin(ctx); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin(ctx) - if typeId == STOP { - break - } - err := Skip(ctx, self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd(ctx) - } - return self.ReadStructEnd(ctx) - case MAP: - keyType, valueType, size, err := self.ReadMapBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(ctx, valueType) - } - return self.ReadMapEnd(ctx) - case SET: - elemType, size, err := self.ReadSetBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd(ctx) - case LIST: - elemType, size, err := self.ReadListBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd(ctx) - default: - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go deleted file mode 100644 index 9dcf4bfd94c..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "encoding/base64" - "errors" -) - -// Thrift Protocol exception -type TProtocolException interface { - TException - TypeId() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type tProtocolException struct { - typeId int - err error - msg string -} - -var _ TProtocolException = (*tProtocolException)(nil) - -func (tProtocolException) TExceptionType() TExceptionType { - return TExceptionTypeProtocol -} - -func (p *tProtocolException) TypeId() int { - return p.typeId -} - -func (p *tProtocolException) String() string { - return p.msg -} - -func (p *tProtocolException) Error() string { - return p.msg -} - -func (p *tProtocolException) Unwrap() error { - return p.err -} - -func NewTProtocolException(err error) TProtocolException { - if err == nil { - return nil - } - - if e, ok := err.(TProtocolException); ok { - return e - } - - if errors.As(err, new(base64.CorruptInputError)) { - return NewTProtocolExceptionWithType(INVALID_DATA, err) - } - - return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) -} - -func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { - if err == nil { - return nil - } - return &tProtocolException{ - typeId: errType, - err: err, - msg: err.Error(), - } -} - -func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { - return &tProtocolException{ - typeId: err.TypeId(), - err: err, - msg: prepend + err.Error(), - } -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go deleted file mode 100644 index c40f796d886..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory interface for constructing protocol instances. 
-type TProtocolFactory interface { - GetProtocol(trans TTransport) TProtocol -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go deleted file mode 100644 index d884c6ac6c4..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" -) - -// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. -type responseHelperKey struct{} - -// TResponseHelper defines a object with a set of helper functions that can be -// retrieved from the context object passed into server handler functions. -// -// Use GetResponseHelper to retrieve the injected TResponseHelper implementation -// from the context object. -// -// The zero value of TResponseHelper is valid with all helper functions being -// no-op. -type TResponseHelper struct { - // THeader related functions - *THeaderResponseHelper -} - -// THeaderResponseHelper defines THeader related TResponseHelper functions. -// -// The zero value of *THeaderResponseHelper is valid with all helper functions -// being no-op. -type THeaderResponseHelper struct { - proto *THeaderProtocol -} - -// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the -// underlying TProtocol. -func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { - if hp, ok := proto.(*THeaderProtocol); ok { - return &THeaderResponseHelper{ - proto: hp, - } - } - return nil -} - -// SetHeader sets a response header. -// -// It's no-op if the underlying protocol/transport does not support THeader. -func (h *THeaderResponseHelper) SetHeader(key, value string) { - if h != nil && h.proto != nil { - h.proto.SetWriteHeader(key, value) - } -} - -// ClearHeaders clears all the response headers previously set. -// -// It's no-op if the underlying protocol/transport does not support THeader. -func (h *THeaderResponseHelper) ClearHeaders() { - if h != nil && h.proto != nil { - h.proto.ClearWriteHeaders() - } -} - -// GetResponseHelper retrieves the TResponseHelper implementation injected into -// the context object. -// -// If no helper was found in the context object, a nop helper with ok == false -// will be returned. -func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { - if v := ctx.Value(responseHelperKey{}); v != nil { - helper, ok = v.(TResponseHelper) - } - return -} - -// SetResponseHelper injects TResponseHelper into the context object. 
-func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { - return context.WithValue(ctx, responseHelperKey{}, helper) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go deleted file mode 100644 index 83fdf29f5cb..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type RichTransport struct { - TTransport -} - -// Wraps Transport to provide TRichTransport interface -func NewTRichTransport(trans TTransport) *RichTransport { - return &RichTransport{trans} -} - -func (r *RichTransport) ReadByte() (c byte, err error) { - return readByte(r.TTransport) -} - -func (r *RichTransport) WriteByte(c byte) error { - return writeByte(r.TTransport, c) -} - -func (r *RichTransport) WriteString(s string) (n int, err error) { - return r.Write([]byte(s)) -} - -func (r *RichTransport) RemainingBytes() (num_bytes uint64) { - return r.TTransport.RemainingBytes() -} - -func readByte(r io.Reader) (c byte, err error) { - v := [1]byte{0} - n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || errors.Is(err, io.EOF)) { - return v[0], nil - } - if n > 0 && err != nil { - return v[0], err - } - if err != nil { - return 0, err - } - return v[0], nil -} - -func writeByte(w io.Writer, c byte) error { - v := [1]byte{c} - _, err := w.Write(v[0:1]) - return err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go deleted file mode 100644 index c44979094c6..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "sync" -) - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(ctx context.Context, p TProtocol) error - Read(ctx context.Context, p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolTransport(transport) - - return &TSerializer{ - Transport: transport, - Protocol: protocol, - } -} - -func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(ctx, t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - if err = t.Transport.Flush(ctx); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(ctx, t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - - if err = t.Transport.Flush(ctx); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) - return -} - -// TSerializerPool is the thread-safe version of TSerializer, it uses resource -// pool of TSerializer under the hood. -// -// It must be initialized with either NewTSerializerPool or -// NewTSerializerPoolSizeFactory. -type TSerializerPool struct { - pool sync.Pool -} - -// NewTSerializerPool creates a new TSerializerPool. -// -// NewTSerializer can be used as the arg here. -func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { - return &TSerializerPool{ - pool: sync.Pool{ - New: func() interface{} { - return f() - }, - }, - } -} - -// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given -// size and protocol factory. -// -// Note that the size is not the limit. The TMemoryBuffer underneath can grow -// larger than that. It just dictates the initial size. 
-func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { - return &TSerializerPool{ - pool: sync.Pool{ - New: func() interface{} { - transport := NewTMemoryBufferLen(size) - protocol := factory.GetProtocol(transport) - - return &TSerializer{ - Transport: transport, - Protocol: protocol, - } - }, - }, - } -} - -func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { - s := t.pool.Get().(*TSerializer) - defer t.pool.Put(s) - return s.WriteString(ctx, msg) -} - -func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { - s := t.pool.Get().(*TSerializer) - defer t.pool.Put(s) - return s.Write(ctx, msg) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go deleted file mode 100644 index f813fa3532c..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TServer interface { - ProcessorFactory() TProcessorFactory - ServerTransport() TServerTransport - InputTransportFactory() TTransportFactory - OutputTransportFactory() TTransportFactory - InputProtocolFactory() TProtocolFactory - OutputProtocolFactory() TProtocolFactory - - // Starts the server - Serve() error - // Stops the server. This is optional on a per-implementation basis. Not - // all servers are required to be cleanly stoppable. - Stop() error -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go deleted file mode 100644 index 7dd24ae3648..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "net" - "sync" - "time" -) - -type TServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - - // Protects the interrupted value to make it thread safe. - mu sync.RWMutex - interrupted bool -} - -func NewTServerSocket(listenAddr string) (*TServerSocket, error) { - return NewTServerSocketTimeout(listenAddr, 0) -} - -func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil -} - -// Creates a TServerSocket from a net.Addr -func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { - return &TServerSocket{addr: addr, clientTimeout: clientTimeout} -} - -func (p *TServerSocket) Listen() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return nil - } - l, err := net.Listen(p.addr.Network(), p.addr.String()) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TServerSocket) Accept() (TTransport, error) { - p.mu.RLock() - interrupted := p.interrupted - p.mu.RUnlock() - - if interrupted { - return nil, errTransportInterrupted - } - - p.mu.Lock() - listener := p.listener - p.mu.Unlock() - if listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - - conn, err := listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TServerSocket) Open() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TServerSocket) Addr() net.Addr { - if p.listener != nil { - return p.listener.Addr() - } - return p.addr -} - -func (p *TServerSocket) Close() error { - var err error - p.mu.Lock() - if p.IsListening() { - err = p.listener.Close() - p.listener = nil - } - p.mu.Unlock() - return err -} - -func (p *TServerSocket) Interrupt() error { - p.mu.Lock() - p.interrupted = true - p.mu.Unlock() - p.Close() - - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go deleted file mode 100644 index 51c40b64a1d..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Server transport. Object which provides client transports. -type TServerTransport interface { - Listen() error - Accept() (TTransport, error) - Close() error - - // Optional method implementation. This signals to the server transport - // that it should break out of any accept() or listen() that it is currently - // blocked on. This method, if implemented, MUST be thread safe, as it may - // be called from a different thread context than the other TServerTransport - // methods. - Interrupt() error -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go deleted file mode 100644 index d1a8154532d..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go +++ /dev/null @@ -1,1373 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "strconv" -) - -type _ParseContext int - -const ( - _CONTEXT_INVALID _ParseContext = iota - _CONTEXT_IN_TOPLEVEL // 1 - _CONTEXT_IN_LIST_FIRST // 2 - _CONTEXT_IN_LIST // 3 - _CONTEXT_IN_OBJECT_FIRST // 4 - _CONTEXT_IN_OBJECT_NEXT_KEY // 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 -) - -func (p _ParseContext) String() string { - switch p { - case _CONTEXT_IN_TOPLEVEL: - return "TOPLEVEL" - case _CONTEXT_IN_LIST_FIRST: - return "LIST-FIRST" - case _CONTEXT_IN_LIST: - return "LIST" - case _CONTEXT_IN_OBJECT_FIRST: - return "OBJECT-FIRST" - case _CONTEXT_IN_OBJECT_NEXT_KEY: - return "OBJECT-NEXT-KEY" - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - return "OBJECT-NEXT-VALUE" - } - return "UNKNOWN-PARSE-CONTEXT" -} - -type jsonContextStack []_ParseContext - -func (s *jsonContextStack) push(v _ParseContext) { - *s = append(*s, v) -} - -func (s jsonContextStack) peek() (v _ParseContext, ok bool) { - l := len(s) - if l <= 0 { - return - } - return s[l-1], true -} - -func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { - l := len(*s) - if l <= 0 { - return - } - v = (*s)[l-1] - *s = (*s)[0 : l-1] - return v, true -} - -var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) - -// Simple JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. -// -type TSimpleJSONProtocol struct { - trans TTransport - - parseContextStack jsonContextStack - dumpContext jsonContextStack - - writer *bufio.Writer - reader *bufio.Reader -} - -// Constructor -func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { - v := &TSimpleJSONProtocol{trans: t, - writer: bufio.NewWriter(t), - reader: bufio.NewReader(t), - } - v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) - v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) - return v -} - -// Factory -type TSimpleJSONProtocolFactory struct{} - -func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTSimpleJSONProtocol(trans) -} - -func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { - return &TSimpleJSONProtocolFactory{} -} - -var ( - JSON_COMMA []byte - JSON_COLON []byte - JSON_LBRACE []byte - JSON_RBRACE []byte - JSON_LBRACKET []byte - JSON_RBRACKET []byte - JSON_QUOTE byte - JSON_QUOTE_BYTES []byte - JSON_NULL []byte - JSON_TRUE []byte - JSON_FALSE []byte - JSON_INFINITY string - JSON_NEGATIVE_INFINITY string - JSON_NAN string - JSON_INFINITY_BYTES []byte - JSON_NEGATIVE_INFINITY_BYTES []byte - JSON_NAN_BYTES []byte - json_nonbase_map_elem_bytes []byte -) - -func init() { - JSON_COMMA = []byte{','} - JSON_COLON = []byte{':'} - JSON_LBRACE = []byte{'{'} - JSON_RBRACE = []byte{'}'} - JSON_LBRACKET = []byte{'['} - JSON_RBRACKET = []byte{']'} - JSON_QUOTE = '"' - JSON_QUOTE_BYTES = []byte{'"'} - JSON_NULL = []byte{'n', 'u', 'l', 'l'} - JSON_TRUE = []byte{'t', 'r', 'u', 'e'} - JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} - JSON_INFINITY = "Infinity" - JSON_NEGATIVE_INFINITY = "-Infinity" - JSON_NAN = "NaN" - JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NAN_BYTES = []byte{'N', 'a', 'N'} - json_nonbase_map_elem_bytes = []byte{']', ',', '['} -} - 
-func jsonQuote(s string) string { - b, _ := json.Marshal(s) - s1 := string(b) - return s1 -} - -func jsonUnquote(s string) (string, bool) { - s1 := new(string) - err := json.Unmarshal([]byte(s), s1) - return *s1, err == nil -} - -func mismatch(expected, actual string) error { - return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) -} - -func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteString(ctx, name); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(ctx, seqId); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if e := p.WriteString(ctx, name); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } - -func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(keyType)); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(valueType)); e != nil { - return e - } - return p.WriteI32(ctx, int32(size)) -} - -func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { - return p.OutputBool(b) -} - -func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { - return p.WriteI32(ctx, int32(b)) -} - -func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { - return p.WriteI32(ctx, int32(v)) -} - -func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { - return p.OutputF64(v) -} - -func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { - return p.OutputString(v) -} - -func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must 
convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - if name, err = p.ReadString(ctx); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte(ctx) - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(ctx); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { - if err := p.ParsePreValue(); err != nil { - return "", STOP, 0, err - } - b, _ := p.reader.Peek(1) - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return "", STOP, 0, nil - case JSON_QUOTE: - p.reader.ReadByte() - name, err := p.ParseStringBody() - // simplejson is not meant to be read back into thrift - // - see http://wiki.apache.org/thrift/ThriftUsageJava - // - use JSON instead - if err != nil { - return name, STOP, 0, err - } - return name, STOP, -1, p.ParsePostValue() - } - e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) - return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return "", STOP, 0, NewTProtocolException(io.EOF) -} - -func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - bKeyType, e := p.ReadByte(ctx) - keyType = TType(bKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - bValueType, e := p.ReadByte(ctx) - valueType = TType(bValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64(ctx) - size = int(iSize) - return keyType, valueType, size, err -} - -func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e 
error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { - var value bool - - if err := p.ParsePreValue(); err != nil { - return value, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 { - switch f[0] { - case JSON_TRUE[0]: - b := make([]byte, len(JSON_TRUE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_TRUE) { - value = true - } else { - e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_FALSE[0]: - b := make([]byte, len(JSON_FALSE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_FALSE) { - value = false - } else { - e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_NULL[0]: - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_NULL) { - value = false - } else { - e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - default: - e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - return value, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.ReadI64(ctx) - return int8(v), err -} - -func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { - v, err := p.ReadI64(ctx) - return int16(v), err -} - -func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { - v, err := p.ReadI64(ctx) - return int32(v), err -} - -func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else 
if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.writer.Flush()) -} - -func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TSimpleJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: - if _, e := p.write(JSON_COMMA); e != nil { - return NewTProtocolException(e) - } - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if _, e := p.write(JSON_COLON); e != nil { - return NewTProtocolException(e) - } - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_LIST) - case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputBool(value bool) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if value { - v = string(JSON_TRUE) - } else { - v = string(JSON_FALSE) - } - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputNull() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_NULL); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputF64(value float64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(value) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(value, 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(value, -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - v = strconv.FormatFloat(value, 'g', -1, 64) - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputI64(value int64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - 
cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - v := strconv.FormatInt(value, 10) - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputString(s string) error { - if e := p.OutputPreValue(); e != nil { - return e - } - if e := p.OutputStringData(jsonQuote(s)); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputStringData(s string) error { - _, e := p.write([]byte(s)) - return NewTProtocolException(e) -} - -func (p *TSimpleJSONProtocol) OutputObjectBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) - return nil -} - -func (p *TSimpleJSONProtocol) OutputObjectEnd() error { - if _, e := p.write(JSON_RBRACE); e != nil { - return NewTProtocolException(e) - } - _, ok := p.dumpContext.pop() - if !ok { - return errEmptyJSONContextStack - } - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputListBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) - return nil -} - -func (p *TSimpleJSONProtocol) OutputListEnd() error { - if _, e := p.write(JSON_RBRACKET); e != nil { - return NewTProtocolException(e) - } - _, ok := p.dumpContext.pop() - if !ok { - return errEmptyJSONContextStack - } - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.OutputI64(int64(elemType)); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePreValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt, ok := p.parseContextStack.peek() - if !ok { - return errEmptyJSONContextStack - } - b, _ := p.reader.Peek(1) - switch cxt { - case _CONTEXT_IN_LIST: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACKET[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - case _CONTEXT_IN_OBJECT_NEXT_KEY: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if len(b) > 0 { - switch b[0] { - case JSON_COLON[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, 
e) - } - } - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePostValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt, ok := p.parseContextStack.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_LIST) - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) - } - return nil -} - -func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { - for { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return nil - } - switch b[0] { - case ' ', '\r', '\n', '\t': - p.reader.ReadByte() - continue - default: - break - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - v, ok := jsonUnquote(string(JSON_QUOTE) + line) - if !ok { - return "", NewTProtocolException(err) - } - return v, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - str := string(JSON_QUOTE) + line + s - v, ok := jsonUnquote(str) - if !ok { - e := fmt.Errorf("Unable to parse as JSON string %s", str) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { - line, err := p.reader.ReadBytes(JSON_QUOTE) - if err != nil { - return line, NewTProtocolException(err) - } - line2 := line[0 : len(line)-1] - l := len(line2) - if (l % 4) != 0 { - pad := 4 - (l % 4) - fill := [...]byte{'=', '=', '='} - line2 = append(line2, fill[:pad]...) 
- l = len(line2) - } - output := make([]byte, base64.StdEncoding.DecodedLen(l)) - n, err := base64.StdEncoding.Decode(output, line2) - return output[0:n], NewTProtocolException(err) -} - -func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value int64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Int64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { - if err := p.ParsePreValue(); err != nil { - return false, err - } - var b []byte - b, err := p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) > 0 && b[0] == JSON_LBRACE[0] { - p.reader.ReadByte() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) - return false, nil - } else if p.safePeekContains(JSON_NULL) { - return true, nil - } - e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) - return false, NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TSimpleJSONProtocol) ParseObjectEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt, _ := p.parseContextStack.peek() - if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { - e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACE[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', '}': - break - } - } - p.parseContextStack.pop() - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { - if e := p.ParsePreValue(); e != nil { - return false, e - } - var b []byte - b, err = p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) - p.reader.ReadByte() - isNull = false - } else if p.safePeekContains(JSON_NULL) { - isNull = true - } else { - err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) - } - return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) -} - -func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - bElemType, _, err := p.ParseI64() - elemType = TType(bElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err2 := p.ParseI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TSimpleJSONProtocol) ParseListEnd() error { - if isNull, err := 
p.readIfNull(); isNull || err != nil { - return err - } - cxt, _ := p.parseContextStack.peek() - if cxt != _CONTEXT_IN_LIST { - e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACKET[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): - break - } - } - p.parseContextStack.pop() - if cxt, ok := p.parseContextStack.peek(); !ok { - return errEmptyJSONContextStack - } else if cxt == _CONTEXT_IN_TOPLEVEL { - return nil - } - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { - e := p.readNonSignificantWhitespace() - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - b, e := p.reader.Peek(1) - if len(b) > 0 { - c := b[0] - switch c { - case JSON_NULL[0]: - buf := make([]byte, len(JSON_NULL)) - _, e := p.reader.Read(buf) - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - if string(JSON_NULL) != string(buf) { - e = mismatch(string(JSON_NULL), string(buf)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return nil, VOID, nil - case JSON_QUOTE: - p.reader.ReadByte() - v, e := p.ParseStringBody() - if e != nil { - return v, UTF8, NewTProtocolException(e) - } - if v == JSON_INFINITY { - return INFINITY, DOUBLE, nil - } else if v == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY, DOUBLE, nil - } else if v == JSON_NAN { - return NAN, DOUBLE, nil - } - return v, UTF8, nil - case JSON_TRUE[0]: - buf := make([]byte, len(JSON_TRUE)) - _, e := p.reader.Read(buf) - if e != nil { - return true, BOOL, NewTProtocolException(e) - } - if string(JSON_TRUE) != string(buf) { - e := mismatch(string(JSON_TRUE), string(buf)) - return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return true, BOOL, nil - case JSON_FALSE[0]: - buf := make([]byte, len(JSON_FALSE)) - _, e := p.reader.Read(buf) - if e != nil { - return false, BOOL, NewTProtocolException(e) - } - if string(JSON_FALSE) != string(buf) { - e := mismatch(string(JSON_FALSE), string(buf)) - return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return false, BOOL, nil - case JSON_LBRACKET[0]: - _, e := p.reader.ReadByte() - return make([]interface{}, 0), LIST, NewTProtocolException(e) - case JSON_LBRACE[0]: - _, e := p.reader.ReadByte() - return make(map[string]interface{}), STRUCT, NewTProtocolException(e) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: - // assume numeric - v, e := p.readNumeric() - return v, DOUBLE, e - default: - e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - e = fmt.Errorf("Cannot read a single element while parsing JSON.") - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - -} - -func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { - cont := true - for cont { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return false, nil - } - switch b[0] { - default: - return false, nil - case JSON_NULL[0]: - cont = false - break - case ' ', '\n', '\r', '\t': - p.reader.ReadByte() - break - } - } - if 
p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - return true, nil - } - return false, nil -} - -func (p *TSimpleJSONProtocol) readQuoteIfNext() { - b, _ := p.reader.Peek(1) - if len(b) > 0 && b[0] == JSON_QUOTE { - p.reader.ReadByte() - } -} - -func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { - isNull, err := p.readIfNull() - if isNull || err != nil { - return NUMERIC_NULL, err - } - hasDecimalPoint := false - nextCanBeSign := true - hasE := false - MAX_LEN := 40 - buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) - continueFor := true - inQuotes := false - for continueFor { - c, err := p.reader.ReadByte() - if err != nil { - if err == io.EOF { - break - } - return NUMERIC_NULL, NewTProtocolException(err) - } - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - buf.WriteByte(c) - nextCanBeSign = false - case '.': - if hasDecimalPoint { - e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if hasE { - e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasDecimalPoint, nextCanBeSign = true, false - case 'e', 'E': - if hasE { - e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasE, nextCanBeSign = true, true - case '-', '+': - if !nextCanBeSign { - e := fmt.Errorf("Negative sign within number") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - nextCanBeSign = false - case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: - p.reader.UnreadByte() - continueFor = false - case JSON_NAN[0]: - if buf.Len() == 0 { - buffer := make([]byte, len(JSON_NAN)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NAN != string(buffer) { - e := mismatch(JSON_NAN, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NAN, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_INFINITY[0]: - if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { - buffer := make([]byte, len(JSON_INFINITY)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_INFINITY != string(buffer) { - e := mismatch(JSON_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return INFINITY, nil - } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { - buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) - buffer[0] = JSON_NEGATIVE_INFINITY[0] - buffer[1] = c - _, e := p.reader.Read(buffer[2:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NEGATIVE_INFINITY != string(buffer) { - e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NEGATIVE_INFINITY, nil - } else { - e := 
fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_QUOTE: - if !inQuotes { - inQuotes = true - } else { - break - } - default: - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - if buf.Len() == 0 { - e := fmt.Errorf("Unable to parse number from empty string ''") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return NewNumericFromJSONString(buf.String(), false), nil -} - -// Safely peeks into the buffer, reading only what is necessary -func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { - a, _ := p.reader.Peek(i + 1) - if len(a) < (i+1) || a[i] != b[i] { - return false - } - } - return true -} - -// Reset the context stack to its initial state. -func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} - p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} -} - -func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { - n, err := p.writer.Write(b) - if err != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - } - return n, err -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) -} - -var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go deleted file mode 100644 index 563cbfc694a..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - "time" -) - -// ErrAbandonRequest is a special error server handler implementations can -// return to indicate that the request has been abandoned. -// -// TSimpleServer will check for this error, and close the client connection -// instead of writing the response/error back to the client. -// -// It shall only be used when the server handler implementation know that the -// client already abandoned the request (by checking that the passed in context -// is already canceled, for example). 
-var ErrAbandonRequest = errors.New("request abandoned") - -// ServerConnectivityCheckInterval defines the ticker interval used by -// connectivity check in thrift compiled TProcessorFunc implementations. -// -// It's defined as a variable instead of constant, so that thrift server -// implementations can change its value to control the behavior. -// -// If it's changed to <=0, the feature will be disabled. -var ServerConnectivityCheckInterval = time.Millisecond * 5 - -/* - * This is not a typical TSimpleServer as it is not blocked after accept a socket. - * It is more like a TThreadedServer that can handle different connections in different goroutines. - * This will work if golang user implements a conn-pool like thing in client side. - */ -type TSimpleServer struct { - closed int32 - wg sync.WaitGroup - mu sync.Mutex - - processorFactory TProcessorFactory - serverTransport TServerTransport - inputTransportFactory TTransportFactory - outputTransportFactory TTransportFactory - inputProtocolFactory TProtocolFactory - outputProtocolFactory TProtocolFactory - - // Headers to auto forward in THeaderProtocol - forwardHeaders []string - - logger Logger -} - -func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) -} - -func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory4(NewTProcessorFactory(processor), - serverTransport, - transportFactory, - protocolFactory, - ) -} - -func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(NewTProcessorFactory(processor), - serverTransport, - inputTransportFactory, - outputTransportFactory, - inputProtocolFactory, - outputProtocolFactory, - ) -} - -func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - NewTTransportFactory(), - NewTTransportFactory(), - NewTBinaryProtocolFactoryDefault(), - NewTBinaryProtocolFactoryDefault(), - ) -} - -func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - transportFactory, - transportFactory, - protocolFactory, - protocolFactory, - ) -} - -func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return &TSimpleServer{ - processorFactory: processorFactory, - serverTransport: serverTransport, - inputTransportFactory: inputTransportFactory, - outputTransportFactory: outputTransportFactory, - inputProtocolFactory: inputProtocolFactory, - outputProtocolFactory: outputProtocolFactory, - } -} - -func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { - return p.processorFactory -} - -func (p *TSimpleServer) ServerTransport() TServerTransport { - return p.serverTransport -} - 
-func (p *TSimpleServer) InputTransportFactory() TTransportFactory { - return p.inputTransportFactory -} - -func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { - return p.outputTransportFactory -} - -func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { - return p.inputProtocolFactory -} - -func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { - return p.outputProtocolFactory -} - -func (p *TSimpleServer) Listen() error { - return p.serverTransport.Listen() -} - -// SetForwardHeaders sets the list of header keys that will be auto forwarded -// while using THeaderProtocol. -// -// "forward" means that when the server is also a client to other upstream -// thrift servers, the context object user gets in the processor functions will -// have both read and write headers set, with write headers being forwarded. -// Users can always override the write headers by calling SetWriteHeaderList -// before calling thrift client functions. -func (p *TSimpleServer) SetForwardHeaders(headers []string) { - size := len(headers) - if size == 0 { - p.forwardHeaders = nil - return - } - - keys := make([]string, size) - copy(keys, headers) - p.forwardHeaders = keys -} - -// SetLogger sets the logger used by this TSimpleServer. -// -// If no logger was set before Serve is called, a default logger using standard -// log library will be used. -func (p *TSimpleServer) SetLogger(logger Logger) { - p.logger = logger -} - -func (p *TSimpleServer) innerAccept() (int32, error) { - client, err := p.serverTransport.Accept() - p.mu.Lock() - defer p.mu.Unlock() - closed := atomic.LoadInt32(&p.closed) - if closed != 0 { - return closed, nil - } - if err != nil { - return 0, err - } - if client != nil { - p.wg.Add(1) - go func() { - defer p.wg.Done() - if err := p.processRequests(client); err != nil { - p.logger(fmt.Sprintf("error processing request: %v", err)) - } - }() - } - return 0, nil -} - -func (p *TSimpleServer) AcceptLoop() error { - for { - closed, err := p.innerAccept() - if err != nil { - return err - } - if closed != 0 { - return nil - } - } -} - -func (p *TSimpleServer) Serve() error { - p.logger = fallbackLogger(p.logger) - - err := p.Listen() - if err != nil { - return err - } - p.AcceptLoop() - return nil -} - -func (p *TSimpleServer) Stop() error { - p.mu.Lock() - defer p.mu.Unlock() - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - atomic.StoreInt32(&p.closed, 1) - p.serverTransport.Interrupt() - p.wg.Wait() - return nil -} - -// If err is actually EOF, return nil, otherwise return err as-is. -func treatEOFErrorsAsNil(err error) error { - if err == nil { - return nil - } - if errors.Is(err, io.EOF) { - return nil - } - var te TTransportException - if errors.As(err, &te) && te.TypeId() == END_OF_FILE { - return nil - } - return err -} - -func (p *TSimpleServer) processRequests(client TTransport) (err error) { - defer func() { - err = treatEOFErrorsAsNil(err) - }() - - processor := p.processorFactory.GetProcessor(client) - inputTransport, err := p.inputTransportFactory.GetTransport(client) - if err != nil { - return err - } - inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) - var outputTransport TTransport - var outputProtocol TProtocol - - // for THeaderProtocol, we must use the same protocol instance for - // input and output so that the response is in the same dialect that - // the server detected the request was in. 
- headerProtocol, ok := inputProtocol.(*THeaderProtocol) - if ok { - outputProtocol = inputProtocol - } else { - oTrans, err := p.outputTransportFactory.GetTransport(client) - if err != nil { - return err - } - outputTransport = oTrans - outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) - } - - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - for { - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - - ctx := SetResponseHelper( - defaultCtx, - TResponseHelper{ - THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), - }, - ) - if headerProtocol != nil { - // We need to call ReadFrame here, otherwise we won't - // get any headers on the AddReadTHeaderToContext call. - // - // ReadFrame is safe to be called multiple times so it - // won't break when it's called again later when we - // actually start to read the message. - if err := headerProtocol.ReadFrame(ctx); err != nil { - return err - } - ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) - ctx = SetWriteHeaderList(ctx, p.forwardHeaders) - } - - ok, err := processor.Process(ctx, inputProtocol, outputProtocol) - if errors.Is(err, ErrAbandonRequest) { - return client.Close() - } - if errors.As(err, new(TTransportException)) && err != nil { - return err - } - var tae TApplicationException - if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { - continue - } - if !ok { - break - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go deleted file mode 100644 index e911bf16681..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "net" - "time" -) - -type TSocket struct { - conn *socketConn - addr net.Addr - cfg *TConfiguration - - connectTimeout time.Duration - socketTimeout time.Duration -} - -// Deprecated: Use NewTSocketConf instead. -func NewTSocket(hostPort string) (*TSocket, error) { - return NewTSocketConf(hostPort, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. 
-// -// Example: -// -// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ -// ConnectTimeout: time.Second, // Use 0 for no timeout -// SocketTimeout: time.Second, // Use 0 for no timeout -// }) -func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", hostPort) - if err != nil { - return nil, err - } - return NewTSocketFromAddrConf(addr, conf), nil -} - -// Deprecated: Use NewTSocketConf instead. -func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { - return NewTSocketConf(hostPort, &TConfiguration{ - ConnectTimeout: connTimeout, - SocketTimeout: soTimeout, - - noPropagation: true, - }) -} - -// NewTSocketFromAddrConf creates a TSocket from a net.Addr -func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { - return &TSocket{ - addr: addr, - cfg: conf, - } -} - -// Deprecated: Use NewTSocketFromAddrConf instead. -func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { - return NewTSocketFromAddrConf(addr, &TConfiguration{ - ConnectTimeout: connTimeout, - SocketTimeout: soTimeout, - - noPropagation: true, - }) -} - -// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. -func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { - return &TSocket{ - conn: wrapSocketConn(conn), - addr: conn.RemoteAddr(), - cfg: conf, - } -} - -// Deprecated: Use NewTSocketFromConnConf instead. -func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { - return NewTSocketFromConnConf(conn, &TConfiguration{ - SocketTimeout: socketTimeout, - - noPropagation: true, - }) -} - -// SetTConfiguration implements TConfigurationSetter. -// -// It can be used to set connect and socket timeouts. -func (p *TSocket) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Sets the connect timeout -func (p *TSocket) SetConnTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{ - noPropagation: true, - } - } - p.cfg.ConnectTimeout = timeout - return nil -} - -// Sets the socket timeout -func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{ - noPropagation: true, - } - } - p.cfg.SocketTimeout = timeout - return nil -} - -func (p *TSocket) pushDeadline(read, write bool) { - var t time.Time - if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { - t = time.Now().Add(time.Duration(timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSocket) Open() error { - if p.conn.isValid() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = createSocketConnFromReturn(net.DialTimeout( - p.addr.Network(), - p.addr.String(), - p.cfg.GetConnectTimeout(), - )); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSocket) IsOpen() bool { - return p.conn.IsOpen() -} - -// Closes the socket. -func (p *TSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -//Returns the remote address of the socket. -func (p *TSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSocket) Read(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between - // p.pushDeadline and p.conn.Read could cause the deadline set inside - // p.pushDeadline being reset, thus need to be avoided. - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSocket) Write(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSocket) Interrupt() error { - if !p.conn.isValid() { - return nil - } - return p.conn.Close() -} - -func (p *TSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go deleted file mode 100644 index c1cc30c6cc5..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "net" -) - -// socketConn is a wrapped net.Conn that tries to do connectivity check. -type socketConn struct { - net.Conn - - buffer [1]byte -} - -var _ net.Conn = (*socketConn)(nil) - -// createSocketConnFromReturn is a language sugar to help create socketConn from -// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. -func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { - if err != nil { - return nil, err - } - return &socketConn{ - Conn: conn, - }, nil -} - -// wrapSocketConn wraps an existing net.Conn into *socketConn. -func wrapSocketConn(conn net.Conn) *socketConn { - // In case conn is already wrapped, - // return it as-is and avoid double wrapping. - if sc, ok := conn.(*socketConn); ok { - return sc - } - - return &socketConn{ - Conn: conn, - } -} - -// isValid checks whether there's a valid connection. -// -// It's nil safe, and returns false if sc itself is nil, or if the underlying -// connection is nil. -// -// It's the same as the previous implementation of TSocket.IsOpen and -// TSSLSocket.IsOpen before we added connectivity check. -func (sc *socketConn) isValid() bool { - return sc != nil && sc.Conn != nil -} - -// IsOpen checks whether the connection is open. -// -// It's nil safe, and returns false if sc itself is nil, or if the underlying -// connection is nil. -// -// Otherwise, it tries to do a connectivity check and returns the result. -// -// It also has the side effect of resetting the previously set read deadline on -// the socket. As a result, it shouldn't be called between setting read deadline -// and doing actual read. -func (sc *socketConn) IsOpen() bool { - if !sc.isValid() { - return false - } - return sc.checkConn() == nil -} - -// Read implements io.Reader. -// -// On Windows, it behaves the same as the underlying net.Conn.Read. -// -// On non-Windows, it treats len(p) == 0 as a connectivity check instead of -// readability check, which means instead of blocking until there's something to -// read (readability check), or always return (0, nil) (the default behavior of -// go's stdlib implementation on non-Windows), it never blocks, and will return -// an error if the connection is lost. -func (sc *socketConn) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, sc.read0() - } - - return sc.Conn.Read(p) -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go deleted file mode 100644 index f5fab3ab653..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build !windows - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" - "syscall" - "time" -) - -// We rely on this variable to be the zero time, -// but define it as global variable to avoid repetitive allocations. -// Please DO NOT mutate this variable in any way. -var zeroTime time.Time - -func (sc *socketConn) read0() error { - return sc.checkConn() -} - -func (sc *socketConn) checkConn() error { - syscallConn, ok := sc.Conn.(syscall.Conn) - if !ok { - // No way to check, return nil - return nil - } - - // The reading about to be done here is non-blocking so we don't really - // need a read deadline. We just need to clear the previously set read - // deadline, if any. - sc.Conn.SetReadDeadline(zeroTime) - - rc, err := syscallConn.SyscallConn() - if err != nil { - return err - } - - var n int - - if readErr := rc.Read(func(fd uintptr) bool { - n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) - return true - }); readErr != nil { - return readErr - } - - if n > 0 { - // We got something, which means we are good - return nil - } - - if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { - // This means the connection is still open but we don't have - // anything to read right now. - return nil - } - - if err != nil { - return err - } - - // At this point, it means the other side already closed the connection. - return io.EOF -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go deleted file mode 100644 index 679838c3b64..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build windows - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -func (sc *socketConn) read0() error { - // On windows, we fallback to the default behavior of reading 0 bytes. - var p []byte - _, err := sc.Conn.Read(p) - return err -} - -func (sc *socketConn) checkConn() error { - // On windows, we always return nil for this check. 
- return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go deleted file mode 100644 index 907afca326f..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "net" - "time" -) - -type TSSLServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - interrupted bool - cfg *tls.Config -} - -func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { - return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) -} - -func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil -} - -func (p *TSSLServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TSSLServerSocket) Accept() (TTransport, error) { - if p.interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TSSLServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSSLServerSocket) Open() error { - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TSSLServerSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSSLServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TSSLServerSocket) Interrupt() error { - p.interrupted = true - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go deleted file mode 100644 index 6359a74ceb2..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "crypto/tls" - "net" - "time" -) - -type TSSLSocket struct { - conn *socketConn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. - hostPort string - // addr is nil when hostPort is not "", and is only used when the - // TSSLSocket is constructed from a net.Addr. - addr net.Addr - - cfg *TConfiguration -} - -// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. -// -// Example: -// -// trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{ -// ConnectTimeout: time.Second, // Use 0 for no timeout -// SocketTimeout: time.Second, // Use 0 for no timeout -// }) -func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) { - if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - return &TSSLSocket{ - hostPort: hostPort, - cfg: conf, - }, nil -} - -// Deprecated: Use NewTSSLSocketConf instead. -func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { - return NewTSSLSocketConf(hostPort, &TConfiguration{ - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTSSLSocketConf instead. -func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { - return NewTSSLSocketConf(hostPort, &TConfiguration{ - ConnectTimeout: connectTimeout, - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. 
-func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { - return &TSSLSocket{ - addr: addr, - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketFromAddrConf instead. -func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { - return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ - ConnectTimeout: connectTimeout, - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. -func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { - return &TSSLSocket{ - conn: wrapSocketConn(conn), - addr: conn.RemoteAddr(), - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketFromConnConf instead. -func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { - return NewTSSLSocketFromConnConf(conn, &TConfiguration{ - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// SetTConfiguration implements TConfigurationSetter. -// -// It can be used to change connect and socket timeouts. -func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Sets the connect timeout -func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{} - } - p.cfg.ConnectTimeout = timeout - return nil -} - -// Sets the socket timeout -func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{} - } - p.cfg.SocketTimeout = timeout - return nil -} - -func (p *TSSLSocket) pushDeadline(read, write bool) { - var t time.Time - if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { - t = time.Now().Add(time.Duration(timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( - &net.Dialer{ - Timeout: p.cfg.GetConnectTimeout(), - }, - "tcp", - p.hostPort, - p.cfg.GetTLSConfig(), - )); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } else { - if p.conn.isValid() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( - &net.Dialer{ - Timeout: p.cfg.GetConnectTimeout(), - }, - p.addr.Network(), - p.addr.String(), - p.cfg.GetTLSConfig(), - )); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSSLSocket) IsOpen() bool { - return p.conn.IsOpen() -} - -// Closes the socket. 
-func (p *TSSLSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -func (p *TSSLSocket) Read(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between - // p.pushDeadline and p.conn.Read could cause the deadline set inside - // p.pushDeadline being reset, thus need to be avoided. - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSSLSocket) Write(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSSLSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSSLSocket) Interrupt() error { - if !p.conn.isValid() { - return nil - } - return p.conn.Close() -} - -func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -var _ TConfigurationSetter = (*TSSLSocket)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go deleted file mode 100644 index d68d0b3179c..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "errors" - "io" -) - -var errTransportInterrupted = errors.New("Transport Interrupted") - -type Flusher interface { - Flush() (err error) -} - -type ContextFlusher interface { - Flush(ctx context.Context) (err error) -} - -type ReadSizeProvider interface { - RemainingBytes() (num_bytes uint64) -} - -// Encapsulates the I/O layer -type TTransport interface { - io.ReadWriteCloser - ContextFlusher - ReadSizeProvider - - // Opens the transport for communication - Open() error - - // Returns true if the transport is open - IsOpen() bool -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// This is "enhanced" transport with extra capabilities. You need to use one of these -// to construct protocol. -// Notably, TSocket does not implement this interface, and it is always a mistake to use -// TSocket directly in protocol. 
-type TRichTransport interface { - io.ReadWriter - io.ByteReader - io.ByteWriter - stringWriter - ContextFlusher - ReadSizeProvider -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go deleted file mode 100644 index 0a3f07646d3..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// Thrift Transport exception -type TTransportException interface { - TException - TypeId() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 -) - -type tTransportException struct { - typeId int - err error - msg string -} - -var _ TTransportException = (*tTransportException)(nil) - -func (tTransportException) TExceptionType() TExceptionType { - return TExceptionTypeTransport -} - -func (p *tTransportException) TypeId() int { - return p.typeId -} - -func (p *tTransportException) Error() string { - return p.msg -} - -func (p *tTransportException) Err() error { - return p.err -} - -func (p *tTransportException) Unwrap() error { - return p.err -} - -func (p *tTransportException) Timeout() bool { - return p.typeId == TIMED_OUT -} - -func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{ - typeId: t, - err: errors.New(e), - msg: e, - } -} - -func NewTTransportExceptionFromError(e error) TTransportException { - if e == nil { - return nil - } - - if t, ok := e.(TTransportException); ok { - return t - } - - te := &tTransportException{ - typeId: UNKNOWN_TRANSPORT_EXCEPTION, - err: e, - msg: e.Error(), - } - - if isTimeoutError(e) { - te.typeId = TIMED_OUT - return te - } - - if errors.Is(e, io.EOF) { - te.typeId = END_OF_FILE - return te - } - - return te -} - -func prependTTransportException(prepend string, e TTransportException) TTransportException { - return &tTransportException{ - typeId: e.TypeId(), - err: e, - msg: prepend + e.Error(), - } -} - -// isTimeoutError returns true when err is an error caused by timeout. -// -// Note that this also includes TTransportException wrapped timeout errors. 
-func isTimeoutError(err error) bool { - var t timeoutable - if errors.As(err, &t) { - return t.Timeout() - } - return false -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go deleted file mode 100644 index c805807940a..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory class used to create wrapped instance of Transports. -// This is used primarily in servers, which get Transports from -// a ServerTransport and then may want to mutate them (i.e. create -// a BufferedTransport from the underlying base transport) -type TTransportFactory interface { - GetTransport(trans TTransport) (TTransport, error) -} - -type tTransportFactory struct{} - -// Return a wrapped instance of the base Transport. -func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return trans, nil -} - -func NewTTransportFactory() TTransportFactory { - return &tTransportFactory{} -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go deleted file mode 100644 index b24f1b05c45..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -// Type constants in the Thrift protocol -type TType byte - -const ( - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - //BINARY = 18 wrong and unused -) - -var typeNames = map[int]string{ - STOP: "STOP", - VOID: "VOID", - BOOL: "BOOL", - BYTE: "BYTE", - DOUBLE: "DOUBLE", - I16: "I16", - I32: "I32", - I64: "I64", - STRING: "STRING", - STRUCT: "STRUCT", - MAP: "MAP", - SET: "SET", - LIST: "LIST", - UTF8: "UTF8", - UTF16: "UTF16", -} - -func (p TType) String() string { - if s, ok := typeNames[int(p)]; ok { - return s - } - return "Unknown" -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go deleted file mode 100644 index 259943a627c..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. - */ - -package thrift - -import ( - "compress/zlib" - "context" - "io" -) - -// TZlibTransportFactory is a factory for TZlibTransport instances -type TZlibTransportFactory struct { - level int - factory TTransportFactory -} - -// TZlibTransport is a TTransport implementation that makes use of zlib compression. 
-type TZlibTransport struct { - reader io.ReadCloser - transport TTransport - writer *zlib.Writer -} - -// GetTransport constructs a new instance of NewTZlibTransport -func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if p.factory != nil { - // wrap other factory - var err error - trans, err = p.factory.GetTransport(trans) - if err != nil { - return nil, err - } - } - return NewTZlibTransport(trans, p.level) -} - -// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory -func NewTZlibTransportFactory(level int) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: nil} -} - -// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory -// as a wrapper over existing transport factory -func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: factory} -} - -// NewTZlibTransport constructs a new instance of TZlibTransport -func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { - w, err := zlib.NewWriterLevel(trans, level) - if err != nil { - return nil, err - } - - return &TZlibTransport{ - writer: w, - transport: trans, - }, nil -} - -// Close closes the reader and writer (flushing any unwritten data) and closes -// the underlying transport. -func (z *TZlibTransport) Close() error { - if z.reader != nil { - if err := z.reader.Close(); err != nil { - return err - } - } - if err := z.writer.Close(); err != nil { - return err - } - return z.transport.Close() -} - -// Flush flushes the writer and its underlying transport. -func (z *TZlibTransport) Flush(ctx context.Context) error { - if err := z.writer.Flush(); err != nil { - return err - } - return z.transport.Flush(ctx) -} - -// IsOpen returns true if the transport is open -func (z *TZlibTransport) IsOpen() bool { - return z.transport.IsOpen() -} - -// Open opens the transport for communication -func (z *TZlibTransport) Open() error { - return z.transport.Open() -} - -func (z *TZlibTransport) Read(p []byte) (int, error) { - if z.reader == nil { - r, err := zlib.NewReader(z.transport) - if err != nil { - return 0, NewTTransportExceptionFromError(err) - } - z.reader = r - } - - return z.reader.Read(p) -} - -// RemainingBytes returns the size in bytes of the data that is still to be -// read. -func (z *TZlibTransport) RemainingBytes() uint64 { - return z.transport.RemainingBytes() -} - -func (z *TZlibTransport) Write(p []byte) (int, error) { - return z.writer.Write(p) -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(z.transport, conf) -} - -var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go deleted file mode 100644 index 9b4a54afc66..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.21.0" - "go.opentelemetry.io/otel/trace" -) - -const ( - keyInstrumentationLibraryName = "otel.library.name" - keyInstrumentationLibraryVersion = "otel.library.version" - keyError = "error" - keySpanKind = "span.kind" - keyStatusCode = "otel.status_code" - keyStatusMessage = "otel.status_description" - keyDroppedAttributeCount = "otel.event.dropped_attributes_count" - keyEventName = "event" -) - -// New returns an OTel Exporter implementation that exports the collected -// spans to Jaeger. -func New(endpointOption EndpointOption) (*Exporter, error) { - uploader, err := endpointOption.newBatchUploader() - if err != nil { - return nil, err - } - - // Fetch default service.name from default resource for backup - var defaultServiceName string - defaultResource := resource.Default() - if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { - defaultServiceName = value.AsString() - } - if defaultServiceName == "" { - return nil, fmt.Errorf("failed to get service name from default resource") - } - - stopCh := make(chan struct{}) - e := &Exporter{ - uploader: uploader, - stopCh: stopCh, - defaultServiceName: defaultServiceName, - } - return e, nil -} - -// Exporter exports OpenTelemetry spans to a Jaeger agent or collector. -type Exporter struct { - uploader batchUploader - stopOnce sync.Once - stopCh chan struct{} - defaultServiceName string -} - -var _ sdktrace.SpanExporter = (*Exporter)(nil) - -// ExportSpans transforms and exports OpenTelemetry spans to Jaeger. -func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { - // Return fast if context is already canceled or Exporter shutdown. - select { - case <-ctx.Done(): - return ctx.Err() - case <-e.stopCh: - return nil - default: - } - - // Cancel export if Exporter is shutdown. - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - go func(ctx context.Context, cancel context.CancelFunc) { - select { - case <-ctx.Done(): - case <-e.stopCh: - cancel() - } - }(ctx, cancel) - - for _, batch := range jaegerBatchList(spans, e.defaultServiceName) { - if err := e.uploader.upload(ctx, batch); err != nil { - return err - } - } - - return nil -} - -// Shutdown stops the Exporter. This will close all connections and release -// all resources held by the Exporter. -func (e *Exporter) Shutdown(ctx context.Context) error { - // Stop any active and subsequent exports. - e.stopOnce.Do(func() { close(e.stopCh) }) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - return e.uploader.shutdown(ctx) -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
-func (e *Exporter) MarshalLog() interface{} { - return struct { - Type string - }{ - Type: "jaeger", - } -} - -func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span { - attr := ss.Attributes() - tags := make([]*gen.Tag, 0, len(attr)) - for _, kv := range attr { - tag := keyValueToTag(kv) - if tag != nil { - tags = append(tags, tag) - } - } - - if is := ss.InstrumentationScope(); is.Name != "" { - tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name)) - if is.Version != "" { - tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version)) - } - } - - if ss.SpanKind() != trace.SpanKindInternal { - tags = append(tags, - getStringTag(keySpanKind, ss.SpanKind().String()), - ) - } - - if ss.Status().Code != codes.Unset { - switch ss.Status().Code { - case codes.Ok: - tags = append(tags, getStringTag(keyStatusCode, "OK")) - case codes.Error: - tags = append(tags, getBoolTag(keyError, true)) - tags = append(tags, getStringTag(keyStatusCode, "ERROR")) - } - if ss.Status().Description != "" { - tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description)) - } - } - - var logs []*gen.Log - for _, a := range ss.Events() { - nTags := len(a.Attributes) - if a.Name != "" { - nTags++ - } - if a.DroppedAttributeCount != 0 { - nTags++ - } - fields := make([]*gen.Tag, 0, nTags) - if a.Name != "" { - // If an event contains an attribute with the same key, it needs - // to be given precedence and overwrite this. - fields = append(fields, getStringTag(keyEventName, a.Name)) - } - for _, kv := range a.Attributes { - tag := keyValueToTag(kv) - if tag != nil { - fields = append(fields, tag) - } - } - if a.DroppedAttributeCount != 0 { - fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount))) - } - logs = append(logs, &gen.Log{ - Timestamp: a.Time.UnixNano() / 1000, - Fields: fields, - }) - } - - var refs []*gen.SpanRef - for _, link := range ss.Links() { - tid := link.SpanContext.TraceID() - sid := link.SpanContext.SpanID() - refs = append(refs, &gen.SpanRef{ - TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), - TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), - SpanId: int64(binary.BigEndian.Uint64(sid[:])), - RefType: gen.SpanRefType_FOLLOWS_FROM, - }) - } - - tid := ss.SpanContext().TraceID() - sid := ss.SpanContext().SpanID() - psid := ss.Parent().SpanID() - return &gen.Span{ - TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), - TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), - SpanId: int64(binary.BigEndian.Uint64(sid[:])), - ParentSpanId: int64(binary.BigEndian.Uint64(psid[:])), - OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv" - Flags: int32(ss.SpanContext().TraceFlags()), - StartTime: ss.StartTime().UnixNano() / 1000, - Duration: ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000, - Tags: tags, - Logs: logs, - References: refs, - } -} - -func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag { - var tag *gen.Tag - switch keyValue.Value.Type() { - case attribute.STRING: - s := keyValue.Value.AsString() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VStr: &s, - VType: gen.TagType_STRING, - } - case attribute.BOOL: - b := keyValue.Value.AsBool() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VBool: &b, - VType: gen.TagType_BOOL, - } - case attribute.INT64: - i := keyValue.Value.AsInt64() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VLong: &i, - VType: gen.TagType_LONG, - } - case attribute.FLOAT64: - f := keyValue.Value.AsFloat64() - tag = 
&gen.Tag{ - Key: string(keyValue.Key), - VDouble: &f, - VType: gen.TagType_DOUBLE, - } - case attribute.BOOLSLICE, - attribute.INT64SLICE, - attribute.FLOAT64SLICE, - attribute.STRINGSLICE: - data, _ := json.Marshal(keyValue.Value.AsInterface()) - a := (string)(data) - tag = &gen.Tag{ - Key: string(keyValue.Key), - VStr: &a, - VType: gen.TagType_STRING, - } - } - return tag -} - -func getInt64Tag(k string, i int64) *gen.Tag { - return &gen.Tag{ - Key: k, - VLong: &i, - VType: gen.TagType_LONG, - } -} - -func getStringTag(k, s string) *gen.Tag { - return &gen.Tag{ - Key: k, - VStr: &s, - VType: gen.TagType_STRING, - } -} - -func getBoolTag(k string, b bool) *gen.Tag { - return &gen.Tag{ - Key: k, - VBool: &b, - VType: gen.TagType_BOOL, - } -} - -// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch. -func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch { - if len(ssl) == 0 { - return nil - } - - batchDict := make(map[attribute.Distinct]*gen.Batch) - - for _, ss := range ssl { - if ss == nil { - continue - } - - resourceKey := ss.Resource().Equivalent() - batch, bOK := batchDict[resourceKey] - if !bOK { - batch = &gen.Batch{ - Process: process(ss.Resource(), defaultServiceName), - Spans: []*gen.Span{}, - } - } - batch.Spans = append(batch.Spans, spanToThrift(ss)) - batchDict[resourceKey] = batch - } - - // Transform the categorized map into a slice - batchList := make([]*gen.Batch, 0, len(batchDict)) - for _, batch := range batchDict { - batchList = append(batchList, batch) - } - return batchList -} - -// process transforms an OTel Resource into a jaeger Process. -func process(res *resource.Resource, defaultServiceName string) *gen.Process { - var process gen.Process - - var serviceName attribute.KeyValue - if res != nil { - for iter := res.Iter(); iter.Next(); { - if iter.Attribute().Key == semconv.ServiceNameKey { - serviceName = iter.Attribute() - // Don't convert service.name into tag. - continue - } - if tag := keyValueToTag(iter.Attribute()); tag != nil { - process.Tags = append(process.Tags, tag) - } - } - } - - // If no service.name is contained in a Span's Resource, - // that field MUST be populated from the default Resource. - if serviceName.Value.AsString() == "" { - serviceName = semconv.ServiceName(defaultServiceName) - } - process.ServiceName = serviceName.Value.AsString() - - return &process -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go deleted file mode 100644 index 88055c8a300..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "fmt" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/go-logr/logr" -) - -// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is -// different than the current conn then the new address is dialed and the conn is swapped. -type reconnectingUDPConn struct { - // `sync/atomic` expects the first word in an allocated struct to be 64-bit - // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details. - bufferBytes int64 - hostPort string - resolveFunc resolveFunc - dialFunc dialFunc - logger logr.Logger - - connMtx sync.RWMutex - conn *net.UDPConn - destAddr *net.UDPAddr - closeChan chan struct{} -} - -type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) -type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) - -// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is -// different than the current conn then the new address is dialed and the conn is swapped. -func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger logr.Logger) (*reconnectingUDPConn, error) { - conn := &reconnectingUDPConn{ - hostPort: hostPort, - resolveFunc: resolveFunc, - dialFunc: dialFunc, - logger: logger, - closeChan: make(chan struct{}), - bufferBytes: int64(bufferBytes), - } - - if err := conn.attemptResolveAndDial(); err != nil { - conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout) - } - - go conn.reconnectLoop(resolveTimeout) - - return conn, nil -} - -func (c *reconnectingUDPConn) logf(format string, args ...interface{}) { - if c.logger != emptyLogger { - c.logger.Info(format, args...) 
- } -} - -func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { - ticker := time.NewTicker(resolveTimeout) - defer ticker.Stop() - - for { - select { - case <-c.closeChan: - return - case <-ticker.C: - if err := c.attemptResolveAndDial(); err != nil { - c.logf("%s", err.Error()) - } - } - } -} - -func (c *reconnectingUDPConn) attemptResolveAndDial() error { - newAddr, err := c.resolveFunc("udp", c.hostPort) - if err != nil { - return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) - } - - c.connMtx.RLock() - curAddr := c.destAddr - c.connMtx.RUnlock() - - // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn - if curAddr != nil && newAddr.String() == curAddr.String() { - return nil - } - - if err := c.attemptDialNewAddr(newAddr); err != nil { - return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) - } - - return nil -} - -func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { - connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) - if err != nil { - return err - } - - if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { - if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { - return err - } - } - - c.connMtx.Lock() - c.destAddr = newAddr - // store prev to close later - prevConn := c.conn - c.conn = connUDP - c.connMtx.Unlock() - - if prevConn != nil { - return prevConn.Close() - } - - return nil -} - -// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning. -func (c *reconnectingUDPConn) Write(b []byte) (int, error) { - var bytesWritten int - var err error - - c.connMtx.RLock() - conn := c.conn - c.connMtx.RUnlock() - - if conn == nil { - // if connection is not initialized indicate this with err in order to hook into retry logic - err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") - } else { - bytesWritten, err = conn.Write(b) - } - - if err == nil { - return bytesWritten, nil - } - - // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again - if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { - c.connMtx.RLock() - conn := c.conn - c.connMtx.RUnlock() - - return conn.Write(b) - } - - // return original error if reconn fails - return bytesWritten, err -} - -// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation. -func (c *reconnectingUDPConn) Close() error { - close(c.closeChan) - - // acquire rw lock before closing conn to ensure calls to Write drain - c.connMtx.Lock() - defer c.connMtx.Unlock() - - if c.conn != nil { - return c.conn.Close() - } - - return nil -} - -// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held -// and SetWriteBuffer is called store bufferBytes to be set for new conns. 
-func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { - var err error - - c.connMtx.RLock() - conn := c.conn - c.connMtx.RUnlock() - - if conn != nil { - err = c.conn.SetWriteBuffer(bytes) - } - - if err == nil { - atomic.StoreInt64(&c.bufferBytes, int64(bytes)) - } - - return err -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go deleted file mode 100644 index f65e3a6782a..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" - -import ( - "bytes" - "context" - "fmt" - "io" - "log" - "net/http" - "time" - - "github.com/go-logr/logr" - "github.com/go-logr/stdr" - - gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" -) - -// batchUploader send a batch of spans to Jaeger. -type batchUploader interface { - upload(context.Context, *gen.Batch) error - shutdown(context.Context) error -} - -// EndpointOption configures a Jaeger endpoint. -type EndpointOption interface { - newBatchUploader() (batchUploader, error) -} - -type endpointOptionFunc func() (batchUploader, error) - -func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) { - return fn() -} - -// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent -// over compact thrift protocol. This will use the following environment variables for -// configuration if no explicit option is provided: -// -// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host -// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port -// -// The passed options will take precedence over any environment variables and default values -// will be used if neither are provided. -func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption { - return endpointOptionFunc(func() (batchUploader, error) { - cfg := agentEndpointConfig{ - agentClientUDPParams{ - AttemptReconnecting: true, - Host: envOr(envAgentHost, "localhost"), - Port: envOr(envAgentPort, "6831"), - }, - } - for _, opt := range options { - cfg = opt.apply(cfg) - } - - client, err := newAgentClientUDP(cfg.agentClientUDPParams) - if err != nil { - return nil, err - } - - return &agentUploader{client: client}, nil - }) -} - -// AgentEndpointOption configures a Jaeger agent endpoint. -type AgentEndpointOption interface { - apply(agentEndpointConfig) agentEndpointConfig -} - -type agentEndpointConfig struct { - agentClientUDPParams -} - -type agentEndpointOptionFunc func(agentEndpointConfig) agentEndpointConfig - -func (fn agentEndpointOptionFunc) apply(cfg agentEndpointConfig) agentEndpointConfig { - return fn(cfg) -} - -// WithAgentHost sets a host to be used in the agent client endpoint. 
-// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable. -// If this option is not passed and the env var is not set, "localhost" will be used by default. -func WithAgentHost(host string) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.Host = host - return o - }) -} - -// WithAgentPort sets a port to be used in the agent client endpoint. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable. -// If this option is not passed and the env var is not set, "6831" will be used by default. -func WithAgentPort(port string) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.Port = port - return o - }) -} - -var emptyLogger = logr.Logger{} - -// WithLogger sets a logger to be used by agent client. -// WithLogger and WithLogr will overwrite each other. -func WithLogger(logger *log.Logger) AgentEndpointOption { - return WithLogr(stdr.New(logger)) -} - -// WithLogr sets a logr.Logger to be used by agent client. -// WithLogr and WithLogger will overwrite each other. -func WithLogr(logger logr.Logger) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.Logger = logger - return o - }) -} - -// WithDisableAttemptReconnecting sets option to disable reconnecting udp client. -func WithDisableAttemptReconnecting() AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.AttemptReconnecting = false - return o - }) -} - -// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint. -func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.AttemptReconnectInterval = interval - return o - }) -} - -// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent. -func WithMaxPacketSize(size int) AgentEndpointOption { - return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { - o.MaxPacketSize = size - return o - }) -} - -// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will -// use the following environment variables for configuration if no explicit option is provided: -// -// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector. -// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint. -// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint. -// -// The passed options will take precedence over any environment variables. -// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used. -// If neither values are provided for the username or the password, they will not be set since there is no default. 
-func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption { - return endpointOptionFunc(func() (batchUploader, error) { - cfg := collectorEndpointConfig{ - endpoint: envOr(envEndpoint, "http://localhost:14268/api/traces"), - username: envOr(envUser, ""), - password: envOr(envPassword, ""), - httpClient: http.DefaultClient, - } - - for _, opt := range options { - cfg = opt.apply(cfg) - } - - return &collectorUploader{ - endpoint: cfg.endpoint, - username: cfg.username, - password: cfg.password, - httpClient: cfg.httpClient, - }, nil - }) -} - -// CollectorEndpointOption configures a Jaeger collector endpoint. -type CollectorEndpointOption interface { - apply(collectorEndpointConfig) collectorEndpointConfig -} - -type collectorEndpointConfig struct { - // endpoint for sending spans directly to a collector. - endpoint string - - // username to be used for authentication with the collector endpoint. - username string - - // password to be used for authentication with the collector endpoint. - password string - - // httpClient to be used to make requests to the collector endpoint. - httpClient *http.Client -} - -type collectorEndpointOptionFunc func(collectorEndpointConfig) collectorEndpointConfig - -func (fn collectorEndpointOptionFunc) apply(cfg collectorEndpointConfig) collectorEndpointConfig { - return fn(cfg) -} - -// WithEndpoint is the URL for the Jaeger collector that spans are sent to. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable. -// If this option is not passed and the environment variable is not set, -// "http://localhost:14268/api/traces" will be used by default. -func WithEndpoint(endpoint string) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.endpoint = endpoint - return o - }) -} - -// WithUsername sets the username to be used in the authorization header sent for all requests to the collector. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_USER environment variable. -// If this option is not passed and the environment variable is not set, no username will be set. -func WithUsername(username string) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.username = username - return o - }) -} - -// WithPassword sets the password to be used in the authorization header sent for all requests to the collector. -// This option overrides any value set for the -// OTEL_EXPORTER_JAEGER_PASSWORD environment variable. -// If this option is not passed and the environment variable is not set, no password will be set. -func WithPassword(password string) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.password = password - return o - }) -} - -// WithHTTPClient sets the http client to be used to make request to the collector endpoint. -func WithHTTPClient(client *http.Client) CollectorEndpointOption { - return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { - o.httpClient = client - return o - }) -} - -// agentUploader implements batchUploader interface sending batches to -// Jaeger through the UDP agent. 
-type agentUploader struct { - client *agentClientUDP -} - -var _ batchUploader = (*agentUploader)(nil) - -func (a *agentUploader) shutdown(ctx context.Context) error { - done := make(chan error, 1) - go func() { - done <- a.client.Close() - }() - - select { - case <-ctx.Done(): - // Prioritize not blocking the calling thread and just leak the - // spawned goroutine to close the client. - return ctx.Err() - case err := <-done: - return err - } -} - -func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error { - return a.client.EmitBatch(ctx, batch) -} - -// collectorUploader implements batchUploader interface sending batches to -// Jaeger through the collector http endpoint. -type collectorUploader struct { - endpoint string - username string - password string - httpClient *http.Client -} - -var _ batchUploader = (*collectorUploader)(nil) - -func (c *collectorUploader) shutdown(ctx context.Context) error { - // The Exporter will cancel any active exports and will prevent all - // subsequent exports, so nothing to do here. - return nil -} - -func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error { - body, err := serialize(batch) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body) - if err != nil { - return err - } - if c.username != "" && c.password != "" { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Content-Type", "application/x-thrift") - - resp, err := c.httpClient.Do(req) - if err != nil { - return err - } - - _, _ = io.Copy(io.Discard, resp.Body) - if err = resp.Body.Close(); err != nil { - return err - } - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) - } - return nil -} - -func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { - buf := thrift.NewTMemoryBuffer() - if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil { - return nil, err - } - return buf.Buffer, nil -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4a8..00000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... 
-// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 2cb9c408f2e..00000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc3ef..00000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 7b6b685114a..00000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. 
The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. 
If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f9715341fa..00000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/modules.txt b/vendor/modules.txt index e2ac3cd9147..cde784f215d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -566,9 +566,6 @@ github.com/grpc-ecosystem/go-grpc-middleware/util/metautils github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 -## explicit -github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc # github.com/hashicorp/consul/api v1.25.1 ## explicit; go 1.19 github.com/hashicorp/consul/api @@ -1560,13 +1557,6 @@ go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc ## explicit; go 1.20 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/jaeger v1.17.0 -## explicit; go 1.19 -go.opentelemetry.io/otel/exporters/jaeger -go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent -go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger -go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore -go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift # go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.23.1 ## explicit; go 1.20 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc @@ -1698,7 +1688,6 @@ golang.org/x/mod/semver # golang.org/x/net v0.22.0 => golang.org/x/net v0.17.0 ## explicit; go 1.17 golang.org/x/net/bpf -golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http/httpproxy golang.org/x/net/http2
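For context on what consumers of the removed vendoring lose, below is a minimal, hedged sketch (not part of this diff) of how the deleted go.opentelemetry.io/otel/exporters/jaeger package was typically wired into an SDK tracer provider. It uses only the constructors and options documented in the deleted source above (New, WithCollectorEndpoint, WithEndpoint); after this change the package is no longer vendored, so code like this has to migrate to another span exporter.

```go
// Hedged sketch, assuming a caller of the now-removed Jaeger exporter.
// The jaeger.* calls below are the ones documented in the deleted source;
// the SDK wiring (NewTracerProvider/WithBatcher) is standard OTel SDK usage.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/jaeger" // removed by this change
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// WithCollectorEndpoint defaults to http://localhost:14268/api/traces
	// (or OTEL_EXPORTER_JAEGER_ENDPOINT); WithEndpoint overrides both.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://localhost:14268/api/traces"),
	))
	if err != nil {
		log.Fatal(err)
	}

	// The deleted Exporter implements sdktrace.SpanExporter, so it plugs
	// straight into a batching span processor.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```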
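The modules.txt hunk above also drops the vendored golang.org/x/net/context package. As its own (now deleted) package comment notes, the same Context and CancelFunc types have lived in the standard library since Go 1.7, so any remaining importers can switch to the stdlib package directly. A minimal sketch of the equivalent stdlib call, assuming a caller that previously used the vendored WithTimeout:

```go
// Hedged sketch: stdlib replacement for the removed x/net/context helpers.
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Same semantics as the removed package's WithTimeout:
	// WithDeadline(parent, time.Now().Add(timeout)).
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // release resources as soon as the work completes

	<-ctx.Done()
	fmt.Println(ctx.Err()) // prints context.DeadlineExceeded after ~100ms
}
```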