diff --git a/.github/dependabot.yml b/.github/dependabot.yml index aeacd58ea5a..59b93d208cd 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -127,6 +127,15 @@ updates: schedule: interval: weekly day: sunday + - package-ecosystem: gomod + directory: /exporters/otlp/otlplog/otlploghttp + labels: + - dependencies + - go + - Skip Changelog + schedule: + interval: weekly + day: sunday - package-ecosystem: gomod directory: /exporters/otlp/otlpmetric/otlpmetricgrpc labels: diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ca9a079d0c0..576597c88c7 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -6,7 +6,7 @@ on: workflow_dispatch: env: - DEFAULT_GO_VERSION: "~1.22.0" + DEFAULT_GO_VERSION: "~1.22.2" jobs: benchmark: name: Benchmarks @@ -27,7 +27,7 @@ jobs: path: ./benchmarks key: ${{ runner.os }}-benchmark - name: Store benchmarks result - uses: benchmark-action/github-action-benchmark@v1.19.3 + uses: benchmark-action/github-action-benchmark@v1.20.1 with: name: Benchmarks tool: 'go' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6f98628fb5b..15814553e9e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ env: # backwards compatibility with the previous two minor releases and we # explicitly test our code for these versions so keeping this at prior # versions does not add value. - DEFAULT_GO_VERSION: "~1.22.0" + DEFAULT_GO_VERSION: "~1.22.2" jobs: lint: runs-on: ubuntu-latest @@ -92,7 +92,7 @@ jobs: cp coverage.txt $TEST_RESULTS cp coverage.html $TEST_RESULTS - name: Upload coverage report - uses: codecov/codecov-action@v4.1.1 + uses: codecov/codecov-action@v4.2.0 with: file: ./coverage.txt fail_ci_if_error: true @@ -107,7 +107,7 @@ jobs: compatibility-test: strategy: matrix: - go-version: ["~1.22.0", "~1.21.3"] + go-version: ["~1.22.2", "~1.21.9"] platform: - os: ubuntu-latest arch: "386" diff --git a/.github/workflows/create-dependabot-pr.yml b/.github/workflows/create-dependabot-pr.yml index bd931aff7c1..e91e137d9b5 100644 --- a/.github/workflows/create-dependabot-pr.yml +++ b/.github/workflows/create-dependabot-pr.yml @@ -10,7 +10,7 @@ jobs: - name: Install Go uses: actions/setup-go@v5 with: - go-version: "~1.21.3" + go-version: "~1.21.9" check-latest: true cache-dependency-path: "**/go.sum" diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml index 117ac33e733..0a85e2d3667 100644 --- a/.github/workflows/dependabot.yml +++ b/.github/workflows/dependabot.yml @@ -13,7 +13,7 @@ jobs: ref: ${{ github.head_ref }} - uses: actions/setup-go@v5 with: - go-version: "~1.21.3" + go-version: "~1.21.9" check-latest: true cache-dependency-path: "**/go.sum" - uses: evantorrie/mott-the-tidier@v1-beta diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a53d537e80..14f104031f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,15 +24,23 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) - Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) - Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. 
(#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) ### Changed - `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049) - `NewMemberRaw` in `go.opentelemetry.io/otel/baggage` allows UTF-8 string. (#5132) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. ### Fixed - Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) ### Removed @@ -2974,3 +2982,5 @@ It contains api and sdk for trace and meter. [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/Makefile b/Makefile index da5daf94c09..8b3d8816ea0 100644 --- a/Makefile +++ b/Makefile @@ -280,6 +280,7 @@ semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -303,7 +304,7 @@ add-tags: | $(MULTIMOD) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md .PHONY: verify-readmes diff --git a/bridge/opentracing/test/bridge_grpc_test.go b/bridge/opentracing/test/bridge_grpc_test.go index 68f22ba6960..99ce2d6b8e0 100644 --- a/bridge/opentracing/test/bridge_grpc_test.go +++ b/bridge/opentracing/test/bridge_grpc_test.go @@ -65,7 +65,7 @@ func TestBridgeTracer_ExtractAndInject_gRPC(t *testing.T) { srv, addr := startTestGRPCServer(t, bridge) defer srv.Stop() - conn, err := grpc.Dial( + conn, err := grpc.NewClient( addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(bridge)), diff --git a/bridge/opentracing/test/go.mod b/bridge/opentracing/test/go.mod index 5133c333f69..86c585f96a0 100644 --- a/bridge/opentracing/test/go.mod +++ b/bridge/opentracing/test/go.mod @@ -14,21 +14,21 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/bridge/opentracing v1.24.0 - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.63.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/bridge/opentracing/test/go.sum b/bridge/opentracing/test/go.sum index 66f894d869f..575d819cdaa 100644 --- a/bridge/opentracing/test/go.sum +++ b/bridge/opentracing/test/go.sum @@ -13,11 +13,9 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -37,8 +35,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -49,16 +47,13 @@ golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/example/otel-collector/go.mod b/example/otel-collector/go.mod index 5939e543251..b52bc6d7caf 100644 --- a/example/otel-collector/go.mod +++ b/example/otel-collector/go.mod @@ -12,23 +12,22 @@ 
require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.63.0 ) require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect google.golang.org/protobuf v1.33.0 // indirect ) diff --git a/example/otel-collector/go.sum b/example/otel-collector/go.sum index e311b64812b..6365346ae13 100644 --- a/example/otel-collector/go.sum +++ b/example/otel-collector/go.sum @@ -7,10 +7,6 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= @@ -23,23 +19,20 @@ go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxi go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 
h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/example/otel-collector/main.go b/example/otel-collector/main.go index 3e080d68dec..7a4d11dce7f 100644 --- a/example/otel-collector/main.go +++ b/example/otel-collector/main.go @@ -47,19 +47,16 @@ func initProvider() (func(context.Context) error, error) { // `localhost:30080` endpoint. Otherwise, replace `localhost` with the // endpoint of your cluster. If you run the app inside k8s, then you can // probably connect directly to the service through dns. - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - conn, err := grpc.DialContext(ctx, "localhost:30080", + conn, err := grpc.NewClient("localhost:30080", // Note the use of insecure transport here. TLS is recommended in production. 
grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), ) if err != nil { return nil, fmt.Errorf("failed to create gRPC connection to collector: %w", err) } // Set up a trace exporter - traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn)) + traceExporter, err := otlptracegrpc.New(context.Background(), otlptracegrpc.WithGRPCConn(conn)) if err != nil { return nil, fmt.Errorf("failed to create trace exporter: %w", err) } diff --git a/example/prometheus/go.mod b/example/prometheus/go.mod index e35b108c33e..3823ffecd65 100644 --- a/example/prometheus/go.mod +++ b/example/prometheus/go.mod @@ -15,7 +15,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect go.opentelemetry.io/otel/sdk v1.24.0 // indirect diff --git a/example/prometheus/go.sum b/example/prometheus/go.sum index 13d60cbc8ff..6868e94565e 100644 --- a/example/prometheus/go.sum +++ b/example/prometheus/go.sum @@ -15,8 +15,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= diff --git a/exporters/otlp/otlplog/README.md b/exporters/otlp/otlplog/README.md new file mode 100644 index 00000000000..5bde927cc49 --- /dev/null +++ b/exporters/otlp/otlplog/README.md @@ -0,0 +1,3 @@ +# OTLP Log Exporters + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlplog)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog) diff --git a/exporters/otlp/otlplog/otlploghttp/README.md b/exporters/otlp/otlplog/otlploghttp/README.md new file mode 100644 index 00000000000..14c240b0774 --- /dev/null +++ b/exporters/otlp/otlplog/otlploghttp/README.md @@ -0,0 +1,3 @@ +# OTLP Log HTTP Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp) diff --git a/exporters/otlp/otlplog/otlploghttp/client.go b/exporters/otlp/otlplog/otlploghttp/client.go new file mode 100644 index 00000000000..5d94868d0b0 --- /dev/null +++ b/exporters/otlp/otlplog/otlploghttp/client.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + +type client struct { + // TODO: implement. 
+}
+
+// newClient creates a new HTTP log client.
+func newClient(cfg config) (*client, error) {
+	// TODO: implement.
+	return &client{}, nil
+}
diff --git a/exporters/otlp/otlplog/otlploghttp/config.go b/exporters/otlp/otlplog/otlploghttp/config.go
new file mode 100644
index 00000000000..b15a92122af
--- /dev/null
+++ b/exporters/otlp/otlplog/otlploghttp/config.go
@@ -0,0 +1,198 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// Option applies an option to the Exporter.
+type Option interface {
+	applyHTTPOption(config) config
+}
+
+type config struct {
+	// TODO: implement.
+}
+
+func newConfig(options []Option) config {
+	var c config
+	for _, opt := range options {
+		c = opt.applyHTTPOption(c)
+	}
+	return c
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to. This
+// endpoint is specified as a host and optional port, no path or scheme should
+// be included (see WithInsecure and WithURLPath).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+	// TODO: implement.
+	return nil
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpointURL(u string) Option {
+	// TODO: implement.
+	return nil
+}
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+// WithCompression sets the compression strategy the Exporter will use to
+// compress the HTTP body.
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+func WithCompression(compression Compression) Option {
+	// TODO: implement.
+	return nil
+}
+
+// WithURLPath sets the URL path the Exporter will send requests to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, the path
+// contained in that variable value will be used. If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "/v1/logs" will be used.
+func WithURLPath(urlPath string) Option {
+	// TODO: implement.
+	return nil
+}
+
+// WithTLSClientConfig sets the TLS configuration the Exporter will use for
+// HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, the system default configuration is used.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	// TODO: implement.
+	return nil
+}
+
+// WithInsecure disables client transport security for the Exporter's HTTP
+// connection.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+func WithInsecure() Option {
+	// TODO: implement.
+	return nil
+}
+
+// WithHeaders will send the provided headers with each HTTP request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_LOGS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_LOGS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+	// TODO: implement.
+	return nil
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the log data is
+// dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_LOGS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+	// TODO: implement.
+	return nil
+}
+
+// RetryConfig defines configuration for retrying the export of log data that
+// failed.
+type RetryConfig struct {
+	// TODO: implement.
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	// TODO: implement.
+	return nil
+}
+
+// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy
+// for a given request. This type is compatible with http.Transport.Proxy and
+// can be used to set a custom proxy function to the OTLP HTTP client.
+type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+// WithProxy sets the Proxy function the client will use to determine the
+// proxy to use for an HTTP request. If this option is not used, the client
+// will use [http.ProxyFromEnvironment].
+func WithProxy(pf HTTPTransportProxyFunc) Option {
+	// TODO: implement.
+	return nil
+}
diff --git a/exporters/otlp/otlplog/otlploghttp/doc.go b/exporters/otlp/otlplog/otlploghttp/doc.go
new file mode 100644
index 00000000000..0d0ab6a5dd2
--- /dev/null
+++ b/exporters/otlp/otlplog/otlploghttp/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otlploghttp provides an OTLP log exporter. The exporter uses HTTP to
+// transport OTLP protobuf payloads.
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
diff --git a/exporters/otlp/otlplog/otlploghttp/exporter.go b/exporters/otlp/otlplog/otlploghttp/exporter.go
new file mode 100644
index 00000000000..5ca9822a8ff
--- /dev/null
+++ b/exporters/otlp/otlplog/otlploghttp/exporter.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/sdk/log"
+)
+
+// Exporter is an OpenTelemetry log Exporter. It transports log data encoded as
+// OTLP protobufs using HTTP.
+type Exporter struct {
+	// TODO: implement.
+}
+
+// Compile-time check Exporter implements [log.Exporter].
+var _ log.Exporter = (*Exporter)(nil)
+
+// New returns a new [Exporter].
+func New(_ context.Context, options ...Option) (*Exporter, error) {
+	cfg := newConfig(options)
+	c, err := newClient(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return newExporter(c, cfg)
+}
+
+func newExporter(*client, config) (*Exporter, error) {
+	// TODO: implement
+	return &Exporter{}, nil
+}
+
+// Export transforms and transmits log records to an OTLP receiver.
+func (e *Exporter) Export(ctx context.Context, records []log.Record) error {
+	// TODO: implement.
+	return nil
+}
+
+// Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform
+// no operation after this is called.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	// TODO: implement.
+	return nil
+}
+
+// ForceFlush does nothing. The Exporter holds no state.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+	// TODO: implement.
+ return nil +} diff --git a/exporters/otlp/otlplog/otlploghttp/go.mod b/exporters/otlp/otlplog/otlploghttp/go.mod new file mode 100644 index 00000000000..6fbeae876cd --- /dev/null +++ b/exporters/otlp/otlplog/otlploghttp/go.mod @@ -0,0 +1,34 @@ +module go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + +go 1.21 + +require ( + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/otel/sdk/log v0.0.0-20240403115316-6c6e1e7416e9 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/log v0.0.1-alpha // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/sys v0.18.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace go.opentelemetry.io/otel => ../../../.. + +replace go.opentelemetry.io/otel/sdk/log => ../../../../sdk/log + +replace go.opentelemetry.io/otel/trace => ../../../../trace + +replace go.opentelemetry.io/otel/sdk => ../../../../sdk + +replace go.opentelemetry.io/otel/metric => ../../../../metric + +replace go.opentelemetry.io/otel/log => ../../../../log diff --git a/exporters/otlp/otlplog/otlploghttp/go.sum b/exporters/otlp/otlplog/otlploghttp/go.sum new file mode 100644 index 00000000000..e2eeaf7227b --- /dev/null +++ b/exporters/otlp/otlplog/otlploghttp/go.sum @@ -0,0 +1,19 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporters/otlp/otlplog/otlploghttp/version.go b/exporters/otlp/otlplog/otlploghttp/version.go new file mode 100644 index 00000000000..473d52ae776 --- /dev/null +++ b/exporters/otlp/otlplog/otlploghttp/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + +// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. +func Version() string { + return "0.0.0" +} diff --git a/exporters/otlp/otlplog/otlploghttp/version_test.go b/exporters/otlp/otlplog/otlploghttp/version_test.go new file mode 100644 index 00000000000..05f3fc09eb1 --- /dev/null +++ b/exporters/otlp/otlplog/otlploghttp/version_test.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploghttp + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +// regex taken from https://github.com/Masterminds/semver/tree/v3.1.1 +var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`) + +func TestVersionSemver(t *testing.T) { + v := Version() + assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v) +} diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index e5a96270b52..428cfea2334 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -36,7 +36,7 @@ type client struct { } // newClient creates a new gRPC metric client. -func newClient(ctx context.Context, cfg oconf.Config) (*client, error) { +func newClient(_ context.Context, cfg oconf.Config) (*client, error) { c := &client{ exportTimeout: cfg.Metrics.Timeout, requestFunc: cfg.RetryConfig.RequestFunc(retryable), @@ -54,7 +54,7 @@ func newClient(ctx context.Context, cfg oconf.Config) (*client, error) { dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)} dialOpts = append(dialOpts, cfg.DialOptions...) - conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...) + conn, err := grpc.NewClient(cfg.Metrics.Endpoint, dialOpts...) if err != nil { return nil, err } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go index ef2746705b5..38d7d60d403 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -185,6 +185,8 @@ func WithServiceConfig(serviceConfig string) Option { // gRPC connection. The options here are appended to the internal grpc.DialOptions // used so they will take precedence over any other internal grpc.DialOptions // they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. // // This option has no effect if WithGRPCConn is used. 
func WithDialOption(opts ...grpc.DialOption) Option { diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod index 344b5e5008d..85ff2ec94d1 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod @@ -12,8 +12,8 @@ require ( go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/sdk/metric v1.24.0 go.opentelemetry.io/proto/otlp v1.1.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 - google.golang.org/grpc v1.62.1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda + google.golang.org/grpc v1.63.0 google.golang.org/protobuf v1.33.0 ) @@ -21,17 +21,16 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum index 66bea4ce5ad..0fa3c1cba2a 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum @@ -8,10 +8,6 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= @@ -28,23 +24,20 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sys v0.18.0 
h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod index b2fdae8974c..be1b8b68e34 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod @@ -12,7 +12,7 @@ require ( go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/sdk/metric v1.24.0 go.opentelemetry.io/proto/otlp v1.1.0 - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.63.0 google.golang.org/protobuf v1.33.0 ) @@ -20,18 +20,17 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // 
indirect github.com/rogpeppe/go-internal v1.11.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum index 66bea4ce5ad..0fa3c1cba2a 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum @@ -8,10 +8,6 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= @@ -28,23 +24,20 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client.go b/exporters/otlp/otlptrace/otlptracegrpc/client.go index 94e2cb86dfd..3993df927de 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -78,11 +78,11 @@ func newClient(opts ...Option) *client { } // Start establishes a gRPC connection to the collector. -func (c *client) Start(ctx context.Context) error { +func (c *client) Start(context.Context) error { if c.conn == nil { // If the caller did not provide a ClientConn when the client was // created, create one using the configuration they did provide. - conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...) + conn, err := grpc.NewClient(c.endpoint, c.dialOpts...) 
if err != nil { return err } diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go index 46d08c238ab..4d626d30b84 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/status" @@ -74,7 +75,11 @@ func TestNewEndToEnd(t *testing.T) { { name: "WithDialOptions", additionalOpts: []otlptracegrpc.Option{ - otlptracegrpc.WithDialOption(grpc.WithBlock()), + otlptracegrpc.WithDialOption( + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: time.Second, + })), }, }, } @@ -144,7 +149,6 @@ func TestExporterShutdown(t *testing.T) { return otlptracegrpc.NewClient( otlptracegrpc.WithEndpoint(mc.endpoint), otlptracegrpc.WithInsecure(), - otlptracegrpc.WithDialOption(grpc.WithBlock()), ) } otlptracetest.RunExporterShutdownTest(t, factory) @@ -368,21 +372,6 @@ func TestNewWithMultipleAttributeTypes(t *testing.T) { } } -func TestStartErrorInvalidAddress(t *testing.T) { - client := otlptracegrpc.NewClient( - otlptracegrpc.WithInsecure(), - // Validate the connection in Start (which should return the error). - otlptracegrpc.WithDialOption( - grpc.WithBlock(), - grpc.FailOnNonTempDialError(true), - ), - otlptracegrpc.WithEndpoint("invalid"), - otlptracegrpc.WithReconnectionPeriod(time.Hour), - ) - err := client.Start(context.Background()) - assert.EqualError(t, err, `connection error: desc = "transport: error while dialing: dial tcp: address invalid: missing port in address"`) -} - func TestEmptyData(t *testing.T) { mc := runMockCollector(t) t.Cleanup(func() { require.NoError(t, mc.stop()) }) diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod index c087652c45b..373dd768b81 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.mod +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod @@ -11,8 +11,8 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 go.opentelemetry.io/proto/otlp v1.1.0 go.uber.org/goleak v1.3.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 - google.golang.org/grpc v1.62.1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda + google.golang.org/grpc v1.63.0 google.golang.org/protobuf v1.33.0 ) @@ -20,15 +20,14 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.sum b/exporters/otlp/otlptrace/otlptracegrpc/go.sum index 9aecdf4f038..6f0b3d85c6a 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.sum +++ 
b/exporters/otlp/otlptrace/otlptracegrpc/go.sum @@ -8,10 +8,6 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= @@ -30,23 +26,20 @@ go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxi go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= 
+google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go b/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go index 5c59907cd14..27243fd2a51 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" @@ -173,11 +172,5 @@ func runMockCollectorWithConfig(t *testing.T, mockConfig *mockConfig) *mockColle mc.endpoint = ln.Addr().String() mc.stopFunc = srv.Stop - - // Wait until gRPC server is up. - conn, err := grpc.Dial(mc.endpoint, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) - require.NoError(t, err, "grpc.Dial") - require.NoError(t, conn.Close(), "conn.Close") - return mc } diff --git a/exporters/otlp/otlptrace/otlptracegrpc/options.go b/exporters/otlp/otlptrace/otlptracegrpc/options.go index d7559bfb5a1..a9e7f933b34 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -150,6 +150,8 @@ func WithServiceConfig(serviceConfig string) Option { // connection. The options here are appended to the internal grpc.DialOptions // used so they will take precedence over any other internal grpc.DialOptions // they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. // // This option has no effect if WithGRPCConn is used. 
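The dial-option semantics documented above change how connection readiness is handled: grpc.WithBlock, grpc.WithTimeout, and grpc.WithReturnConnectionError are ignored, so callers that relied on a blocking dial need a different knob. The following minimal sketch is not part of this patch (the collector endpoint is an assumption); it mirrors the grpc.WithConnectParams option the tests above switched to:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()
	// grpc.WithBlock no longer has any effect, so connection tuning is done
	// through dial options that grpc.NewClient honors, such as
	// grpc.WithConnectParams, instead of blocking at construction time.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"), // assumed collector address
		otlptracegrpc.WithInsecure(),
		otlptracegrpc.WithDialOption(
			grpc.WithConnectParams(grpc.ConnectParams{
				Backoff:           backoff.DefaultConfig,
				MinConnectTimeout: time.Second,
			}),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	// In real code the exporter would be registered with a TracerProvider;
	// here it is only shut down again.
	defer func() { _ = exp.Shutdown(ctx) }()
}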
func WithDialOption(opts ...grpc.DialOption) Option { diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod index 4e2e875ad69..4bb127eb6f6 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.mod +++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod @@ -10,7 +10,7 @@ require ( go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 go.opentelemetry.io/proto/otlp v1.1.0 - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.63.0 google.golang.org/protobuf v1.33.0 ) @@ -18,16 +18,15 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.sum b/exporters/otlp/otlptrace/otlptracehttp/go.sum index 7113a3c58be..49da57b0d45 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.sum +++ b/exporters/otlp/otlptrace/otlptracehttp/go.sum @@ -8,10 +8,6 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= @@ -28,23 +24,20 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporters/prometheus/exporter.go b/exporters/prometheus/exporter.go index ea09390c5ba..1a8e28542e7 100644 --- a/exporters/prometheus/exporter.go +++ b/exporters/prometheus/exporter.go @@ -5,6 +5,7 @@ package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( "context" + "encoding/hex" "errors" "fmt" "slices" @@ -32,6 +33,9 @@ const ( scopeInfoMetricName = "otel_scope_info" scopeInfoDescription = "Instrumentation Scope metadata" + + traceIDExemplarKey = "trace_id" + spanIDExemplarKey = "span_id" ) var ( @@ -238,7 +242,6 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { - // TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars for _, dp := range histogram.DataPoints 
{ keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) @@ -255,6 +258,7 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra otel.Handle(err) continue } + m = addExemplars(m, dp.Exemplars) ch <- m } } @@ -274,6 +278,7 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata otel.Handle(err) continue } + m = addExemplars(m, dp.Exemplars) ch <- m } } @@ -549,3 +554,37 @@ func (c *collector) validateMetrics(name, description string, metricType *dto.Me return false, "" } + +func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata.Exemplar[N]) prometheus.Metric { + if len(exemplars) == 0 { + return m + } + promExemplars := make([]prometheus.Exemplar, len(exemplars)) + for i, exemplar := range exemplars { + labels := attributesToLabels(exemplar.FilteredAttributes) + // Overwrite any existing trace ID or span ID attributes + labels[traceIDExemplarKey] = hex.EncodeToString(exemplar.TraceID[:]) + labels[spanIDExemplarKey] = hex.EncodeToString(exemplar.SpanID[:]) + promExemplars[i] = prometheus.Exemplar{ + Value: float64(exemplar.Value), + Timestamp: exemplar.Time, + Labels: labels, + } + } + metricWithExemplar, err := prometheus.NewMetricWithExemplars(m, promExemplars...) + if err != nil { + // If there are errors creating the metric with exemplars, just warn + // and return the metric without exemplars. + otel.Handle(err) + return m + } + return metricWithExemplar +} + +func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels { + labels := make(map[string]string) + for _, attr := range attrs { + labels[string(attr.Key)] = attr.Value.Emit() + } + return labels +} diff --git a/exporters/prometheus/exporter_test.go b/exporters/prometheus/exporter_test.go index ee052604eb1..67efaf9cc9c 100644 --- a/exporters/prometheus/exporter_test.go +++ b/exporters/prometheus/exporter_test.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" + dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,6 +23,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "go.opentelemetry.io/otel/trace" ) func TestPrometheusExporter(t *testing.T) { @@ -898,3 +900,118 @@ func TestShutdownExporter(t *testing.T) { // ensure we aren't unnecessarily logging errors from the shutdown MeterProvider require.NoError(t, handledError) } + +func TestExemplars(t *testing.T) { + attrsOpt := otelmetric.WithAttributes( + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), + attribute.Key("E").Bool(true), + attribute.Key("F").Int(42), + ) + for _, tc := range []struct { + name string + recordMetrics func(ctx context.Context, meter otelmetric.Meter) + expectedExemplarValue float64 + }{ + { + name: "counter", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + counter, err := meter.Float64Counter("foo") + require.NoError(t, err) + counter.Add(ctx, 9, attrsOpt) + }, + expectedExemplarValue: 9, + }, + { + name: "histogram", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + hist, err := meter.Int64Histogram("foo") + require.NoError(t, err) + hist.Record(ctx, 9, attrsOpt) + }, + expectedExemplarValue: 9, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Setenv("OTEL_GO_X_EXEMPLAR", "true") + // initialize registry exporter + ctx := context.Background() + 
registry := prometheus.NewRegistry() + exporter, err := New(WithRegisterer(registry), WithoutTargetInfo(), WithoutScopeInfo()) + require.NoError(t, err) + + // initialize resource + res, err := resource.New(ctx, + resource.WithAttributes(semconv.ServiceName("prometheus_test")), + resource.WithAttributes(semconv.TelemetrySDKVersion("latest")), + ) + require.NoError(t, err) + res, err = resource.Merge(resource.Default(), res) + require.NoError(t, err) + + // initialize provider and meter + provider := metric.NewMeterProvider( + metric.WithReader(exporter), + metric.WithResource(res), + metric.WithView(metric.NewView( + metric.Instrument{Name: "*"}, + metric.Stream{ + // filter out all attributes so they are added as filtered + // attributes to the exemplar + AttributeFilter: attribute.NewAllowKeysFilter(), + }, + )), + ) + meter := provider.Meter("meter", otelmetric.WithInstrumentationVersion("v0.1.0")) + + // Add a sampled span context so that measurements get exemplars added + sc := trace.NewSpanContext(trace.SpanContextConfig{ + SpanID: trace.SpanID{0o1}, + TraceID: trace.TraceID{0o1}, + TraceFlags: trace.FlagsSampled, + }) + ctx = trace.ContextWithSpanContext(ctx, sc) + // Record a single observation with the exemplar + tc.recordMetrics(ctx, meter) + + // Verify that the exemplar is present in the proto version of the + // prometheus metrics. + got, done, err := prometheus.ToTransactionalGatherer(registry).Gather() + defer done() + require.NoError(t, err) + + require.Len(t, got, 1) + family := got[0] + require.Len(t, family.GetMetric(), 1) + metric := family.GetMetric()[0] + var exemplar *dto.Exemplar + switch family.GetType() { + case dto.MetricType_COUNTER: + exemplar = metric.GetCounter().GetExemplar() + case dto.MetricType_HISTOGRAM: + for _, b := range metric.GetHistogram().GetBucket() { + if b.GetExemplar() != nil { + exemplar = b.GetExemplar() + continue + } + } + } + require.NotNil(t, exemplar) + require.Equal(t, exemplar.GetValue(), tc.expectedExemplarValue) + expectedLabels := map[string]string{ + traceIDExemplarKey: "01000000000000000000000000000000", + spanIDExemplarKey: "0100000000000000", + "A": "B", + "C": "D", + "E": "true", + "F": "42", + } + require.Equal(t, len(expectedLabels), len(exemplar.GetLabel())) + for _, label := range exemplar.GetLabel() { + val, ok := expectedLabels[label.GetName()] + require.True(t, ok) + require.Equal(t, label.GetValue(), val) + } + }) + } +} diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod index 51c44b2ade1..340b0e320f3 100644 --- a/exporters/prometheus/go.mod +++ b/exporters/prometheus/go.mod @@ -4,12 +4,13 @@ go 1.21 require ( github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/client_model v0.6.0 + github.com/prometheus/client_model v0.6.1 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/sdk/metric v1.24.0 + go.opentelemetry.io/otel/trace v1.24.0 google.golang.org/protobuf v1.33.0 ) @@ -23,7 +24,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/sys v0.18.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/prometheus/go.sum b/exporters/prometheus/go.sum index b08cfb3eb83..91b16277f76 100644 --- a/exporters/prometheus/go.sum +++ b/exporters/prometheus/go.sum @@ -20,8 +20,8 @@ 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= diff --git a/handler.go b/handler.go index f63fbdfc13a..07623b67914 100644 --- a/handler.go +++ b/handler.go @@ -7,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -33,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/handler_test.go b/handler_test.go index 4bf8283c358..14cf8ff9c68 100644 --- a/handler_test.go +++ b/handler_test.go @@ -18,6 +18,9 @@ var _ ErrorHandler = &testErrHandler{} func (eh *testErrHandler) Handle(err error) { eh.err = err } func TestGlobalErrorHandler(t *testing.T) { + SetErrorHandler(GetErrorHandler()) + assert.NotPanics(t, func() { Handle(assert.AnError) }, "Default assignment") + e1 := &testErrHandler{} SetErrorHandler(e1) Handle(assert.AnError) diff --git a/internal/global/handler.go b/internal/global/handler.go index 423ee4f8f86..c657ff8e755 100644 --- a/internal/global/handler.go +++ b/internal/global/handler.go @@ -5,23 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. 
type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -33,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). 
-func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/internal/global/handler_test.go b/internal/global/handler_test.go index db2c1e6e0b0..b887060894c 100644 --- a/internal/global/handler_test.go +++ b/internal/global/handler_test.go @@ -6,225 +6,35 @@ package global import ( "bytes" "errors" - "io" "log" - "sync" + "os" + "strings" "testing" - - "github.com/stretchr/testify/suite" ) -type testErrCatcher []string - -func (l *testErrCatcher) Write(p []byte) (int, error) { - msg := bytes.TrimRight(p, "\n") - (*l) = append(*l, string(msg)) - return len(msg), nil -} - -func (l *testErrCatcher) Reset() { - *l = testErrCatcher([]string{}) -} - -func (l *testErrCatcher) Got() []string { - return []string(*l) -} - -func causeErr(text string) { - Handle(errors.New(text)) -} - -type HandlerTestSuite struct { - suite.Suite - - origHandler ErrorHandler - errCatcher *testErrCatcher -} - -func (s *HandlerTestSuite) SetupSuite() { - s.errCatcher = new(testErrCatcher) - s.origHandler = GlobalErrorHandler.getDelegate() - - GlobalErrorHandler.setDelegate(&ErrLogger{l: log.New(s.errCatcher, "", 0)}) -} - -func (s *HandlerTestSuite) TearDownSuite() { - GlobalErrorHandler.setDelegate(s.origHandler) -} - -func (s *HandlerTestSuite) SetupTest() { - s.errCatcher.Reset() -} - -func (s *HandlerTestSuite) TearDownTest() { - GlobalErrorHandler.setDelegate(&ErrLogger{l: log.New(s.errCatcher, "", 0)}) -} - -func (s *HandlerTestSuite) TestGlobalHandler() { - errs := []string{"one", "two"} - GetErrorHandler().Handle(errors.New(errs[0])) - Handle(errors.New(errs[1])) - s.Assert().Equal(errs, s.errCatcher.Got()) -} +func TestErrDelegator(t *testing.T) { + buf := new(bytes.Buffer) + log.Default().SetOutput(buf) + t.Cleanup(func() { log.Default().SetOutput(os.Stderr) }) -func (s *HandlerTestSuite) TestDelegatedHandler() { - eh := GetErrorHandler() + e := &ErrDelegator{} - newErrLogger := new(testErrCatcher) - SetErrorHandler(&ErrLogger{l: log.New(newErrLogger, "", 0)}) + err := errors.New("testing") + e.Handle(err) - errs := []string{"TestDelegatedHandler"} - eh.Handle(errors.New(errs[0])) - s.Assert().Equal(errs, newErrLogger.Got()) -} - -func (s *HandlerTestSuite) TestNoDropsOnDelegate() { - causeErr("") - s.Require().Len(s.errCatcher.Got(), 1) - - // Change to another Handler. We are testing this is loss-less. 
- newErrLogger := new(testErrCatcher) - secondary := &ErrLogger{ - l: log.New(newErrLogger, "", 0), + got := buf.String() + if !strings.Contains(got, err.Error()) { + t.Error("default handler did not log") } - SetErrorHandler(secondary) - - causeErr("") - s.Assert().Len(s.errCatcher.Got(), 1, "original Handler used after delegation") - s.Assert().Len(newErrLogger.Got(), 1, "new Handler not used after delegation") -} - -func (s *HandlerTestSuite) TestAllowMultipleSets() { - notUsed := new(testErrCatcher) + buf.Reset() - secondary := &ErrLogger{l: log.New(notUsed, "", 0)} - SetErrorHandler(secondary) - s.Require().Same(GetErrorHandler(), GlobalErrorHandler, "set changed globalErrorHandler") - s.Require().Same(GlobalErrorHandler.getDelegate(), secondary, "new Handler not set") + var gotErr error + e.setDelegate(fnErrHandler(func(e error) { gotErr = e })) + e.Handle(err) - tertiary := &ErrLogger{l: log.New(notUsed, "", 0)} - SetErrorHandler(tertiary) - s.Require().Same(GetErrorHandler(), GlobalErrorHandler, "set changed globalErrorHandler") - s.Assert().Same(GlobalErrorHandler.getDelegate(), tertiary, "user Handler not overridden") -} - -func TestHandlerTestSuite(t *testing.T) { - suite.Run(t, new(HandlerTestSuite)) -} - -func TestHandlerConcurrentSafe(t *testing.T) { - // In order not to pollute the test output. - SetErrorHandler(&ErrLogger{log.New(io.Discard, "", 0)}) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - SetErrorHandler(&ErrLogger{log.New(io.Discard, "", 0)}) - }() - wg.Add(1) - go func() { - defer wg.Done() - Handle(errors.New("error")) - }() - - wg.Wait() - reset() -} - -func BenchmarkErrorHandler(b *testing.B) { - primary := &ErrLogger{l: log.New(io.Discard, "", 0)} - secondary := &ErrLogger{l: log.New(io.Discard, "", 0)} - tertiary := &ErrLogger{l: log.New(io.Discard, "", 0)} - - GlobalErrorHandler.setDelegate(primary) - - err := errors.New("benchmark error handler") - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - GetErrorHandler().Handle(err) - Handle(err) - - SetErrorHandler(secondary) - GetErrorHandler().Handle(err) - Handle(err) - - SetErrorHandler(tertiary) - GetErrorHandler().Handle(err) - Handle(err) - - GlobalErrorHandler.setDelegate(primary) - } - - reset() -} - -var eh ErrorHandler - -func BenchmarkGetDefaultErrorHandler(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - eh = GetErrorHandler() + if buf.String() != "" { + t.Error("delegate not set") + } else if !errors.Is(gotErr, err) { + t.Error("error not passed to delegate") } } - -func BenchmarkGetDelegatedErrorHandler(b *testing.B) { - SetErrorHandler(&ErrLogger{l: log.New(io.Discard, "", 0)}) - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - eh = GetErrorHandler() - } - - reset() -} - -func BenchmarkDefaultErrorHandlerHandle(b *testing.B) { - GlobalErrorHandler.setDelegate( - &ErrLogger{l: log.New(io.Discard, "", 0)}, - ) - - eh := GetErrorHandler() - err := errors.New("benchmark default error handler handle") - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - eh.Handle(err) - } - - reset() -} - -func BenchmarkDelegatedErrorHandlerHandle(b *testing.B) { - eh := GetErrorHandler() - SetErrorHandler(&ErrLogger{l: log.New(io.Discard, "", 0)}) - err := errors.New("benchmark delegated error handler handle") - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - eh.Handle(err) - } - - reset() -} - -func BenchmarkSetErrorHandlerDelegation(b *testing.B) { - alt := &ErrLogger{l: log.New(io.Discard, "", 0)} - - 
b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - SetErrorHandler(alt) - - reset() - } -} - -func reset() { - GlobalErrorHandler = defaultErrorHandler() -} diff --git a/internal/global/internal_logging_test.go b/internal/global/internal_logging_test.go index 2b55050c8bc..96287146add 100644 --- a/internal/global/internal_logging_test.go +++ b/internal/global/internal_logging_test.go @@ -33,7 +33,7 @@ func TestLoggerConcurrentSafe(t *testing.T) { }() wg.Wait() - reset() + ResetForTest(t) } func TestLogLevel(t *testing.T) { diff --git a/internal/global/state.go b/internal/global/state.go index 976fa610387..204ea142a50 100644 --- a/internal/global/state.go +++ b/internal/global/state.go @@ -14,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -28,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. 
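To show how the relocated error-handler globals above are meant to be driven from application code, here is a hedged sketch using only the public otel API (not code from this patch; the log prefix is an arbitrary choice):

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Register a custom handler. The global ErrDelegator created by
	// defaultErrorHandler starts forwarding to it after this call.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel error: %v", err)
	}))

	// otel.Handle resolves the current handler through GetErrorHandler on
	// every call, so this error is routed to the handler registered above.
	otel.Handle(errors.New("example error"))
}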
func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -126,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/internal/global/state_test.go b/internal/global/state_test.go index f54ba7ef09b..fe74e7eab8f 100644 --- a/internal/global/state_test.go +++ b/internal/global/state_test.go @@ -15,6 +15,12 @@ import ( tracenoop "go.opentelemetry.io/otel/trace/noop" ) +type nonComparableErrorHandler struct { + ErrorHandler + + nonComparable func() //nolint:structcheck,unused // This is not called. +} + type nonComparableTracerProvider struct { trace.TracerProvider @@ -27,6 +33,63 @@ type nonComparableMeterProvider struct { nonComparable func() //nolint:structcheck,unused // This is not called. } +type fnErrHandler func(error) + +func (f fnErrHandler) Handle(err error) { f(err) } + +var noopEH = fnErrHandler(func(error) {}) + +func TestSetErrorHandler(t *testing.T) { + t.Run("Set With default is a noop", func(t *testing.T) { + ResetForTest(t) + SetErrorHandler(GetErrorHandler()) + + eh, ok := GetErrorHandler().(*ErrDelegator) + if !ok { + t.Fatal("Global ErrorHandler should be the default ErrorHandler") + } + + if eh.delegate.Load() != nil { + t.Fatal("ErrorHandler should not delegate when setting itself") + } + }) + + t.Run("First Set() should replace the delegate", func(t *testing.T) { + ResetForTest(t) + + SetErrorHandler(noopEH) + + _, ok := GetErrorHandler().(*ErrDelegator) + if ok { + t.Fatal("Global ErrorHandler was not changed") + } + }) + + t.Run("Set() should delegate existing ErrorHandlers", func(t *testing.T) { + ResetForTest(t) + + eh := GetErrorHandler() + SetErrorHandler(noopEH) + + errDel, ok := eh.(*ErrDelegator) + if !ok { + t.Fatal("Wrong ErrorHandler returned") + } + + if errDel.delegate.Load() == nil { + t.Fatal("The ErrDelegator should have a delegate") + } + }) + + t.Run("non-comparable types should not panic", func(t *testing.T) { + ResetForTest(t) + + eh := nonComparableErrorHandler{} + assert.NotPanics(t, func() { SetErrorHandler(eh) }, "delegate") + assert.NotPanics(t, func() { SetErrorHandler(eh) }, "replacement") + }) +} + func TestSetTracerProvider(t *testing.T) { t.Run("Set With default is a noop", func(t *testing.T) { ResetForTest(t) diff --git a/internal/global/util_test.go b/internal/global/util_test.go index 0e0659c0ac3..a23d6228d3e 100644 --- a/internal/global/util_test.go +++ b/internal/global/util_test.go @@ -12,9 +12,11 @@ import ( // its Cleanup step. 
func ResetForTest(t testing.TB) { t.Cleanup(func() { + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce = sync.Once{} delegateTraceOnce = sync.Once{} delegateTextMapPropagatorOnce = sync.Once{} delegateMeterOnce = sync.Once{} diff --git a/internal/tools/go.mod b/internal/tools/go.mod index a99df180132..c35224add2b 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -6,7 +6,7 @@ require ( github.com/client9/misspell v0.3.4 github.com/gogo/protobuf v1.3.2 github.com/golangci/golangci-lint v1.57.2 - github.com/itchyny/gojq v0.12.14 + github.com/itchyny/gojq v0.12.15 github.com/jcchavezs/porto v0.6.0 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad go.opentelemetry.io/build-tools/crosslink v0.13.0 @@ -141,14 +141,14 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.4.8 // indirect github.com/prometheus/client_golang v1.19.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/quasilyte/go-ruleguard v0.4.2 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/rivo/uniseg v0.4.4 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/ryancurrah/gomodguard v1.3.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -202,7 +202,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 972a2037403..d091cee8b94 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -328,8 +328,8 @@ github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSo github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/itchyny/gojq v0.12.14 h1:6k8vVtsrhQSYgSGg827AD+PVVaB1NLXEdX+dda2oZCc= -github.com/itchyny/gojq v0.12.14/go.mod h1:y1G7oO7XkcR1LPZO59KyoCRy08T3j9vDYRV0GgYSS+s= +github.com/itchyny/gojq v0.12.15 h1:WC1Nxbx4Ifw5U2oQWACYz32JK8G9qxNtHzrvW4KEcqI= +github.com/itchyny/gojq v0.12.15/go.mod h1:uWAHCbCIla1jiNxmeT5/B5mOjSdfkCq6p8vxWg+BM10= github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -472,8 +472,8 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -496,8 +496,8 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:r github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -752,8 +752,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/log/keyvalue.go b/log/keyvalue.go index da3d55c4fc8..10920d21f4a 100644 --- a/log/keyvalue.go +++ b/log/keyvalue.go @@ -8,8 +8,10 @@ package log // import "go.opentelemetry.io/otel/log" import ( "bytes" "errors" + "fmt" "math" "slices" + "strconv" "unsafe" "go.opentelemetry.io/otel/internal/global" @@ -265,6 +267,39 @@ func (v Value) Equal(w Value) bool { } } +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. 
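As a quick illustration of the String methods this change adds to go.opentelemetry.io/otel/log, the sketch below (not part of the patch) prints a couple of representative values; the outputs shown follow the TestValueString cases later in this diff, and the doc comments stress the format is for debugging only and not stable:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/log"
)

func main() {
	// Value.String renders like fmt.Sprint; KeyValue.String is "key:value".
	v := log.SliceValue(log.IntValue(3), log.StringValue("foo"))
	fmt.Println(v.String()) // [3 foo]

	kv := log.Int("answer", 42)
	fmt.Println(kv.String()) // answer:42
}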
+func (v Value) String() string { + switch v.Kind() { + case KindString: + return v.asString() + case KindInt64: + return strconv.FormatInt(int64(v.num), 10) + case KindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case KindBool: + return strconv.FormatBool(v.asBool()) + case KindBytes: + return fmt.Sprint(v.asBytes()) + case KindMap: + return fmt.Sprint(v.asMap()) + case KindSlice: + return fmt.Sprint(v.asSlice()) + case KindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a Kind is not handled. It is + // preferable to have users open an issue asking why their attributes + // have an "unhandled: " prefix than to have their code panic. + return fmt.Sprintf("<unhandled: %s>", v.Kind()) + } +} + // A KeyValue is a key-value pair used to represent a log attribute (a // superset of [go.opentelemetry.io/otel/attribute.KeyValue]) and map item. type KeyValue struct { @@ -321,3 +356,11 @@ func Map(key string, value ...KeyValue) KeyValue { func Empty(key string) KeyValue { return KeyValue{key, Value{}} } + +// String returns the key-value pair as a string, formatted like "key:value". +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (a KeyValue) String() string { + return fmt.Sprintf("%s:%s", a.Key, a.Value) +} diff --git a/log/keyvalue_test.go b/log/keyvalue_test.go index f7c6602de74..2f0211160cf 100644 --- a/log/keyvalue_test.go +++ b/log/keyvalue_test.go @@ -264,6 +264,25 @@ func TestEmpty(t *testing.T) { t.Run("AsMap", testErrKind(v.AsMap, "AsMap", k)) } +func TestValueString(t *testing.T) { + for _, test := range []struct { + v log.Value + want string + }{ + {log.Int64Value(-3), "-3"}, + {log.Float64Value(.15), "0.15"}, + {log.BoolValue(true), "true"}, + {log.StringValue("foo"), "foo"}, + {log.BytesValue([]byte{2, 4, 6}), "[2 4 6]"}, + {log.SliceValue(log.IntValue(3), log.StringValue("foo")), "[3 foo]"}, + {log.MapValue(log.Int("a", 1), log.Bool("b", true)), "[a:1 b:true]"}, + {log.Value{}, ""}, + } { + got := test.v.String() + assert.Equal(t, test.want, got) + } +} + type logSink struct { logr.LogSink diff --git a/sdk/log/batch.go b/sdk/log/batch.go index a17f92f5ed8..bb85a2a34fb 100644 --- a/sdk/log/batch.go +++ b/sdk/log/batch.go @@ -4,7 +4,9 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( + "container/ring" "context" + "sync" "time" ) @@ -76,6 +78,91 @@ func (b *BatchingProcessor) ForceFlush(ctx context.Context) error { return nil } +// queue holds a queue of logging records. +// +// When the queue becomes full, the oldest records in the queue are +// overwritten. +type queue struct { + sync.Mutex + + cap, len int + read, write *ring.Ring +} + +func newQueue(size int) *queue { + r := ring.New(size) + return &queue{ + cap: size, + read: r, + write: r, + } +} + +// Enqueue adds r to the queue. The queue size, including the addition of r, is +// returned. +// +// If enqueueing r will exceed the capacity of q, the oldest Record held in q +// will be dropped and r retained. +func (q *queue) Enqueue(r Record) int { + q.Lock() + defer q.Unlock() + + q.write.Value = r + q.write = q.write.Next() + + q.len++ + if q.len > q.cap { + // Overflow. Advance read to be the new "oldest". + q.len = q.cap + q.read = q.read.Next() + } + return q.len +} + +// TryDequeue attempts to dequeue up to len(buf) Records. The available Records +// will be assigned into buf and passed to write.
If write fails, returning +// false, the Records will not be removed from the queue. If write succeeds, +// returning true, the dequeued Records are removed from the queue. The number +// of Records remaining in the queue are returned. +// +// When write is called the lock of q is held. The write function must not call +// other methods of this q that acquire the lock. +func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int { + q.Lock() + defer q.Unlock() + + origRead := q.read + + n := min(len(buf), q.len) + for i := 0; i < n; i++ { + buf[i] = q.read.Value.(Record) + q.read = q.read.Next() + } + + if write(buf[:n]) { + q.len -= n + } else { + q.read = origRead + } + return q.len +} + +// Flush returns all the Records held in the queue and resets it to be +// empty. +func (q *queue) Flush() []Record { + q.Lock() + defer q.Unlock() + + out := make([]Record, q.len) + for i := range out { + out[i] = q.read.Value.(Record) + q.read = q.read.Next() + } + q.len = 0 + + return out +} + type batchingConfig struct { maxQSize setting[int] expInterval setting[time.Duration] diff --git a/sdk/log/batch_test.go b/sdk/log/batch_test.go index 111e2dea374..adbdb1d8bce 100644 --- a/sdk/log/batch_test.go +++ b/sdk/log/batch_test.go @@ -4,13 +4,17 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( + "slices" "strconv" + "sync" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/log" ) func TestNewBatchingConfig(t *testing.T) { @@ -126,3 +130,112 @@ func TestNewBatchingConfig(t *testing.T) { }) } } + +func TestQueue(t *testing.T) { + var r Record + r.SetBody(log.BoolValue(true)) + + t.Run("newQueue", func(t *testing.T) { + const size = 1 + q := newQueue(size) + assert.Equal(t, q.len, 0) + assert.Equal(t, size, q.cap, "capacity") + assert.Equal(t, size, q.read.Len(), "read ring") + assert.Same(t, q.read, q.write, "different rings") + }) + + t.Run("Enqueue", func(t *testing.T) { + const size = 2 + q := newQueue(size) + + var notR Record + notR.SetBody(log.IntValue(10)) + + assert.Equal(t, 1, q.Enqueue(notR), "incomplete batch") + assert.Equal(t, 1, q.len, "length") + assert.Equal(t, size, q.cap, "capacity") + + assert.Equal(t, 2, q.Enqueue(r), "complete batch") + assert.Equal(t, 2, q.len, "length") + assert.Equal(t, size, q.cap, "capacity") + + assert.Equal(t, 2, q.Enqueue(r), "overflow batch") + assert.Equal(t, 2, q.len, "length") + assert.Equal(t, size, q.cap, "capacity") + + assert.Equal(t, []Record{r, r}, q.Flush(), "flushed Records") + }) + + t.Run("Flush", func(t *testing.T) { + const size = 2 + q := newQueue(size) + q.write.Value = r + q.write = q.write.Next() + q.len = 1 + + assert.Equal(t, []Record{r}, q.Flush(), "flushed") + }) + + t.Run("TryFlush", func(t *testing.T) { + const size = 3 + q := newQueue(size) + for i := 0; i < size-1; i++ { + q.write.Value = r + q.write = q.write.Next() + q.len++ + } + + buf := make([]Record, 1) + f := func([]Record) bool { return false } + assert.Equal(t, size-1, q.TryDequeue(buf, f), "not flushed") + require.Equal(t, size-1, q.len, "length") + require.NotSame(t, q.read, q.write, "read ring advanced") + + var flushed []Record + f = func(r []Record) bool { + flushed = append(flushed, r...) 
+ return true + } + if assert.Equal(t, size-2, q.TryDequeue(buf, f), "did not flush len(buf)") { + assert.Equal(t, []Record{r}, flushed, "Records") + } + + buf = slices.Grow(buf, size) + flushed = flushed[:0] + if assert.Equal(t, 0, q.TryDequeue(buf, f), "did not flush len(queue)") { + assert.Equal(t, []Record{r}, flushed, "Records") + } + }) + + t.Run("ConcurrentSafe", func(t *testing.T) { + const goRoutines = 10 + + flushed := make(chan []Record, goRoutines) + out := make([]Record, 0, goRoutines) + done := make(chan struct{}) + go func() { + defer close(done) + for recs := range flushed { + out = append(out, recs...) + } + }() + + var wg sync.WaitGroup + wg.Add(goRoutines) + + b := newQueue(goRoutines) + for i := 0; i < goRoutines; i++ { + go func() { + defer wg.Done() + b.Enqueue(Record{}) + flushed <- b.Flush() + }() + } + + wg.Wait() + close(flushed) + <-done + + assert.Len(t, out, goRoutines, "flushed Records") + }) +} diff --git a/sdk/log/exporter.go b/sdk/log/exporter.go index 9f85f8a1fd9..e3c2cd91421 100644 --- a/sdk/log/exporter.go +++ b/sdk/log/exporter.go @@ -5,6 +5,10 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" + "errors" + "fmt" + "sync" + "sync/atomic" "time" "go.opentelemetry.io/otel" @@ -54,6 +58,37 @@ func (noopExporter) Shutdown(context.Context) error { return nil } func (noopExporter) ForceFlush(context.Context) error { return nil } +// chunkExporter wraps an Exporter's Export method so it is called with +// appropriately sized export payloads. Any payload larger than a defined size +// is chunked into smaller payloads and exported sequentially. +type chunkExporter struct { + Exporter + + // size is the maximum batch size exported. + size int +} + +// newChunkExporter wraps exporter. Calls to Export will have their records +// payload chunked so they do not exceed size. If size is less than or equal +// to 0, exporter is returned directly. +func newChunkExporter(exporter Exporter, size int) Exporter { + if size <= 0 { + return exporter + } + return &chunkExporter{Exporter: exporter, size: size} +} + +// Export exports records in chunks no larger than c.size. +func (c chunkExporter) Export(ctx context.Context, records []Record) error { + n := len(records) + for i, j := 0, min(c.size, n); i < n; i, j = i+c.size, min(j+c.size, n) { + if err := c.Exporter.Export(ctx, records[i:j]); err != nil { + return err + } + } + return nil +} + // timeoutExporter wraps an Exporter and ensures any call to Export will have a // timeout for the context. type timeoutExporter struct { @@ -127,3 +162,132 @@ func (e exportData) respond(err error) { } } } + +// bufferExporter provides asynchronous and synchronous export functionality by +// buffering export requests. +type bufferExporter struct { + Exporter + + input chan exportData + inputMu sync.Mutex + + done chan struct{} + stopped atomic.Bool +} + +// newBufferExporter returns a new bufferExporter that wraps exporter. The +// returned bufferExporter will buffer at most size number of export requests. +// If size is less than zero, zero will be used (i.e. only synchronous +// exporting will be supported).
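The chunking loop above advances its start and end indices in lockstep, which is easy to misread. This standalone sketch (a hypothetical helper, not part of the SDK) isolates the same slicing arithmetic and prints the batch sizes it yields; the 25-record and 5-record cases match the chunk lengths asserted in TestChunker later in this diff:

package main

import "fmt"

// chunkSizes reports the lengths of the batches that a chunkExporter-style
// loop would produce for n records and a maximum chunk size.
func chunkSizes(n, size int) []int {
	var out []int
	for i, j := 0, min(size, n); i < n; i, j = i+size, min(j+size, n) {
		out = append(out, j-i)
	}
	return out
}

func main() {
	// 25 records with a chunk size of 10 export as batches of 10, 10, and 5.
	fmt.Println(chunkSizes(25, 10)) // [10 10 5]
	// 5 records fit in a single batch.
	fmt.Println(chunkSizes(5, 10)) // [5]
}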
+func newBufferExporter(exporter Exporter, size int) *bufferExporter { + if size < 0 { + size = 0 + } + input := make(chan exportData, size) + return &bufferExporter{ + Exporter: exporter, + + input: input, + done: exportSync(input, exporter), + } +} + +var errStopped = errors.New("exporter stopped") + +func (e *bufferExporter) enqueue(ctx context.Context, records []Record, rCh chan<- error) error { + data := exportData{ctx, records, rCh} + + e.inputMu.Lock() + defer e.inputMu.Unlock() + + // Check stopped before enqueueing now that e.inputMu is held. This + // prevents sends on a closed chan when Shutdown is called concurrently. + if e.stopped.Load() { + return errStopped + } + + select { + case e.input <- data: + case <-ctx.Done(): + return ctx.Err() + } + return nil +} + +// EnqueueExport enqueues an export of records to be performed +// asynchronously. This will return true if the export is +// successfully enqueued, false otherwise. +func (e *bufferExporter) EnqueueExport(records []Record) bool { + if len(records) == 0 { + // Nothing to enqueue, do not waste input space. + return true + } + return e.enqueue(context.Background(), records, nil) == nil +} + +// Export synchronously exports records in the context of ctx. This will not +// return until the export has been completed. +func (e *bufferExporter) Export(ctx context.Context, records []Record) error { + if len(records) == 0 { + return nil + } + + resp := make(chan error, 1) + err := e.enqueue(ctx, records, resp) + if err != nil { + if errors.Is(err, errStopped) { + return nil + } + return fmt.Errorf("%w: dropping %d records", err, len(records)) + } + + select { + case err := <-resp: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// ForceFlush flushes buffered exports. Any existing exports that are buffered +// are flushed before this returns. +func (e *bufferExporter) ForceFlush(ctx context.Context) error { + resp := make(chan error, 1) + err := e.enqueue(ctx, nil, resp) + if err != nil { + if errors.Is(err, errStopped) { + return nil + } + return err + } + + select { + case <-resp: + case <-ctx.Done(): + return ctx.Err() + } + return e.Exporter.ForceFlush(ctx) +} + +// Shutdown shuts down e. +// +// Any buffered exports are flushed before this returns. +// +// All calls to EnqueueExport or Export will return nil without any export +// after this is called. +func (e *bufferExporter) Shutdown(ctx context.Context) error { + if e.stopped.Swap(true) { + return nil + } + e.inputMu.Lock() + defer e.inputMu.Unlock() + + // No more sends will be made. + close(e.input) + select { + case <-e.done: + case <-ctx.Done(): + return errors.Join(ctx.Err(), e.Exporter.Shutdown(ctx)) + } + return e.Exporter.Shutdown(ctx) +} diff --git a/sdk/log/exporter_test.go b/sdk/log/exporter_test.go index 3c37b83ad38..85c12860409 100644 --- a/sdk/log/exporter_test.go +++ b/sdk/log/exporter_test.go @@ -5,6 +5,8 @@ package log import ( "context" + "io" + stdlog "log" "slices" "sync" "sync/atomic" @@ -12,6 +14,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/log" @@ -32,8 +35,10 @@ type testExporter struct { // Counts of method calls.
exportN, shutdownN, forceFlushN *int32 - input chan instruction - done chan struct{} + stopped atomic.Bool + inputMu sync.Mutex + input chan instruction + done chan struct{} } func newTestExporter(err error) *testExporter { @@ -84,7 +89,11 @@ func (e *testExporter) Export(ctx context.Context, r []Record) error { return ctx.Err() } } - e.input <- instruction{Record: &r} + e.inputMu.Lock() + defer e.inputMu.Unlock() + if !e.stopped.Load() { + e.input <- instruction{Record: &r} + } return e.Err } @@ -93,6 +102,12 @@ func (e *testExporter) ExportN() int { } func (e *testExporter) Stop() { + if e.stopped.Swap(true) { + return + } + e.inputMu.Lock() + defer e.inputMu.Unlock() + close(e.input) <-e.done } @@ -115,6 +130,66 @@ func (e *testExporter) ForceFlushN() int { return int(atomic.LoadInt32(e.forceFlushN)) } +func TestChunker(t *testing.T) { + t.Run("ZeroSize", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + c := newChunkExporter(exp, 0) + const size = 100 + _ = c.Export(context.Background(), make([]Record, size)) + + assert.Equal(t, 1, exp.ExportN()) + records := exp.Records() + assert.Len(t, records, 1) + assert.Len(t, records[0], size) + }) + + t.Run("ForceFlush", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + c := newChunkExporter(exp, 0) + _ = c.ForceFlush(context.Background()) + assert.Equal(t, 1, exp.ForceFlushN(), "ForceFlush not passed through") + }) + + t.Run("Shutdown", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + c := newChunkExporter(exp, 0) + _ = c.Shutdown(context.Background()) + assert.Equal(t, 1, exp.ShutdownN(), "Shutdown not passed through") + }) + + t.Run("Chunk", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + c := newChunkExporter(exp, 10) + assert.NoError(t, c.Export(context.Background(), make([]Record, 5))) + assert.NoError(t, c.Export(context.Background(), make([]Record, 25))) + + wantLens := []int{5, 10, 10, 5} + records := exp.Records() + require.Len(t, records, len(wantLens), "chunks") + for i, n := range wantLens { + assert.Lenf(t, records[i], n, "chunk %d", i) + } + }) + + t.Run("ExportError", func(t *testing.T) { + exp := newTestExporter(assert.AnError) + t.Cleanup(exp.Stop) + c := newChunkExporter(exp, 0) + ctx := context.Background() + records := make([]Record, 25) + err := c.Export(ctx, records) + assert.ErrorIs(t, err, assert.AnError, "no chunking") + + c = newChunkExporter(exp, 10) + err = c.Export(ctx, records) + assert.ErrorIs(t, err, assert.AnError, "with chunking") + }) +} + func TestExportSync(t *testing.T) { eventuallyDone := func(t *testing.T, done chan struct{}) { assert.Eventually(t, func() bool { @@ -131,6 +206,12 @@ func TestExportSync(t *testing.T) { var got error handler := otel.ErrorHandlerFunc(func(err error) { got = err }) otel.SetErrorHandler(handler) + t.Cleanup(func() { + l := stdlog.New(io.Discard, "", stdlog.LstdFlags) + otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { + l.Print(err) + })) + }) in := make(chan exportData, 1) exp := newTestExporter(assert.AnError) @@ -243,3 +324,266 @@ func TestTimeoutExporter(t *testing.T) { close(out) }) } + +func TestBufferExporter(t *testing.T) { + t.Run("ConcurrentSafe", func(t *testing.T) { + const goRoutines = 10 + + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, goRoutines) + + ctx := context.Background() + records := make([]Record, 10) + + stop := make(chan struct{}) + var wg sync.WaitGroup + for i := 0; i < goRoutines; i++ { + wg.Add(1) + 
go func() { + defer wg.Done() + for { + select { + case <-stop: + return + default: + _ = e.EnqueueExport(records) + _ = e.Export(ctx, records) + _ = e.ForceFlush(ctx) + } + } + }() + } + + assert.Eventually(t, func() bool { + return exp.ExportN() > 0 + }, 2*time.Second, time.Microsecond) + + assert.NoError(t, e.Shutdown(ctx)) + close(stop) + wg.Wait() + }) + + t.Run("Shutdown", func(t *testing.T) { + t.Run("Multiple", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 1) + + assert.NoError(t, e.Shutdown(context.Background())) + assert.Equal(t, 1, exp.ShutdownN(), "first Shutdown") + + assert.NoError(t, e.Shutdown(context.Background())) + assert.Equal(t, 1, exp.ShutdownN(), "second Shutdown") + }) + + t.Run("ContextCancelled", func(t *testing.T) { + exp := newTestExporter(assert.AnError) + t.Cleanup(exp.Stop) + + trigger := make(chan struct{}) + exp.ExportTrigger = trigger + t.Cleanup(func() { close(trigger) }) + e := newBufferExporter(exp, 1) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err := e.Shutdown(ctx) + assert.ErrorIs(t, err, context.Canceled) + assert.ErrorIs(t, err, assert.AnError) + }) + + t.Run("Error", func(t *testing.T) { + exp := newTestExporter(assert.AnError) + t.Cleanup(exp.Stop) + + e := newBufferExporter(exp, 1) + assert.ErrorIs(t, e.Shutdown(context.Background()), assert.AnError) + }) + }) + + t.Run("ForceFlush", func(t *testing.T) { + t.Run("Multiple", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 2) + + ctx := context.Background() + records := make([]Record, 1) + require.NoError(t, e.enqueue(ctx, records, nil), "enqueue") + + assert.NoError(t, e.ForceFlush(ctx), "ForceFlush records") + assert.Equal(t, 1, exp.ExportN(), "Export number incremented") + assert.Len(t, exp.Records(), 1, "exported Record batches") + + // Nothing to flush. + assert.NoError(t, e.ForceFlush(ctx), "ForceFlush empty") + assert.Equal(t, 1, exp.ExportN(), "Export number changed") + assert.Len(t, exp.Records(), 0, "exported non-zero Records") + }) + + t.Run("ContextCancelled", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + + trigger := make(chan struct{}) + exp.ExportTrigger = trigger + t.Cleanup(func() { close(trigger) }) + e := newBufferExporter(exp, 1) + + ctx, cancel := context.WithCancel(context.Background()) + require.True(t, e.EnqueueExport(make([]Record, 1))) + + got := make(chan error, 1) + go func() { got <- e.ForceFlush(ctx) }() + require.Eventually(t, func() bool { + return exp.ExportN() > 0 + }, 2*time.Second, time.Microsecond) + cancel() // Canceled before export response. 
+ err := <-got + assert.ErrorIs(t, err, context.Canceled, "enqueued") + _ = e.Shutdown(ctx) + + // Zero length buffer + e = newBufferExporter(exp, 0) + assert.ErrorIs(t, e.ForceFlush(ctx), context.Canceled, "not enqueued") + }) + + t.Run("Error", func(t *testing.T) { + exp := newTestExporter(assert.AnError) + t.Cleanup(exp.Stop) + + e := newBufferExporter(exp, 1) + assert.ErrorIs(t, e.ForceFlush(context.Background()), assert.AnError) + }) + + t.Run("Stopped", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + + e := newBufferExporter(exp, 1) + + ctx := context.Background() + _ = e.Shutdown(ctx) + assert.NoError(t, e.ForceFlush(ctx)) + }) + }) + + t.Run("Export", func(t *testing.T) { + t.Run("ZeroRecords", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 1) + + assert.NoError(t, e.Export(context.Background(), nil)) + assert.Equal(t, 0, exp.ExportN()) + }) + + t.Run("Multiple", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 1) + + ctx := context.Background() + records := make([]Record, 1) + records[0].SetBody(log.BoolValue(true)) + + assert.NoError(t, e.Export(ctx, records)) + + n := exp.ExportN() + assert.Equal(t, 1, n, "first Export number") + assert.Equal(t, [][]Record{records}, exp.Records()) + + assert.NoError(t, e.Export(ctx, records)) + assert.Equal(t, n+1, exp.ExportN(), "second Export number") + assert.Equal(t, [][]Record{records}, exp.Records()) + }) + + t.Run("ContextCancelled", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + + trigger := make(chan struct{}) + exp.ExportTrigger = trigger + t.Cleanup(func() { close(trigger) }) + e := newBufferExporter(exp, 1) + + records := make([]Record, 1) + ctx, cancel := context.WithCancel(context.Background()) + + got := make(chan error, 1) + go func() { got <- e.Export(ctx, records) }() + require.Eventually(t, func() bool { + return exp.ExportN() > 0 + }, 2*time.Second, time.Microsecond) + cancel() // Canceled before export response. 
+ err := <-got + assert.ErrorIs(t, err, context.Canceled, "enqueued") + _ = e.Shutdown(ctx) + + // Zero length buffer + e = newBufferExporter(exp, 0) + assert.ErrorIs(t, e.Export(ctx, records), context.Canceled, "not enqueued") + }) + + t.Run("Error", func(t *testing.T) { + exp := newTestExporter(assert.AnError) + t.Cleanup(exp.Stop) + + e := newBufferExporter(exp, 1) + ctx, records := context.Background(), make([]Record, 1) + assert.ErrorIs(t, e.Export(ctx, records), assert.AnError) + }) + + t.Run("Stopped", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + + e := newBufferExporter(exp, 1) + + ctx := context.Background() + _ = e.Shutdown(ctx) + assert.NoError(t, e.Export(ctx, make([]Record, 1))) + assert.Equal(t, 0, exp.ExportN(), "Export called") + }) + }) + + t.Run("EnqueueExport", func(t *testing.T) { + t.Run("ZeroRecords", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 1) + + assert.True(t, e.EnqueueExport(nil)) + e.ForceFlush(context.Background()) + assert.Equal(t, 0, exp.ExportN(), "empty batch enqueued") + }) + + t.Run("Multiple", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 2) + + records := make([]Record, 1) + records[0].SetBody(log.BoolValue(true)) + + assert.True(t, e.EnqueueExport(records)) + assert.True(t, e.EnqueueExport(records)) + e.ForceFlush(context.Background()) + + n := exp.ExportN() + assert.Equal(t, 2, n, "Export number") + assert.Equal(t, [][]Record{records, records}, exp.Records()) + }) + + t.Run("Stopped", func(t *testing.T) { + exp := newTestExporter(nil) + t.Cleanup(exp.Stop) + e := newBufferExporter(exp, 1) + + _ = e.Shutdown(context.Background()) + assert.False(t, e.EnqueueExport(make([]Record, 1))) + }) + }) +} diff --git a/semconv/metric_template.j2 b/semconv/metric_template.j2 new file mode 100644 index 00000000000..144d280a875 --- /dev/null +++ b/semconv/metric_template.j2 @@ -0,0 +1,49 @@ +{%- macro to_go_name(fqn) -%} +{{fqn | replace(".", " ") | replace("_", " ") | title | replace(" ", "")}} +{%- endmacro -%} +{%- macro it_reps(brief) -%} +It represents {% if brief[:2] == "A " or brief[:3] == "An " or brief[:4] == "The " -%} + {{ brief[0]|lower }}{{ brief[1:] }} +{%- else -%} + the {{ brief[0]|lower }}{{ brief[1:] }} +{%- endif -%} +{%- endmacro -%} +{%- macro keydoc(metric) -%} +{%- if metric.stability|string() == "StabilityLevel.DEPRECATED" or not metric.brief-%} +{{ to_go_name(metric.metric_name) }} is the metric conforming to the "{{ metric.metric_name}}" semantic conventions. +{%- else -%} +{{ to_go_name(metric.metric_name) }} is the metric conforming to the "{{ metric.metric_name}}" semantic conventions. {{ it_reps(metric.brief)|trim(".") }}. +{%- endif %} +{%- endmacro -%} +{%- macro format_stability(stability) -%} +{%- if not stability -%} +Experimental +{%- else -%} +{{ stability|replace("StabilityLevel.", "")|capitalize() }} +{%- endif %} +{%- endmacro -%} +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +package semconv // import [[IMPORTPATH]] + +const ( +{% for id in semconvs %} +{%- if semconvs[id].GROUP_TYPE_NAME == 'metric' %}{% set metric = semconvs[id] %} + // {{ keydoc(metric) | wordwrap(76, break_long_words=false, break_on_hyphens=false, wrapstring="\n// ") }} + // Instrument: {{ metric.instrument }} + // Unit: {{ metric.unit }} + // Stability: {{ format_stability(metric.stability) }} +{%- if not metric.brief %} + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. +{%- endif %} + {{to_go_name(metric.metric_name)}}Name = "{{metric.metric_name}}" + {{to_go_name(metric.metric_name)}}Unit = "{{metric.unit}}" +{%- if metric.brief %} + {{to_go_name(metric.metric_name)}}Description = "{{metric.brief}}" +{%- endif %} +{%- endif %} +{% endfor %} +) diff --git a/semconv/v1.24.0/metric.go b/semconv/v1.24.0/metric.go new file mode 100644 index 00000000000..a6b953f625e --- /dev/null +++ b/semconv/v1.24.0/metric.go @@ -0,0 +1,1071 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // time between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Experimental + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Experimental + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." 
+ + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Experimental + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
+ + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + + // HTTPClientRequestTimeInQueue is the metric conforming to the + // "http.client.request.time_in_queue" semantic conventions. It represents the + // amount of time requests spent on a queue waiting for an available + // connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" + HTTPClientRequestTimeInQueueUnit = "s" + HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Experimental + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." 
+ + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. It represents the + // number of connections that are currently upgraded (WebSockets). . + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Experimental + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It represents the measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It represents the measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts. + // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." 
+ + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the cPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent + // CPU utilization for the process as reported by the JVM. 
+ // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingDeliverDuration is the metric conforming to the + // "messaging.deliver.duration" semantic conventions. It represents the + // measures the duration of deliver operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingDeliverDurationName = "messaging.deliver.duration" + MessagingDeliverDurationUnit = "s" + MessagingDeliverDurationDescription = "Measures the duration of deliver operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingDeliverMessages is the metric conforming to the + // "messaging.deliver.messages" semantic conventions. It represents the + // measures the number of delivered messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingDeliverMessagesName = "messaging.deliver.messages" + MessagingDeliverMessagesUnit = "{message}" + MessagingDeliverMessagesDescription = "Measures the number of delivered messages." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. 
It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It represents the reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It represents the reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It represents the reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It represents the reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." 
+ + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. 
+ // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessesCount is the metric conforming to the + // "system.processes.count" semantic conventions. It represents the total + // number of processes in each state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessesCountName = "system.processes.count" + SystemProcessesCountUnit = "{process}" + SystemProcessesCountDescription = "Total number of processes in each state" + + // SystemProcessesCreated is the metric conforming to the + // "system.processes.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessesCreatedName = "system.processes.created" + SystemProcessesCreatedUnit = "{process}" + SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/versions.yaml b/versions.yaml index 21825dc1376..13880032285 100644 --- a/versions.yaml +++ b/versions.yaml @@ -44,3 +44,4 @@ module-sets: excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
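The generated constants in semconv/v1.24.0/metric.go are plain Name/Unit/Description string triples intended to be passed to instrument constructors. A minimal usage sketch, assuming the public go.opentelemetry.io/otel and go.opentelemetry.io/otel/metric APIs; the meter name is illustrative and not part of the change above:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Meter name chosen for illustration only.
	meter := otel.Meter("semconv-metric-example")

	// Create a histogram whose name, unit, and description follow the
	// "http.server.request.duration" semantic conventions generated above.
	dur, err := meter.Float64Histogram(
		semconv.HTTPServerRequestDurationName,
		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
	)
	if err != nil {
		panic(err)
	}

	// Record a measurement of 0.123 s. With no SDK-backed MeterProvider
	// registered, the global provider is a no-op, so this is safe to run.
	dur.Record(context.Background(), 0.123)
}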