rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies
to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna
2023-02-01 18:06:36 +01:00
committed by mergify[bot]
parent e9e33fb851
commit 9c8de9471e
937 changed files with 75539 additions and 33050 deletions

vendor/go.opentelemetry.io/otel/.gitattributes generated vendored Normal file

@@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf

vendor/go.opentelemetry.io/otel/.gitignore generated vendored

@@ -10,10 +10,12 @@ coverage.*
gen/
/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/prom-collector/prom-collector
/example/zipkin/zipkin
/example/otel-collector/otel-collector

vendor/go.opentelemetry.io/otel/.golangci.yml generated vendored

@@ -4,29 +4,243 @@ run:
tests: true #Default
linters:
# Disable everything by default so upgrades to not include new "default
# enabled" linters.
disable-all: true
# Specifically enable linters we want to use.
enable:
- misspell
- goimports
- golint
- deadcode
- depguard
- errcheck
- godot
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
issues:
# Maximum issues count per one linter.
# Set to 0 to disable.
# Default: 50
# Setting to unlimited so the linter only is run once to debug all issues.
max-issues-per-linter: 0
# Maximum count of issues with the same text.
# Set to 0 to disable.
# Default: 3
# Setting to unlimited so the linter only is run once to debug all issues.
max-same-issues: 0
# Excluding configuration per-path, per-linter, per-text and per-source.
exclude-rules:
# helpers in tests often (rightfully) pass a *testing.T as their first argument
- path: _test\.go
text: "context.Context should be the first parameter of a function"
# TODO: Having appropriate comments for exported objects helps development,
# even for objects in internal packages. Appropriate comments for all
# exported objects should be added and this exclusion removed.
- path: '.*internal/.*'
text: "exported (method|function|type|const) (.+) should have comment or be unexported"
linters:
- golint
# Yes, they are, but it's okay in a test
- revive
# Yes, they are, but it's okay in a test.
- path: _test\.go
text: "exported func.*returns unexported type.*which can be annoying to use"
linters:
- golint
- revive
# Example test functions should be treated like main.
- path: example.*_test\.go
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
include:
# revive exported should have comment or be unexported.
- EXC0012
# revive package comment should be of the form ...
- EXC0013
linters-settings:
depguard:
# Check the list against standard lib.
# Default: false
include-go-root: true
# A list of packages for the list type specified.
# Default: []
packages:
- "crypto/md5"
- "crypto/sha1"
- "crypto/**/pkix"
ignore-file-rules:
- "**/*_test.go"
additional-guards:
# Do not allow testing packages in non-test files.
- list-type: denylist
include-go-root: true
packages:
- testing
- github.com/stretchr/testify
ignore-file-rules:
- "**/*_test.go"
- "**/*test/*.go"
- "**/internal/matchers/*.go"
godot:
exclude:
# Exclude sentence fragments for lists.
- '^[ ]*[-•]'
# Exclude sentences prefixing a list.
- ':$'
goimports:
local-prefixes: go.opentelemetry.io
misspell:
locale: US
ignore-words:
- cancelled
goimports:
local-prefixes: go.opentelemetry.io
revive:
# Sets the default failure confidence.
# This means that linting errors with less than 0.8 confidence will be ignored.
# Default: 0.8
confidence: 0.01
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
- name: blank-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
- name: bool-literal-in-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
- name: context-as-argument
disabled: false
arguments:
allowTypesBefore: "*testing.T"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
- name: context-keys-type
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
- name: deep-exit
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
- name: defer
disabled: false
arguments:
- ["call-chain", "loop"]
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
- name: dot-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
- name: duplicated-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
- name: early-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
- name: empty-block
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
- name: empty-lines
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
- name: error-naming
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
- name: error-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
- name: error-strings
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
- name: errorf
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
- name: exported
disabled: false
arguments:
- "sayRepetitiveInsteadOfStutters"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
- name: flag-parameter
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
- name: identical-branches
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- name: if-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
- name: increment-decrement
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
- name: indent-error-flow
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
- name: import-shadowing
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
- name: package-comments
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
- name: range
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
- name: range-val-in-closure
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
- name: range-val-address
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
- name: redefines-builtin-id
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
- name: string-format
disabled: false
arguments:
- - panic
- '/^[^\n]*$/'
- must not contain line breaks
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
- name: struct-tag
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
- name: superfluous-else
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
- name: time-equal
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
- name: var-naming
disabled: false
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
- name: var-declaration
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
- name: unconditional-recursion
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
- name: unexported-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
- name: unhandled-error
disabled: false
arguments:
- "fmt.Fprint"
- "fmt.Fprintf"
- "fmt.Fprintln"
- "fmt.Print"
- "fmt.Printf"
- "fmt.Println"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
- name: unnecessary-stmt
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
- name: useless-break
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- name: waitgroup-by-value
disabled: false
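
The `revive` rules enabled above encode common Go conventions. A minimal, illustrative Go sketch of code these rules are meant to catch; the names below are hypothetical and not part of the vendored sources:

```go
// Package lintexample illustrates, for this config only, patterns that the
// enabled revive rules would flag or accept.
package lintexample

import (
	"context"
	"errors"
)

// ErrNotFound satisfies error-naming; a name like NotFoundError would be flagged.
var ErrNotFound = errors.New("not found")

// queryBad would be flagged by context-as-argument: context.Context
// must be the first parameter of a function.
func queryBad(id string, ctx context.Context) error {
	_, _ = id, ctx
	return nil
}

// queryGood passes the rule; godot also requires this comment to end in a period.
func queryGood(ctx context.Context, id string) error {
	_, _ = id, ctx
	return nil
}
```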

vendor/go.opentelemetry.io/otel/.lycheeignore generated vendored Normal file

@@ -0,0 +1,3 @@
http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/

vendor/go.opentelemetry.io/otel/.markdownlint.yaml generated vendored Normal file

@@ -0,0 +1,29 @@
# Default state for all rules
default: true
# ul-style
MD004: false
# hard-tabs
MD010: false
# line-length
MD013: false
# no-duplicate-header
MD024:
siblings_only: true
#single-title
MD025: false
# ol-prefix
MD029:
style: ordered
# no-inline-html
MD033: false
# fenced-code-language
MD040: false

vendor/go.opentelemetry.io/otel/CHANGELOG.md generated vendored

@@ -8,16 +8,637 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.10.0] - 2022-09-09
### Added
- Support Go 1.19. (#3077)
Include compatibility testing and document support. (#3077)
- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107)
### Changed
### Deprecated
- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
- All exporters will be shutdown even if one reports an error (#3091)
- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
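The 1.10.0 entry above routes OTLP partial-success responses to the registered error handler. A minimal sketch of registering one, assuming the standard `otel.SetErrorHandler`/`otel.ErrorHandlerFunc` API rather than anything specific to this diff:

```go
package main

import (
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Errors the SDK cannot surface directly, including OTLP
	// partial-success notifications, are passed to this handler.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel error: %v", err)
	}))
}
```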
## [1.9.0/0.0.3] - 2022-08-01
### Added
- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
### Fixed
- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
## [1.8.0/0.31.0] - 2022-07-08
### Added
- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
### Changed
- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976)
- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866)
### Removed
- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917)
### Deprecated
- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
Use the equivalent `Scope` struct instead. (#2977)
- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
## [1.7.0/0.30.0] - 2022-04-28
### Added
- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
### Fixed
- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
### Changed
- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
### Deprecated
- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `Iterator.Attribute` method instead. (#2790)
- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
### Removed
- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
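The 1.7.0 deprecations above replace the `Label` accessors on attribute iterators with `Attribute` accessors. A short sketch of the replacement usage, assuming the `attribute.Set` iterator API:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service.name", "demo"),
		attribute.Int("retries", 3),
	)

	// Iterator.Attribute replaces the deprecated Iterator.Label accessor.
	iter := set.Iter()
	for iter.Next() {
		kv := iter.Attribute()
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}
```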
## [0.29.0] - 2022-04-11
### Added
- The metrics global package was added back into several test files. (#2764)
- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
### Removed
- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
### Changed
- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
## [1.6.3] - 2022-04-07
### Fixed
- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
## [1.6.2] - 2022-04-06
### Changed
- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
## [1.6.1] - 2022-03-28
### Fixed
- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
### Security
- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
## [1.6.0/0.28.0] - 2022-03-23
### ⚠️ Notice ⚠️
This update is a breaking change of the unstable Metrics API.
Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified.
### Added
- Add metrics exponential histogram support.
New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
- Add Go 1.18 to our compatibility tests. (#2679)
- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
### Changed
- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
High-level changes include:
- Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
These `InstrumentProvider`s are managed with a `Meter`.
- Synchronous and asynchronous instruments are grouped into their own packages based on value types.
- Asynchronous callbacks can now be registered with a `Meter`.
Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
### Fixed
- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
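The sampler environment variables added in 1.6.0 (see above) can drive the default sampler without code changes; a hedged sketch, assuming the SDK consults them when no explicit sampler option is passed:

```go
package main

import (
	"os"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Sample 25% of root traces; values follow the OpenTelemetry
	// specification for OTEL_TRACES_SAMPLER.
	_ = os.Setenv("OTEL_TRACES_SAMPLER", "traceidratio")
	_ = os.Setenv("OTEL_TRACES_SAMPLER_ARG", "0.25")

	tp := sdktrace.NewTracerProvider()
	_ = tp
}
```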
## [1.5.0] - 2022-03-16
### Added
- Log the Exporters configuration in the TracerProviders message. (#2578)
- Added support to configure the span limits with environment variables.
The following environment variables are supported. (#2606, #2637)
- `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
- `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_EVENT_COUNT_LIMIT`
- `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_LINK_COUNT_LIMIT`
- `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
If the provided environment variables are invalid (negative), the default values would be used.
- Rename the `gc` runtime name to `go` (#2560)
- Add resource container ID detection. (#2418)
- Add span attribute value length limit.
The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
The default limit for this resource is "unlimited". (#2637)
- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
This option replaces the `WithSpanLimits` option.
Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
### Changed
- Drop oldest tracestate `Member` when capacity is reached. (#2592)
- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639)
- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
### Fixed
- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
- Unlimited span limits are now supported (negative values). (#2636, #2637)
### Deprecated
- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
Use `WithRawSpanLimits` instead.
That option allows setting unlimited and zero limits, this option does not.
This option will be kept until the next major version incremented release. (#2637)
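To make the 1.5.0 span-limit entries above concrete, a sketch of setting limits in code with `WithRawSpanLimits` (the `OTEL_SPAN_*` variables listed above configure the same limits via the environment); field names are assumed from the `sdk/trace` package:

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// WithRawSpanLimits keeps zero and negative values as given:
	// negative means unlimited, zero disables the limited resource.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeValueLengthLimit = 256 // truncate long attribute values
	limits.AttributeCountLimit = -1        // unlimited attributes per span

	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```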
## [1.4.1] - 2022-02-16
### Fixed
- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
## [1.4.0] - 2022-02-11
### Added
- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
To enable use a logger with Verbosity (V level) `>=1`. (#2500)
- Added support to configure the batch span-processor with environment variables.
The following environment variables are used. (#2515)
- `OTEL_BSP_SCHEDULE_DELAY`
- `OTEL_BSP_EXPORT_TIMEOUT`
- `OTEL_BSP_MAX_QUEUE_SIZE`.
- `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
### Changed
- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
### Deprecated
- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
### Fixed
- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
- W3C baggage will now decode urlescaped values. (#2529)
- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
This drop order still only applies to attributes with unique keys not already contained in the span.
If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
### Removed
- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
- [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
- [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
- [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
## [1.3.0] - 2021-12-10
### ⚠️ Notice ⚠️
We have updated the project minimum supported Go version to 1.16
### Added
- Added an internal Logger.
This can be used by the SDK and API to provide users with feedback of the internal state.
To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343)
- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
### Changed
- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
### Fixed
- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381)
- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
### Deprecated
- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
### Removed
- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
- Remove the metric Bound Instruments interface and implementations. (#2399)
- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
## [1.2.0] - 2021-11-12
### Changed
- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
- The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
- The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
- The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
### Added
- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
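The `MapCarrier` entry above provides an in-memory `TextMapCarrier`; a small sketch of injecting and extracting trace context through it with the standard `propagation.TraceContext` propagator:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.TraceContext{}
	carrier := propagation.MapCarrier{}

	// Inject whatever span context is in the context into the carrier ...
	prop.Inject(context.Background(), carrier)

	// ... and extract it again on the receiving side.
	_ = prop.Extract(context.Background(), carrier)
	fmt.Println(carrier) // holds the traceparent header, if any span was active
}
```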
## [1.1.0] - 2021-10-27
### Added
- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
- When upgrading from the `semconv/v1.4.0` package note the following name changes:
- `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
- `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
- `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
- `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
- `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
- `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
### Changed
- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275).
### Fixed
- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
## [1.0.1] - 2021-10-01
### Fixed
- json stdout exporter no longer crashes due to concurrency bug. (#2265)
## [Metrics 0.24.0] - 2021-10-01
### Changed
- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
- The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
- The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
## [1.0.0] - 2021-09-20
This is the first stable release for the project.
This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md).
### Added
- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
### Fixed
- Slice-valued attributes can correctly be used as map keys. (#2223)
### Removed
- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
Use the typed functions and methods added to the package instead. (#2235)
- The `Key.Array` method is removed.
- The `Array` function is removed.
- The `Any` function is removed.
- The `ArrayValue` function is removed.
- The `AsArray` function is removed.
## [1.0.0-RC3] - 2021-09-02
### Added
- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
- `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
- Added the `go.opentelemetry.io/otel/example/fib` example package.
Included is an example application that computes Fibonacci numbers. (#2203)
### Changed
- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
- ValueRecorder becomes Histogram
- ValueObserver becomes Gauge
- SumObserver becomes CounterObserver
- UpDownSumObserver becomes UpDownCounterObserver
The API exported from this project is still considered experimental. (#2202)
- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
### Deprecated
- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
The functions from that package should be used instead. (#2166)
- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
Use the typed `*Slice` functions and types added to the package instead. (#2162)
- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
Use the typed functions instead. (#2181)
- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
### Removed
- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
### Fixed
- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
- Fixed typos in resources.go. (#2201)
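Two of the 1.0.0-RC3 additions above are small API touches; a hedged sketch combining the typed slice attributes and `WithStackTrace` on `RecordError`:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "work")
	defer span.End()

	// StringSlice and friends replace the removed Array-based helpers.
	span.SetAttributes(attribute.StringSlice("tags", []string{"a", "b"}))

	// WithStackTrace attaches the current stack to the recorded error event.
	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
}
```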
## [1.0.0-RC2] - 2021-07-26
### Added
- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
### Changed
- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
### Deprecated
- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
Use the `trace.ParseTraceState` function instead. (#2122)
### Removed
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
### Fixed
- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
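The RC2 `SpanRecorder` entry above describes a testing span processor for the default SDK; a minimal sketch of wiring it up with the `go.opentelemetry.io/otel/sdk/trace/tracetest` package:

```go
package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	// Register the recorder as an ordinary SpanProcessor on the real SDK.
	rec := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(rec))

	_, span := tp.Tracer("test").Start(context.Background(), "op")
	span.End()

	// Ended returns the finished spans for assertions in tests.
	fmt.Println(len(rec.Ended()))
}
```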
## [Experimental Metrics v0.22.0] - 2021-07-19
### Added
- Adds HTTP support for OTLP metrics exporter. (#2022)
### Removed
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
## [1.0.0-RC1] / 0.21.0 - 2021-06-18
With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
with major version 0.
### Added
- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
- The following status codes are defined as transient errors:
| gRPC Status Code | Description |
| ---------------- | ----------- |
| 1 | Cancelled |
| 4 | Deadline Exceeded |
| 8 | Resource Exhausted |
| 10 | Aborted |
| 11 | Out of Range |
| 14 | Unavailable |
| 15 | Data Loss |
- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
This method returns the number of list-members the `TraceState` holds. (#1937)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1922)
- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
- Several builtin resource detectors now correctly populate the schema URL. (#1938)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`.(#1991)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
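The `Baggage` additions listed above replace the old context helpers in the baggage package; a hedged sketch of building baggage and reading it back from a context:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Members are validated once, when they are created.
	m, err := baggage.NewMember("user.id", "42")
	if err != nil {
		panic(err)
	}
	bag, err := baggage.New(m)
	if err != nil {
		panic(err)
	}

	// ContextWithBaggage and FromContext replace the removed Set/Value helpers.
	ctx := baggage.ContextWithBaggage(context.Background(), bag)
	fmt.Println(baggage.FromContext(ctx).Member("user.id").Value())
}
```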
### Changed
- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
`NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
This method returns the status of a span using the new `Status` type. (#1874)
- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
- Unembed `SpanContext` in `Link`. (#1877)
- Generate Semantic conventions from the specification YAML. (#1891)
- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config.) (#1921)
- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921)
- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Refactored option types according to the contribution style guide. (#1882)
- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
### Deprecated
- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
### Removed
- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. (#1810)
- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
The `Tracer` method is not a required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
The `IsRecording` method returns if the span is recording or not.
A read-only span value does not need to know if updates to it will be recorded or not.
By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
- The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
### Fixed
- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
### Security
## [0.20.0] - 2021-04-23
@@ -62,6 +683,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
- `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
### Fixed
@@ -73,6 +695,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
- Fixed typo for default service name in Jaeger Exporter. (#1797)
- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
### Changed
@@ -89,19 +713,18 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD`
in compliance with OTel spec (#1752)
to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
- Modify Zipkin Exporter default service name, use default resouce's serviceName instead of empty. (#1777)
- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796)
- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
- Updated the Jaeger exporter to have a default enpoint, `http://localhost:14250`, for the collector. (#1824)
- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
@ -137,8 +760,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
- The Jaeger exporter `Option` type is removed.
The type is no longer used by the exporter to configure anything.
All of the previous configuration these options provided were duplicates of SDK configuration.
They have all been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830)
All the previous configurations these options provided were duplicates of SDK configuration.
They have been removed in favor of using the SDK configuration, which focuses the exporter configuration on only the endpoints it will send telemetry to. (#1830)
## [0.19.0] - 2021-03-18
@ -220,7 +843,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Stagger timestamps in exact aggregator tests. (#1569)
- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
- Prevent end-users from implementing some interfaces (#1575)
```
"otel/exporters/otlp/otlphttp".Option
"otel/exporters/stdout".Option
"otel/oteltest".Option
@ -233,7 +857,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
"otel/sdk/trace".ParentBasedSamplerOption
"otel/sdk/trace".ReadOnlySpan
"otel/sdk/trace".ReadWriteSpan
```
### Removed
- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
@ -815,7 +1440,7 @@ This release implements the v0.5.0 version of the OpenTelemetry specification.
- Rename `Observer` instrument to `ValueObserver`. (#734)
- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 727)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
### Fixed
@ -918,7 +1543,6 @@ This release implements the v0.5.0 version of the OpenTelemetry specification.
- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610
- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
## [0.4.2] - 2020-03-31
### Fixed
@ -978,7 +1602,7 @@ There is still a possibility of breaking changes.
- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
- The zipkin trace exporter. (#495)
- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
- The `StatusMessage` field was add to the trace `Span`. (#524)
- Add `StatusMessage` field to the trace `Span`. (#524)
- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
- The `Resource` type was added to the SDK. (#528)
- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
@ -1071,14 +1695,12 @@ There is still a possibility of breaking changes.
- `AlwaysParentSample` sampler to the trace API. (#455)
- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
### Changed
- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
- Move correlation context propagation to correlation package. (#479)
- Do not default to putting remote span context into links. (#480)
- Propagators extrac
- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
- Renamed the `export` package to `metric` to match directory structure. (#432)
@ -1226,7 +1848,6 @@ There is still a possibility of breaking changes.
This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
This was corrected. (#333)
## [0.1.2] - 2019-11-18
### Fixed
@ -1286,8 +1907,29 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.20.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD
[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0

View File

@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @Aneurysm9
CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod

View File

@ -15,11 +15,11 @@ join the meeting or get in touch on
You can view and edit the source code by cloning this repository:
```bash
```sh
git clone https://github.com/open-telemetry/opentelemetry-go.git
```
Run `make test` to run the tests instead of `go test`.
There are some generated files checked into the repo. To make sure
that the generated files are up-to-date, run `make` (or `make
@ -43,7 +43,7 @@ To create a new PR, fork the project in GitHub and clone the upstream
repo:
```sh
$ go get -d go.opentelemetry.io/otel
go get -d go.opentelemetry.io/otel
```
(This may print some warning about "build constraints exclude all Go
@ -53,7 +53,7 @@ This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
can alternatively use `git` directly with:
```sh
$ git clone https://github.com/open-telemetry/opentelemetry-go
git clone https://github.com/open-telemetry/opentelemetry-go
```
(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
@ -66,20 +66,20 @@ current working directory.
Enter the newly created directory and add your fork as a new remote:
```sh
$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
```
Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md`, and push the branch to your fork:
```sh
$ git checkout -b <YOUR_BRANCH_NAME>
git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
$ make precommit
$ git add -p
$ git commit
$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
make precommit
git add -p
git commit
git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
@ -113,6 +113,7 @@ A PR is considered to be **ready to merge** when:
one day and may be merged with a single Maintainer's approval.
* `CHANGELOG.md` has been updated to reflect what has been
added, changed, removed, or fixed.
* `README.md` has been updated if necessary.
* Urgent fix can take exception as long as it has been actively
communicated.
@ -140,8 +141,28 @@ It is preferable to have contributions follow the idioms of the
language rather than conform to specific API names or argument
patterns in the spec.
For a deeper discussion, see:
https://github.com/open-telemetry/opentelemetry-specification/issues/165
For a deeper discussion, see
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
## Documentation
Each non-example Go Module should have its own `README.md` containing:
- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
- Brief description.
- Installation instructions (and requirements if applicable).
- Hyperlink to an example. Depending on the component the example can be:
- An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
- A sample Go application with its own `README.md`, like [here](example/zipkin).
- Additional documentation sections such as:
- Configuration,
- Contributing,
- References.
[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
Moreover, it should be possible to navigate to any `README.md` from the
root `README.md`.
## Style Guide
@ -166,32 +187,40 @@ to the reasons why.
### Configuration
When creating an instantiation function for a complex `struct` it is useful
to allow variable number of options to be applied. However, the strong type
system of Go restricts the function design options. There are a few ways to
solve this problem, but we have landed on the following design.
When creating an instantiation function for a complex `type T struct`, it is
useful to allow a variable number of options to be applied. However, the strong
type system of Go restricts the function design options. There are a few ways
to solve this problem, but we have landed on the following design.
#### `config`
Configuration should be held in a `struct` named `config`, or prefixed with
the specific type name this configuration applies to if there are multiple
`config` in the package. This `struct` must contain configuration options.
`config` in the package. This type must contain configuration options.
```go
// config contains configuration options for a thing.
type config struct {
// options ...
// options ...
}
```
In general the `config` `struct` will not need to be used externally to the
In general the `config` type will not need to be used externally to the
package and should be unexported. If, however, it is expected that the user
will likely want to build custom options for the configuration, the `config`
should be exported. Please include in the documentation for the `config`
how the user can extend the configuration.
It is important that `config` are not shared across package boundaries.
Meaning a `config` from one package should not be directly used by another.
It is important that internal `config` types are not shared across package boundaries.
That is, a `config` from one package should not be directly used by another. The
one exception is the API packages. The configs from the base API, e.g.
`go.opentelemetry.io/otel/trace.TracerConfig` and
`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
by the SDK; therefore, it is expected that these are exported.
When a config is exported we want to maintain forward and backward
compatibility. To achieve this, no fields should be exported; they should
instead be accessed by methods.
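For illustration, a minimal sketch of an exported config that follows this
rule (the `TraceConfig` name and `spanLimit` field are hypothetical):
```go
// TraceConfig is exported so users can build custom options, but its fields
// stay unexported so they can change without breaking compatibility.
type TraceConfig struct {
	spanLimit int
}

// SpanLimit returns the configured span limit.
func (c TraceConfig) SpanLimit() int { return c.spanLimit }
```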
Optionally, it is common to include a `newConfig` function (with the same
naming scheme). This function wraps any defaults setting and looping over
@ -199,14 +228,14 @@ all options to create a configured `config`.
```go
// newConfig returns an appropriately configured config.
func newConfig([]Option) config {
// Set default values for config.
config := config{/* […] */}
for _, option := range options {
option.Apply(&config)
}
// Preform any validation here.
return config
func newConfig(options ...Option) config {
// Set default values for config.
config := config{/* […] */}
for _, option := range options {
config = option.apply(config)
}
// Perform any validation here.
return config
}
```
@ -224,10 +253,17 @@ To set the value of the options a `config` contains, a corresponding
```go
type Option interface {
Apply(*config)
apply(config) config
}
```
Having `apply` unexported makes sure that it will not be used externally.
Moreover, the interface becomes sealed so the user cannot easily implement
the interface on its own.
The `apply` method should return a modified version of the passed config.
This approach, instead of passing a pointer, is used to prevent the config from being allocated on the heap.
The name of the interface should be prefixed in the same way the
corresponding `config` is (if at all).
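For example, a brief sketch of this prefixing when a package holds more than
one config (the `tracerConfig` and `TracerOption` names here are only
illustrative):
```go
// tracerConfig is one of several configs in the package, so its option
// interface carries the matching Tracer prefix.
type tracerConfig struct {
	name string
}

// TracerOption applies an option to a tracerConfig.
type TracerOption interface {
	apply(tracerConfig) tracerConfig
}
```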
@ -250,53 +286,74 @@ func With*(…) Option { … }
```go
type defaultFalseOption bool
func (o defaultFalseOption) Apply(c *config) {
c.Bool = bool(o)
func (o defaultFalseOption) apply(c config) config {
c.Bool = bool(o)
return c
}
// WithOption sets a T* to have an option included.
// WithOption sets a T to have an option included.
func WithOption() Option {
return defaultFalseOption(true)
return defaultFalseOption(true)
}
```
```go
type defaultTrueOption bool
func (o defaultTrueOption) Apply(c *config) {
c.Bool = bool(o)
func (o defaultTrueOption) apply(c config) config {
c.Bool = bool(o)
return c
}
// WithoutOption sets a T* to have Bool option excluded.
// WithoutOption sets a T to have Bool option excluded.
func WithoutOption() Option {
return defaultTrueOption(false)
return defaultTrueOption(false)
}
````
```
##### Declared Type Options
```go
type myTypeOption struct {
MyType MyType
MyType MyType
}
func (o myTypeOption) Apply(c *config) {
c.MyType = o.MyType
func (o myTypeOption) apply(c config) config {
c.MyType = o.MyType
return c
}
// WithMyType sets T* to have include MyType.
// WithMyType sets T to include MyType.
func WithMyType(t MyType) Option {
return myTypeOption{t}
return myTypeOption{t}
}
```
##### Functional Options
```go
type optionFunc func(config) config
func (fn optionFunc) apply(c config) config {
return fn(c)
}
// WithMyType sets t as MyType.
func WithMyType(t MyType) Option {
return optionFunc(func(c config) config {
c.MyType = t
return c
})
}
```
#### Instantiation
Using this configuration pattern to configure instantiation with a `New*`
This configuration pattern is used to configure instantiation with a `NewT`
function.
```go
func NewT*(options ...Option) T* {}
func NewT(options ...Option) T {}
```
Any required parameters can be declared before the variadic `options`.
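For instance, a sketch with one required parameter declared ahead of the
variadic options (the `Exporter` type and `endpoint` parameter here are only
illustrative):
```go
// Exporter is a hypothetical type built from a required endpoint plus
// optional configuration.
type Exporter struct {
	endpoint string
	cfg      config
}

// NewExporter constructs an Exporter; the endpoint is required, the options
// are not.
func NewExporter(endpoint string, options ...Option) Exporter {
	return Exporter{endpoint: endpoint, cfg: newConfig(options...)}
}
```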
@ -320,12 +377,12 @@ type config struct {
// DogOption apply Dog specific options.
type DogOption interface {
ApplyDog(*config)
applyDog(config) config
}
// BirdOption apply Bird specific options.
type BirdOption interface {
ApplyBird(*config)
applyBird(config) config
}
// Option apply options for all animals.
@ -335,23 +392,42 @@ type Option interface {
}
type weightOption float64
func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) }
func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
func WithWeight(w float64) Option { return weightOption(w) }
func (o weightOption) applyDog(c config) config {
c.Weight = float64(o)
return c
}
func (o weightOption) applyBird(c config) config {
c.Weight = float64(o)
return c
}
func WithWeight(w float64) Option { return weightOption(w) }
type furColorOption string
func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
func WithFurColor(c string) DogOption { return furColorOption(c) }
func (o furColorOption) applyDog(c config) config {
c.Color = string(o)
return c
}
func WithFurColor(c string) DogOption { return furColorOption(c) }
type maxAltitudeOption float64
func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func (o maxAltitudeOption) applyBird(c config) config {
c.MaxAltitude = float64(o)
return c
}
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func NewDog(name string, o ...DogOption) Dog {}
func NewBird(name string, o ...BirdOption) Bird {}
```
### Interface Type
### Interfaces
To allow other developers to better comprehend the code, it is important
to ensure it is sufficiently documented. One simple measure that contributes
@ -359,21 +435,91 @@ to this aim is self-documenting by naming method parameters. Therefore,
where appropriate, methods of every exported interface type should have
their parameters appropriately named.
#### Interface Stability
All exported stable interfaces that include the following warning in their
documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
Otherwise, stable interfaces MUST NOT be modified.
If new functionality is needed for an interface that cannot be changed it MUST
be added by including an additional interface. That added interface can be a
simple interface for the specific functionality that you want to add or it can
be a super-set of the original interface. For example, if you wanted to add a
`Close` method to the `Exporter` interface:
```go
type Exporter interface {
Export()
}
```
A new interface, `Closer`, can be added:
```go
type Closer interface {
Close()
}
```
Code that is passed the `Exporter` interface can now check to see if the passed
value also satisfies the new interface. E.g.
```go
func caller(e Exporter) {
/* ... */
if c, ok := e.(Closer); ok {
c.Close()
}
/* ... */
}
```
Alternatively, a new type that is the super-set of an `Exporter` can be created.
```go
type ClosingExporter struct {
	Exporter
}

// Close releases any resources held by the embedded Exporter.
func (e ClosingExporter) Close() { /* ... */ }
```
This new type can be used similarly to the simple interface above in that a
passed `Exporter` value can be type asserted to the `ClosingExporter` type
and the `Close` method called.
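A sketch of that usage, mirroring the earlier `caller` example:
```go
func caller(e Exporter) {
	/* ... */
	if ce, ok := e.(ClosingExporter); ok {
		ce.Close()
	}
	/* ... */
}
```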
This super-set approach can be useful if there is explicit behavior that needs
to be coupled with the original type and passed as a unified type to a new
function, but, because of this coupling, it also limits the applicability of
the added functionality. If there exist other interfaces where this
functionality should be added, each one will need its own super-set
interface and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
## Approvers and Maintainers
Approvers:
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM)
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta
Maintainers:
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Tyler Yahn](https://github.com/MrAlias), Splunk
Emeritus:
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community

View File

@ -12,13 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
TOOLS_MOD_DIR := ./internal/tools
# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example' | sort)) $(shell find ./example -type f -name 'go.mod' -exec dirname {} \; | sort)
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
GO = go
@ -27,8 +25,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: dependabot-check license-check lint build examples test-default
ci: precommit check-clean-work-tree test-coverage
precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@ -40,114 +38,147 @@ $(TOOLS)/%: | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \
$(GO) build -o $@ $(PACKAGE)
MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
SEMCONVGEN = $(TOOLS)/semconvgen
$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
DBOTCONF = $(TOOLS)/dbotconf
$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
MISSPELL = $(TOOLS)/misspell
$(TOOLS)/misspell: PACKAGE= github.com/client9/misspell/cmd/misspell
$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
GOCOVMERGE = $(TOOLS)/gocovmerge
$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
STRINGER = $(TOOLS)/stringer
$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
PORTO = $(TOOLS)/porto
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
.PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(STRINGER) $(TOOLS)/gojq
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
# Build
.PHONY: examples generate build
examples:
@set -e; for dir in $(EXAMPLES); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build .); \
done
.PHONY: generate build
generate: $(STRINGER)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) generate $${dir}/..."; \
(cd "$${dir}" && \
PATH="$(TOOLS):$${PATH}" $(GO) generate ./...); \
done
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
generate/%: DIR=$*
generate/%: | $(STRINGER) $(PORTO)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
build: generate
# Build all package code including testing code.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build ./... && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null); \
done
build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
build/%: DIR=$*
build/%:
@echo "$(GO) build $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) build ./...
build-tests/%: DIR=$*
build-tests/%:
@echo "$(GO) build tests $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
# Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race
.PHONY: $(TEST_TARGETS) test
test-default: ARGS=-v -race
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v
test-race: ARGS=-race
test-verbose: ARGS=-v -race
$(TEST_TARGETS): test
test:
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $${dir}/..."; \
(cd "$${dir}" && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)); \
done
test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$*
test/%:
@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
.PHONY: test-coverage
test-coverage:
test-coverage: | $(GOCOVMERGE)
@set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
echo "$(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
(cd "$${dir}" && \
$(GO) list ./... \
| grep -v third_party \
| grep -v 'semconv/v.*' \
| xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
$(GO) tool cover -html=coverage.out -o coverage.html); \
[ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
done; \
sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$*
golangci-lint/%: | $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink
crosslink: | $(CROSSLINK)
@echo "Updating intra-repository dependencies in all go modules" \
&& $(CROSSLINK) --root=$(shell pwd) --prune
.PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy -compat=1.17
.PHONY: lint-modules
lint-modules: go-mod-tidy
.PHONY: lint
lint: misspell lint-modules | $(GOLANGCI_LINT)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(GOLANGCI_LINT) run --fix && \
$(GOLANGCI_LINT) run); \
done
lint: misspell lint-modules golangci-lint
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
@$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
.PHONY: misspell
misspell: | $(MISSPELL)
$(MISSPELL) -w $(ALL_DOCS)
.PHONY: lint-modules
lint-modules: | $(CROSSLINK)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "$(GO) mod tidy in $${dir}"; \
(cd "$${dir}" && \
$(GO) mod tidy); \
done
echo "cross-linking all go modules"
$(CROSSLINK)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
@ -155,18 +186,14 @@ license-check:
exit 1; \
fi
DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check
dependabot-check:
@result=$$( \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
do grep -q "$$f" .github/dependabot.yml \
|| echo "$$f"; \
done; \
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
exit 1; \
fi
dependabot-check: | $(DBOTCONF)
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
.PHONY: dependabot-generate
dependabot-generate: | $(DBOTCONF)
@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
.PHONY: check-clean-work-tree
check-clean-work-tree:
@ -177,3 +204,23 @@ check-clean-work-tree:
git status; \
exit 1; \
fi
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
@[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
@[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
@$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
@$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
@$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: prerelease
prerelease: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
COMMIT ?= "HEAD"
.PHONY: add-tags
add-tags: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}

View File

@ -1,23 +1,24 @@
# OpenTelemetry-Go
[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
## Project Status
**Warning**: this project is currently in a pre-GA phase. Backwards
incompatible changes may be introduced in subsequent minor version releases as
we work to track the evolving OpenTelemetry specification and user feedback.
| Signal | Status | Project |
| ------- | ---------- | ------- |
| Traces | Stable | N/A |
| Metrics | Alpha | N/A |
| Logs | Frozen [1] | N/A |
Our progress towards a GA release candidate is tracked in [this project
board](https://github.com/orgs/open-telemetry/projects/5). This release
candidate will follow semantic versioning and will be released with a major
version greater than zero.
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository are tracked in our local
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
@ -29,20 +30,41 @@ Project versioning information and stability guarantees can be found in the
### Compatibility
This project is tested on the following systems.
OpenTelemetry-Go ensures compatibility with the current supported versions of
the [Go language](https://golang.org/doc/devel/release#policy):
> Each major Go release is supported until there are two newer major releases.
> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
For versions of Go that are no longer supported upstream, opentelemetry-go will
stop ensuring compatibility with these versions in the following manner:
- A minor release of opentelemetry-go will be made to add support for the new
supported release of Go.
- The following minor release of opentelemetry-go will remove compatibility
testing for the oldest (now archived upstream) version of Go. This, and
future, releases of opentelemetry-go may include features only supported by
the currently supported versions of Go.
Currently, this project supports the following environments.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
| Ubuntu | 1.15 | amd64 |
| Ubuntu | 1.14 | amd64 |
| Ubuntu | 1.15 | 386 |
| Ubuntu | 1.14 | 386 |
| MacOS | 1.15 | amd64 |
| MacOS | 1.14 | amd64 |
| Windows | 1.15 | amd64 |
| Windows | 1.14 | amd64 |
| Windows | 1.15 | 386 |
| Windows | 1.14 | 386 |
| Ubuntu | 1.19 | amd64 |
| Ubuntu | 1.18 | amd64 |
| Ubuntu | 1.17 | amd64 |
| Ubuntu | 1.19 | 386 |
| Ubuntu | 1.18 | 386 |
| Ubuntu | 1.17 | 386 |
| MacOS | 1.19 | amd64 |
| MacOS | 1.18 | amd64 |
| MacOS | 1.17 | amd64 |
| Windows | 1.19 | amd64 |
| Windows | 1.18 | amd64 |
| Windows | 1.17 | amd64 |
| Windows | 1.19 | 386 |
| Windows | 1.18 | 386 |
| Windows | 1.17 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
@ -68,7 +90,7 @@ libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/
If you need to extend the telemetry an instrumentation library provides or want
to build your own instrumentation for your application directly you will need
to use the
[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api)
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
package. The included [examples](./example/) are a good way to see some
practical uses of this process.
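For example, a minimal sketch of direct instrumentation through the global
`TracerProvider` (the tracer and span names here are arbitrary):
```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	// Obtain a Tracer from the globally registered TracerProvider.
	tracer := otel.Tracer("example.com/app")

	// Start a span around the work being measured and end it when done.
	ctx, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	// Pass ctx to downstream calls so they can create child spans.
	_ = ctx
}
```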
@ -77,15 +99,15 @@ practical uses of this process.
Now that your application is instrumented to collect telemetry, it needs an
export pipeline to send that telemetry to an observability platform.
You can find officially supported exporters [here](./exporters/) and in the
companion [contrib
repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters/metric).
Additionally, there are many vendor specific or 3rd party exporters for
OpenTelemetry. These exporters are broken down by
[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
and
[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
support.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
| Exporter | Metrics | Traces |
| :-----------------------------------: | :-----: | :----: |
| [Jaeger](./exporters/jaeger/) | | ✓ |
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
| [Prometheus](./exporters/prometheus/) | ✓ | |
| [stdout](./exporters/stdout/) | ✓ | ✓ |
| [Zipkin](./exporters/zipkin/) | | ✓ |
## Contributing

View File

@ -1,22 +1,51 @@
# Release Process
## Semantic Convention Generation
New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
2. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
export TAG="v1.7.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG"
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
## Pre-Release
First, decide which module sets will be released and update their versions
in `versions.yaml`. Commit this change to a new branch.
Update go.mod for submodules to depend on the new release which will happen in the next step.
1. Run the pre-release script. It creates a branch `pre_release_<new tag>` that will contain all release changes.
1. Run the `prerelease` make target. It creates a branch
`prerelease_<module set>_<new tag>` that will contain all release changes.
```
./pre_release.sh -t <new tag>
make prerelease MODSET=<module set>
```
2. Verify the changes.
```
git diff main
git diff ...prerelease_<module set>_<new tag>
```
This should have changed the version for all modules to be `<new tag>`.
If these changes look correct, merge them into your pre-release branch:
```sh
git merge prerelease_<module set>_<new tag>
```
3. Update the [Changelog](./CHANGELOG.md).
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
@ -32,24 +61,28 @@ Update go.mod for submodules to depend on the new release which will happen in t
4. Push the changes to upstream and create a Pull Request on GitHub.
Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
## Tag
Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
Failure to do so will leave things in a broken state.
Failure to do so will leave things in a broken state. As long as you do not
change `versions.yaml` between pre-release and this step, things should be fine.
***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
It is critical you make sure the version you push upstream is correct.
[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
1. Run the tag.sh script using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
1. For each module set that will be released, run the `add-tags` make target
using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
```
./tag.sh <new tag> <commit-hash>
make add-tags MODSET=<module set> COMMIT=<commit hash>
```
It should only be necessary to provide an explicit `COMMIT` value if the
current `HEAD` of your working directory is not the correct commit.
2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
Make sure you push all sub-modules as well.
@ -63,7 +96,6 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
Additionally, the `tag.sh` script generates commit logs since last release which can be used to supplement the release notes.
## Verify Examples
@ -76,6 +108,15 @@ After releasing verify that examples build outside of the repository.
The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
This ensures they build with the published release, not the local copy.
## Contrib Repository
## Post-Release
### Contrib Repository
Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
### Website Documentation
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification

View File

@ -12,7 +12,14 @@ is designed so the following goals can be achieved.
* [Semantic import
versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
will be used.
* Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
* Versions will comply with [semver
2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
* New methods may be added to exported API interfaces. All exported
interfaces that fall within this exception will include the following
paragraph in their public documentation.
> Warning: methods may be added to this interface in minor releases.
* If a module is version `v2` or higher, the major version of the module
must be included as a `/vN` at the end of the module paths used in
`go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
@ -143,10 +150,10 @@ The `otel` package is refactored to remove its dependencies on `otel/metric` so
it can be released as stable as well. With that done the following release
candidates are made:
* `otel`: `v1.0.0-rc.1`
* `otel/trace`: `v1.0.0-rc.1`
* `otel/baggage`: `v1.0.0-rc.1`
* `otel/sdk/trace`: `v1.0.0-rc.1`
* `otel`: `v1.0.0-RC1`
* `otel/trace`: `v1.0.0-RC1`
* `otel/baggage`: `v1.0.0-RC1`
* `otel/sdk/trace`: `v1.0.0-RC1`
The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
@ -154,10 +161,10 @@ A few minor issues are discovered in the `otel/trace` package. These issues are
resolved with some minor, but backwards incompatible, changes and are released
as a second release candidate:
* `otel`: `v1.0.0-rc.2`
* `otel/trace`: `v1.0.0-rc.2`
* `otel/baggage`: `v1.0.0-rc.2`
* `otel/sdk/trace`: `v1.0.0-rc.2`
* `otel`: `v1.0.0-RC2`
* `otel/trace`: `v1.0.0-RC2`
* `otel/baggage`: `v1.0.0-RC2`
* `otel/sdk/trace`: `v1.0.0-RC2`
Notice that all module version numbers are incremented to adhere to our
versioning policy.
@ -198,12 +205,12 @@ As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
point where they should be evaluated for stability. The `otel` module is
reintegrated with the `otel/metric` package and the following release is made:
* `otel`: `v1.1.0-rc.1`
* `otel/trace`: `v1.1.0-rc.1`
* `otel/metric`: `v1.1.0-rc.1`
* `otel/baggage`: `v1.1.0-rc.1`
* `otel/sdk/trace`: `v1.1.0-rc.1`
* `otel/sdk/metric`: `v1.1.0-rc.1`
* `otel`: `v1.1.0-RC1`
* `otel/trace`: `v1.1.0-RC1`
* `otel/metric`: `v1.1.0-RC1`
* `otel/baggage`: `v1.1.0-RC1`
* `otel/sdk/trace`: `v1.1.0-RC1`
* `otel/sdk/metric`: `v1.1.0-RC1`
All the modules are evaluated and determined to a viable stable release. They
are then released as version `v1.1.0` (the minor version is incremented to

View File

@ -12,9 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// package attribute provides key and value attributes.
//
// This package is currently in a pre-GA phase. Backwards incompatible changes
// may be introduced in subsequent minor version releases as we work to track
// the evolving OpenTelemetry specification and user feedback.
// Package attribute provides key and value attributes.
package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -21,19 +21,17 @@ import (
)
type (
// Encoder is a mechanism for serializing a label set into a
// specific string representation that supports caching, to
// avoid repeated serialization. An example could be an
// exporter encoding the label set into a wire representation.
// Encoder is a mechanism for serializing an attribute set into a specific
// string representation that supports caching, to avoid repeated
// serialization. An example could be an exporter encoding the attribute
// set into a wire representation.
Encoder interface {
// Encode returns the serialized encoding of the label
// set using its Iterator. This result may be cached
// by a attribute.Set.
// Encode returns the serialized encoding of the attribute set using
// its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of
// label encoder. Label encoders allocate these using
// `NewEncoderID`.
// ID returns a value that is unique for each class of attribute
// encoder. Attribute encoders allocate these using `NewEncoderID`.
ID() EncoderID
}
@ -43,54 +41,53 @@ type (
value uint64
}
// defaultLabelEncoder uses a sync.Pool of buffers to reduce
// the number of allocations used in encoding labels. This
// implementation encodes a comma-separated list of key=value,
// with '/'-escaping of '=', ',', and '\'.
defaultLabelEncoder struct {
// pool is a pool of labelset builders. The buffers in this
// pool grow to a size that most label encodings will not
// allocate new memory.
// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
// allocations used in encoding attributes. This implementation encodes a
// comma-separated list of key=value, with '/'-escaping of '=', ',', and
// '\'.
defaultAttrEncoder struct {
// pool is a pool of attribute set builders. The buffers in this pool
// grow to a size that most attribute encodings will not allocate new
// memory.
pool sync.Pool // *bytes.Buffer
}
)
// escapeChar is used to ensure uniqueness of the label encoding where
// keys or values contain either '=' or ','. Since there is no parser
// needed for this encoding and its only requirement is to be unique,
// this choice is arbitrary. Users will see these in some exporters
// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
// escapeChar is used to ensure uniqueness of the attribute encoding where
// keys or values contain either '=' or ','. Since there is no parser needed
// for this encoding and its only requirement is to be unique, this choice is
// arbitrary. Users will see these in some exporters (e.g., stdout), so the
// backslash ('\') is used as a conventional choice.
const escapeChar = '\\'
var (
_ Encoder = &defaultLabelEncoder{}
_ Encoder = &defaultAttrEncoder{}
// encoderIDCounter is for generating IDs for other label
// encoders.
// encoderIDCounter is for generating IDs for other attribute encoders.
encoderIDCounter uint64
defaultEncoderOnce sync.Once
defaultEncoderID = NewEncoderID()
defaultEncoderInstance *defaultLabelEncoder
defaultEncoderInstance *defaultAttrEncoder
)
// NewEncoderID returns a unique label encoder ID. It should be
// called once per each type of label encoder. Preferably in init() or
// in var definition.
// NewEncoderID returns a unique attribute encoder ID. It should be called
// once per each type of attribute encoder. Preferably in init() or in var
// definition.
func NewEncoderID() EncoderID {
return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
}
// DefaultEncoder returns a label encoder that encodes labels
// in such a way that each escaped label's key is followed by an equal
// sign and then by an escaped label's value. All key-value pairs are
// separated by a comma.
// DefaultEncoder returns an attribute encoder that encodes attributes in such
// a way that each escaped attribute's key is followed by an equal sign and
// then by an escaped attribute's value. All key-value pairs are separated by
// a comma.
//
// Escaping is done by prepending a backslash before either a
// backslash, equal sign or a comma.
// Escaping is done by prepending a backslash before either a backslash, equal
// sign or a comma.
func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultLabelEncoder{
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
New: func() interface{} {
return &bytes.Buffer{}
@ -101,15 +98,14 @@ func DefaultEncoder() Encoder {
return defaultEncoderInstance
}
// Encode is a part of an implementation of the LabelEncoder
// interface.
func (d *defaultLabelEncoder) Encode(iter Iterator) string {
// Encode is a part of an implementation of the AttributeEncoder interface.
func (d *defaultAttrEncoder) Encode(iter Iterator) string {
buf := d.pool.Get().(*bytes.Buffer)
defer d.pool.Put(buf)
buf.Reset()
for iter.Next() {
i, keyValue := iter.IndexedLabel()
i, keyValue := iter.IndexedAttribute()
if i > 0 {
_, _ = buf.WriteRune(',')
}
@ -126,8 +122,8 @@ func (d *defaultLabelEncoder) Encode(iter Iterator) string {
return buf.String()
}
// ID is a part of an implementation of the LabelEncoder interface.
func (*defaultLabelEncoder) ID() EncoderID {
// ID is a part of an implementation of the AttributeEncoder interface.
func (*defaultAttrEncoder) ID() EncoderID {
return defaultEncoderID
}
@ -137,9 +133,9 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val {
switch ch {
case '=', ',', escapeChar:
buf.WriteRune(escapeChar)
_, _ = buf.WriteRune(escapeChar)
}
buf.WriteRune(ch)
_, _ = buf.WriteRune(ch)
}
}

View File

@ -14,16 +14,16 @@
package attribute // import "go.opentelemetry.io/otel/attribute"
// Iterator allows iterating over the set of labels in order,
// sorted by key.
// Iterator allows iterating over the set of attributes in order, sorted by
// key.
type Iterator struct {
storage *Set
idx int
}
// MergeIterator supports iterating over two sets of labels while
// eliminating duplicate values from the combined set. The first
// iterator value takes precedence.
// MergeIterator supports iterating over two sets of attributes while
// eliminating duplicate values from the combined set. The first iterator
// value takes precedence.
type MergeIterator struct {
one oneIterator
two oneIterator
@ -31,13 +31,13 @@ type MergeIterator struct {
}
type oneIterator struct {
iter Iterator
done bool
label KeyValue
iter Iterator
done bool
attr KeyValue
}
// Next moves the iterator to the next position. Returns false if there
// are no more labels.
// Next moves the iterator to the next position. Returns false if there are no
// more attributes.
func (i *Iterator) Next() bool {
i.idx++
return i.idx < i.Len()
@ -45,30 +45,41 @@ func (i *Iterator) Next() bool {
// Label returns current KeyValue. Must be called only after Next returns
// true.
//
// Deprecated: Use Attribute instead.
func (i *Iterator) Label() KeyValue {
return i.Attribute()
}
// Attribute returns the current KeyValue of the Iterator. It must be called
// only after Next returns true.
func (i *Iterator) Attribute() KeyValue {
kv, _ := i.storage.Get(i.idx)
return kv
}
// Attribute is a synonym for Label().
func (i *Iterator) Attribute() KeyValue {
return i.Label()
}
// IndexedLabel returns current index and attribute. Must be called only
// after Next returns true.
//
// Deprecated: Use IndexedAttribute instead.
func (i *Iterator) IndexedLabel() (int, KeyValue) {
return i.idx, i.Label()
return i.idx, i.Attribute()
}
// Len returns a number of labels in the iterator's `*Set`.
// IndexedAttribute returns current index and attribute. Must be called only
// after Next returns true.
func (i *Iterator) IndexedAttribute() (int, KeyValue) {
return i.idx, i.Attribute()
}
// Len returns a number of attributes in the iterated set.
func (i *Iterator) Len() int {
return i.storage.Len()
}
// ToSlice is a convenience function that creates a slice of labels
// from the passed iterator. The iterator is set up to start from the
// beginning before creating the slice.
// ToSlice is a convenience function that creates a slice of attributes from
// the passed iterator. The iterator is set up to start from the beginning
// before creating the slice.
func (i *Iterator) ToSlice() []KeyValue {
l := i.Len()
if l == 0 {
@ -77,12 +88,12 @@ func (i *Iterator) ToSlice() []KeyValue {
i.idx = -1
slice := make([]KeyValue, 0, l)
for i.Next() {
slice = append(slice, i.Label())
slice = append(slice, i.Attribute())
}
return slice
}
// NewMergeIterator returns a MergeIterator for merging two label sets
// NewMergeIterator returns a MergeIterator for merging two attribute sets.
// Duplicates are resolved by taking the value from the first set.
func NewMergeIterator(s1, s2 *Set) MergeIterator {
mi := MergeIterator{
@ -102,42 +113,49 @@ func makeOne(iter Iterator) oneIterator {
func (oi *oneIterator) advance() {
if oi.done = !oi.iter.Next(); !oi.done {
oi.label = oi.iter.Label()
oi.attr = oi.iter.Attribute()
}
}
// Next returns true if there is another label available.
// Next returns true if there is another attribute available.
func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done {
return false
}
if m.one.done {
m.current = m.two.label
m.current = m.two.attr
m.two.advance()
return true
}
if m.two.done {
m.current = m.one.label
m.current = m.one.attr
m.one.advance()
return true
}
if m.one.label.Key == m.two.label.Key {
m.current = m.one.label // first iterator label value wins
if m.one.attr.Key == m.two.attr.Key {
m.current = m.one.attr // first iterator attribute value wins
m.one.advance()
m.two.advance()
return true
}
if m.one.label.Key < m.two.label.Key {
m.current = m.one.label
if m.one.attr.Key < m.two.attr.Key {
m.current = m.one.attr
m.one.advance()
return true
}
m.current = m.two.label
m.current = m.two.attr
m.two.advance()
return true
}
// Label returns the current value after Next() returns true.
//
// Deprecated: Use Attribute instead.
func (m *MergeIterator) Label() KeyValue {
return m.current
}
// Attribute returns the current value after Next() returns true.
func (m *MergeIterator) Attribute() KeyValue {
return m.current
}

View File

@ -20,10 +20,8 @@ type Key string
// Bool creates a KeyValue instance with a BOOL Value.
//
// If creating both key and a bool value at the same time, then
// instead of calling Key(name).Bool(value) consider using a
// convenience function provided by the api/key package -
// key.Bool(name, value).
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Bool(name, value).
func (k Key) Bool(v bool) KeyValue {
return KeyValue{
Key: k,
@ -31,51 +29,21 @@ func (k Key) Bool(v bool) KeyValue {
}
}
// Int64 creates a KeyValue instance with an INT64 Value.
// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
//
// If creating both key and an int64 value at the same time, then
// instead of calling Key(name).Int64(value) consider using a
// convenience function provided by the api/key package -
// key.Int64(name, value).
func (k Key) Int64(v int64) KeyValue {
// If creating both a key and value at the same time, use the provided
// convenience function instead -- BoolSlice(name, value).
func (k Key) BoolSlice(v []bool) KeyValue {
return KeyValue{
Key: k,
Value: Int64Value(v),
}
}
// Float64 creates a KeyValue instance with a FLOAT64 Value.
//
// If creating both key and a float64 value at the same time, then
// instead of calling Key(name).Float64(value) consider using a
// convenience function provided by the api/key package -
// key.Float64(name, value).
func (k Key) Float64(v float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64Value(v),
}
}
// String creates a KeyValue instance with a STRING Value.
//
// If creating both key and a string value at the same time, then
// instead of calling Key(name).String(value) consider using a
// convenience function provided by the api/key package -
// key.String(name, value).
func (k Key) String(v string) KeyValue {
return KeyValue{
Key: k,
Value: StringValue(v),
Value: BoolSliceValue(v),
}
}
// Int creates a KeyValue instance with an INT64 Value.
//
// If creating both key and an int value at the same time, then
// instead of calling Key(name).Int(value) consider using a
// convenience function provided by the api/key package -
// key.Int(name, value).
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int(name, value).
func (k Key) Int(v int) KeyValue {
return KeyValue{
Key: k,
@ -83,20 +51,84 @@ func (k Key) Int(v int) KeyValue {
}
}
// IntSlice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- IntSlice(name, value).
func (k Key) IntSlice(v []int) KeyValue {
return KeyValue{
Key: k,
Value: IntSliceValue(v),
}
}
// Int64 creates a KeyValue instance with an INT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64(name, value).
func (k Key) Int64(v int64) KeyValue {
return KeyValue{
Key: k,
Value: Int64Value(v),
}
}
// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64Slice(name, value).
func (k Key) Int64Slice(v []int64) KeyValue {
return KeyValue{
Key: k,
Value: Int64SliceValue(v),
}
}
// Float64 creates a KeyValue instance with a FLOAT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64(name, value).
func (k Key) Float64(v float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64Value(v),
}
}
// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64Slice(name, value).
func (k Key) Float64Slice(v []float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64SliceValue(v),
}
}
// String creates a KeyValue instance with a STRING Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- String(name, value).
func (k Key) String(v string) KeyValue {
return KeyValue{
Key: k,
Value: StringValue(v),
}
}
// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- StringSlice(name, value).
func (k Key) StringSlice(v []string) KeyValue {
return KeyValue{
Key: k,
Value: StringSliceValue(v),
}
}
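
Outside the diff itself, a brief hedged sketch of the pattern these doc comments recommend: prefer the package-level constructors over chaining through Key when both the key and the value are known up front. The attribute names are invented for illustration.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Chaining through Key works, but takes two steps.
	k := attribute.Key("http.method")
	kv1 := k.String("GET")

	// The convenience constructors build the same KeyValue in one call.
	kv2 := attribute.String("http.method", "GET")
	kv3 := attribute.StringSlice("http.flavors", []string{"1.1", "2"})

	fmt.Println(kv1.Value.AsString(), kv2.Value.AsString(), kv3.Value.AsStringSlice())
}
```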
// Defined returns true for non-empty keys.
func (k Key) Defined() bool {
return len(k) != 0
}
// Array creates a KeyValue instance with an ARRAY Value.
//
// If creating both a key and an array value at the same time, then
// instead of calling Key(name).Array(value) consider using a
// convenience function provided by the api/key package -
// key.Array(name, value).
func (k Key) Array(v interface{}) KeyValue {
return KeyValue{
Key: k,
Value: ArrayValue(v),
}
}

View File

@ -15,9 +15,7 @@
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
"reflect"
)
// KeyValue holds a key and value pair.
@ -28,81 +26,61 @@ type KeyValue struct {
// Valid returns if kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key != "" && kv.Value.Type() != INVALID
return kv.Key.Defined() && kv.Value.Type() != INVALID
}
// Bool creates a new key-value pair with a passed name and a bool
// value.
// Bool creates a KeyValue with a BOOL Value type.
func Bool(k string, v bool) KeyValue {
return Key(k).Bool(v)
}
// Int64 creates a new key-value pair with a passed name and an int64
// value.
// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
func BoolSlice(k string, v []bool) KeyValue {
return Key(k).BoolSlice(v)
}
// Int creates a KeyValue with an INT64 Value type.
func Int(k string, v int) KeyValue {
return Key(k).Int(v)
}
// IntSlice creates a KeyValue with an INT64SLICE Value type.
func IntSlice(k string, v []int) KeyValue {
return Key(k).IntSlice(v)
}
// Int64 creates a KeyValue with an INT64 Value type.
func Int64(k string, v int64) KeyValue {
return Key(k).Int64(v)
}
// Float64 creates a new key-value pair with a passed name and a float64
// value.
// Int64Slice creates a KeyValue with an INT64SLICE Value type.
func Int64Slice(k string, v []int64) KeyValue {
return Key(k).Int64Slice(v)
}
// Float64 creates a KeyValue with a FLOAT64 Value type.
func Float64(k string, v float64) KeyValue {
return Key(k).Float64(v)
}
// String creates a new key-value pair with a passed name and a string
// value.
// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
func Float64Slice(k string, v []float64) KeyValue {
return Key(k).Float64Slice(v)
}
// String creates a KeyValue with a STRING Value type.
func String(k, v string) KeyValue {
return Key(k).String(v)
}
// StringSlice creates a KeyValue with a STRINGSLICE Value type.
func StringSlice(k string, v []string) KeyValue {
return Key(k).StringSlice(v)
}
// Stringer creates a new key-value pair with a passed name and a string
// value generated by the passed Stringer interface.
func Stringer(k string, v fmt.Stringer) KeyValue {
return Key(k).String(v.String())
}
// Int creates a new key-value pair instance with a passed name and
// either an int32 or an int64 value, depending on whether the int
// type is 32 or 64 bits wide.
func Int(k string, v int) KeyValue {
return Key(k).Int(v)
}
// Array creates a new key-value pair with a passed name and a array.
// Only arrays of primitive type are supported.
func Array(k string, v interface{}) KeyValue {
return Key(k).Array(v)
}
// Any creates a new key-value pair instance with a passed name and
// automatic type inference. This is slower, and not type-safe.
func Any(k string, value interface{}) KeyValue {
if value == nil {
return String(k, "<nil>")
}
if stringer, ok := value.(fmt.Stringer); ok {
return String(k, stringer.String())
}
rv := reflect.ValueOf(value)
switch rv.Kind() {
case reflect.Array, reflect.Slice:
return Array(k, value)
case reflect.Bool:
return Bool(k, rv.Bool())
case reflect.Int, reflect.Int8, reflect.Int16:
return Int(k, int(rv.Int()))
case reflect.Int64:
return Int64(k, rv.Int())
case reflect.Float64:
return Float64(k, rv.Float())
case reflect.String:
return String(k, rv.String())
}
if b, err := json.Marshal(value); b != nil && err == nil {
return String(k, string(b))
}
return String(k, fmt.Sprint(value))
}

View File

@ -18,57 +18,45 @@ import (
"encoding/json"
"reflect"
"sort"
"sync"
)
type (
// Set is the representation for a distinct label set. It
// manages an immutable set of labels, with an internal cache
// for storing label encodings.
// Set is the representation for a distinct attribute set. It manages an
// immutable set of attributes, with an internal cache for storing
// attribute encodings.
//
// This type supports the `Equivalent` method of comparison
// using values of type `Distinct`.
//
// This type is used to implement:
// 1. Metric labels
// 2. Resource sets
// 3. Correlation map (TODO)
// This type supports the Equivalent method of comparison using values of
// type Distinct.
Set struct {
equivalent Distinct
lock sync.Mutex
encoders [maxConcurrentEncoders]EncoderID
encoded [maxConcurrentEncoders]string
}
// Distinct wraps a variable-size array of `KeyValue`,
// constructed with keys in sorted order. This can be used as
// a map key or for equality checking between Sets.
// Distinct wraps a variable-size array of KeyValue, constructed with keys
// in sorted order. This can be used as a map key or for equality checking
// between Sets.
Distinct struct {
iface interface{}
}
// Filter supports removing certain labels from label sets.
// When the filter returns true, the label will be kept in
// the filtered label set. When the filter returns false, the
// label is excluded from the filtered label set, and the
// label instead appears in the `removed` list of excluded labels.
// Filter supports removing certain attributes from attribute sets. When
// the filter returns true, the attribute will be kept in the filtered
// attribute set. When the filter returns false, the attribute is excluded
// from the filtered attribute set, and the attribute instead appears in
// the removed list of excluded attributes.
Filter func(KeyValue) bool
// Sortable implements `sort.Interface`, used for sorting
// `KeyValue`. This is an exported type to support a
// memory optimization. A pointer to one of these is needed
// for the call to `sort.Stable()`, which the caller may
// provide in order to avoid an allocation. See
// `NewSetWithSortable()`.
// Sortable implements sort.Interface, used for sorting KeyValue. This is
// an exported type to support a memory optimization. A pointer to one of
// these is needed for the call to sort.Stable(), which the caller may
// provide in order to avoid an allocation. See NewSetWithSortable().
Sortable []KeyValue
)
var (
// keyValueType is used in `computeDistinctReflect`.
// keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{})
// emptySet is returned for empty label sets.
// emptySet is returned for empty attribute sets.
emptySet = &Set{
equivalent: Distinct{
iface: [0]KeyValue{},
@ -76,8 +64,6 @@ var (
}
)
const maxConcurrentEncoders = 3
// EmptySet returns a reference to a Set with no elements.
//
// This is a convenience provided for optimized calling utility.
@ -85,30 +71,30 @@ func EmptySet() *Set {
return emptySet
}
// reflect abbreviates `reflect.ValueOf`.
func (d Distinct) reflect() reflect.Value {
// reflectValue abbreviates reflect.ValueOf(d).
func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
}
// Valid returns true if this value refers to a valid `*Set`.
// Valid returns true if this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
}
// Len returns the number of labels in this set.
// Len returns the number of attributes in this set.
func (l *Set) Len() int {
if l == nil || !l.equivalent.Valid() {
return 0
}
return l.equivalent.reflect().Len()
return l.equivalent.reflectValue().Len()
}
// Get returns the KeyValue at ordered position `idx` in this set.
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
if l == nil {
return KeyValue{}, false
}
value := l.equivalent.reflect()
value := l.equivalent.reflectValue()
if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for
@ -124,7 +110,7 @@ func (l *Set) Value(k Key) (Value, bool) {
if l == nil {
return Value{}, false
}
rValue := l.equivalent.reflect()
rValue := l.equivalent.reflectValue()
vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool {
@ -149,7 +135,7 @@ func (l *Set) HasValue(k Key) bool {
return ok
}
// Iter returns an iterator for visiting the labels in this set.
// Iter returns an iterator for visiting the attributes in this set.
func (l *Set) Iter() Iterator {
return Iterator{
storage: l,
@ -157,18 +143,17 @@ func (l *Set) Iter() Iterator {
}
}
// ToSlice returns the set of labels belonging to this set, sorted,
// where keys appear no more than once.
// ToSlice returns the set of attributes belonging to this set, sorted, where
// keys appear no more than once.
func (l *Set) ToSlice() []KeyValue {
iter := l.Iter()
return iter.ToSlice()
}
// Equivalent returns a value that may be used as a map key. The
// Distinct type guarantees that the result will equal the equivalent
// Distinct value of any label set with the same elements as this,
// where sets are made unique by choosing the last value in the input
// for any given key.
// Equivalent returns a value that may be used as a map key. The Distinct type
// guarantees that the result will equal the equivalent Distinct value of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
if l == nil || !l.equivalent.Valid() {
return emptySet.equivalent
@ -181,52 +166,13 @@ func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
}
// Encoded returns the encoded form of this set, according to
// `encoder`. The result will be cached in this `*Set`.
// Encoded returns the encoded form of this set, according to encoder.
func (l *Set) Encoded(encoder Encoder) string {
if l == nil || encoder == nil {
return ""
}
id := encoder.ID()
if !id.Valid() {
// Invalid IDs are not cached.
return encoder.Encode(l.Iter())
}
var lookup *string
l.lock.Lock()
for idx := 0; idx < maxConcurrentEncoders; idx++ {
if l.encoders[idx] == id {
lookup = &l.encoded[idx]
break
}
}
l.lock.Unlock()
if lookup != nil {
return *lookup
}
r := encoder.Encode(l.Iter())
l.lock.Lock()
defer l.lock.Unlock()
for idx := 0; idx < maxConcurrentEncoders; idx++ {
if l.encoders[idx] == id {
return l.encoded[idx]
}
if !l.encoders[idx].Valid() {
l.encoders[idx] = id
l.encoded[idx] = r
return r
}
}
// TODO: This is a performance cliff. Find a way for this to
// generate a warning.
return r
return encoder.Encode(l.Iter())
}
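
A small illustrative sketch (not from the diff) of the Encoded API, assuming the package's DefaultEncoder is available as in this release; with the caching removed above, each call re-encodes the set.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service", "api"),
		attribute.Int("replicas", 3),
	)

	// DefaultEncoder renders sorted "key=value" pairs joined by commas.
	fmt.Println(set.Encoded(attribute.DefaultEncoder()))
}
```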
func empty() Set {
@ -235,39 +181,38 @@ func empty() Set {
}
}
// NewSet returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// Except for empty sets, this method adds an additional allocation
// compared with calls that include a `*Sortable`.
// Except for empty sets, this method adds an additional allocation compared
// with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set {
// Check for empty set.
if len(kvs) == 0 {
return empty()
}
s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
return s //nolint
return s
}
// NewSetWithSortable returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
// NewSetWithSortable returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// This call includes a `*Sortable` option as a memory optimization.
// This call includes a Sortable option as a memory optimization.
func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
// Check for empty set.
if len(kvs) == 0 {
return empty()
}
s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
return s //nolint
return s
}
// NewSetWithFiltered returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
// NewSetWithFiltered returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// This call includes a `Filter` to include/exclude label keys from
// the return value. Excluded keys are returned as a slice of label
// values.
// This call includes a Filter to include/exclude attribute keys from the
// return value. Excluded keys are returned as a slice of attribute values.
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set.
if len(kvs) == 0 {
@ -276,7 +221,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
}
// NewSetWithSortableFiltered returns a new `Set`.
// NewSetWithSortableFiltered returns a new Set.
//
// Duplicate keys are eliminated by taking the last value. This
// re-orders the input slice so that unique last-values are contiguous
@ -288,17 +233,16 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// - Caller sees the reordering, but doesn't lose values
// - Repeated call preserve last-value wins.
//
// Note that methods are defined on `*Set`, although this returns `Set`.
// Callers can avoid memory allocations by:
// Note that methods are defined on Set, although this returns Set. Callers
// can avoid memory allocations by:
//
// - allocating a `Sortable` for use as a temporary in this method
// - allocating a `Set` for storing the return value of this
// constructor.
// - allocating a Sortable for use as a temporary in this method
// - allocating a Set for storing the return value of this constructor.
//
// The result maintains a cache of encoded labels, by attribute.EncoderID.
// The result maintains a cache of encoded attributes, by attribute.EncoderID.
// This value should not be copied after its first use.
//
// The second `[]KeyValue` return value is a list of labels that were
// The second []KeyValue return value is a list of attributes that were
// excluded by the Filter (if non-nil).
func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
// Check for empty set.
@ -338,13 +282,13 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
}, nil
}
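
To make the sorting and filtering contract above concrete, a hedged sketch of constructing a filtered set; the attribute names and filter are invented for illustration.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kvs := []attribute.KeyValue{
		attribute.String("user", "alice"),
		attribute.String("region", "eu"),
		attribute.String("user", "bob"), // duplicate key: last value wins
	}

	// Keep only "region"; after de-duplication the remaining "user" entry
	// is reported in the removed slice.
	var tmp attribute.Sortable
	set, removed := attribute.NewSetWithSortableFiltered(kvs, &tmp, func(kv attribute.KeyValue) bool {
		return kv.Key == "region"
	})

	fmt.Println(set.Len(), len(removed))
}
```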
// filterSet reorders `kvs` so that included keys are contiguous at
// the end of the slice, while excluded keys precede the included keys.
// filterSet reorders kvs so that included keys are contiguous at the end of
// the slice, while excluded keys precede the included keys.
func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
var excluded []KeyValue
// Move labels that do not match the filter so
// they're adjacent before calling computeDistinct().
// Move attributes that do not match the filter so they're adjacent before
// calling computeDistinct().
distinctPosition := len(kvs)
// Swap indistinct keys forward and distinct keys toward the
@ -364,8 +308,8 @@ func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
}, excluded
}
// Filter returns a filtered copy of this `Set`. See the
// documentation for `NewSetWithSortableFiltered` for more details.
// Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil {
return Set{
@ -378,9 +322,9 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
return filterSet(l.ToSlice(), re)
}
// computeDistinct returns a `Distinct` using either the fixed- or
// reflect-oriented code path, depending on the size of the input.
// The input slice is assumed to already be sorted and de-duplicated.
// computeDistinct returns a Distinct using either the fixed- or
// reflect-oriented code path, depending on the size of the input. The input
// slice is assumed to already be sorted and de-duplicated.
func computeDistinct(kvs []KeyValue) Distinct {
iface := computeDistinctFixed(kvs)
if iface == nil {
@ -391,8 +335,8 @@ func computeDistinct(kvs []KeyValue) Distinct {
}
}
// computeDistinctFixed computes a `Distinct` for small slices. It
// returns nil if the input is too large for this code path.
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} {
switch len(kvs) {
case 1:
@ -440,8 +384,8 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
}
}
// computeDistinctReflect computes a `Distinct` using reflection,
// works for any size input.
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
func computeDistinctReflect(kvs []KeyValue) interface{} {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
@ -450,22 +394,31 @@ func computeDistinctReflect(kvs []KeyValue) interface{} {
return at.Interface()
}
// MarshalJSON returns the JSON encoding of the `*Set`.
// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface)
}
// Len implements `sort.Interface`.
// MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()
}
return kvs
}
// Len implements sort.Interface.
func (l *Sortable) Len() int {
return len(*l)
}
// Swap implements `sort.Interface`.
// Swap implements sort.Interface.
func (l *Sortable) Swap(i, j int) {
(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
}
// Less implements `sort.Interface`.
// Less implements sort.Interface.
func (l *Sortable) Less(i, j int) bool {
return (*l)[i].Key < (*l)[j].Key
}

View File

@ -13,12 +13,15 @@ func _() {
_ = x[INT64-2]
_ = x[FLOAT64-3]
_ = x[STRING-4]
_ = x[ARRAY-5]
_ = x[BOOLSLICE-5]
_ = x[INT64SLICE-6]
_ = x[FLOAT64SLICE-7]
_ = x[STRINGSLICE-8]
}
const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGARRAY"
const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 34}
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {

View File

@ -17,7 +17,6 @@ package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"go.opentelemetry.io/otel/internal"
@ -26,16 +25,14 @@ import (
//go:generate stringer -type=Type
// Type describes the type of the data Value holds.
type Type int
type Type int // nolint: revive // redefines builtin Type.
// Value represents the value part in key-value pairs.
type Value struct {
vtype Type
numeric uint64
stringly string
// TODO Lazy value type?
array interface{}
slice interface{}
}
const (
@ -49,10 +46,14 @@ const (
FLOAT64
// STRING is a string Type Value.
STRING
// ARRAY is an array Type Value used to store 1-dimensional slices or
// arrays of bool, int, int32, int64, uint, uint32, uint64, float,
// float32, float64, or string types.
ARRAY
// BOOLSLICE is a slice of booleans Type Value.
BOOLSLICE
// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
INT64SLICE
// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
FLOAT64SLICE
// STRINGSLICE is a slice of strings Type Value.
STRINGSLICE
)
// BoolValue creates a BOOL Value.
@ -63,6 +64,33 @@ func BoolValue(v bool) Value {
}
}
// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
cp := make([]bool, len(v))
copy(cp, v)
return Value{
vtype: BOOLSLICE,
slice: &cp,
}
}
// IntValue creates an INT64 Value.
func IntValue(v int) Value {
return Int64Value(int64(v))
}
// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
cp := make([]int64, 0, len(v))
for _, i := range v {
cp = append(cp, int64(i))
}
return Value{
vtype: INT64SLICE,
slice: &cp,
}
}
// Int64Value creates an INT64 Value.
func Int64Value(v int64) Value {
return Value{
@ -71,6 +99,16 @@ func Int64Value(v int64) Value {
}
}
// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
cp := make([]int64, len(v))
copy(cp, v)
return Value{
vtype: INT64SLICE,
slice: &cp,
}
}
// Float64Value creates a FLOAT64 Value.
func Float64Value(v float64) Value {
return Value{
@ -79,6 +117,16 @@ func Float64Value(v float64) Value {
}
}
// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
cp := make([]float64, len(v))
copy(cp, v)
return Value{
vtype: FLOAT64SLICE,
slice: &cp,
}
}
// StringValue creates a STRING Value.
func StringValue(v string) Value {
return Value{
@ -87,38 +135,14 @@ func StringValue(v string) Value {
}
}
// IntValue creates an INT64 Value.
func IntValue(v int) Value {
return Int64Value(int64(v))
}
// ArrayValue creates an ARRAY value from an array or slice.
// Only arrays or slices of bool, int, int64, float, float64, or string types are allowed.
// Specifically, arrays and slices can not contain other arrays, slices, structs, or non-standard
// types. If the passed value is not an array or slice of these types an
// INVALID value is returned.
func ArrayValue(v interface{}) Value {
switch reflect.TypeOf(v).Kind() {
case reflect.Array, reflect.Slice:
// get array type regardless of dimensions
typ := reflect.TypeOf(v).Elem()
kind := typ.Kind()
switch kind {
case reflect.Bool, reflect.Int, reflect.Int64,
reflect.Float64, reflect.String:
val := reflect.ValueOf(v)
length := val.Len()
frozen := reflect.Indirect(reflect.New(reflect.ArrayOf(length, typ)))
reflect.Copy(frozen, val)
return Value{
vtype: ARRAY,
array: frozen.Interface(),
}
default:
return Value{vtype: INVALID}
}
// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
cp := make([]string, len(v))
copy(cp, v)
return Value{
vtype: STRINGSLICE,
slice: &cp,
}
return Value{vtype: INVALID}
}
// Type returns a type of the Value.
@ -132,27 +156,58 @@ func (v Value) AsBool() bool {
return internal.RawToBool(v.numeric)
}
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
if s, ok := v.slice.(*[]bool); ok {
return *s
}
return nil
}
// AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64.
func (v Value) AsInt64() int64 {
return internal.RawToInt64(v.numeric)
}
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
if s, ok := v.slice.(*[]int64); ok {
return *s
}
return nil
}
// AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64.
func (v Value) AsFloat64() float64 {
return internal.RawToFloat64(v.numeric)
}
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
if s, ok := v.slice.(*[]float64); ok {
return *s
}
return nil
}
// AsString returns the string value. Make sure that the Value's type
// is STRING.
func (v Value) AsString() string {
return v.stringly
}
// AsArray returns the array Value as an interface{}.
func (v Value) AsArray() interface{} {
return v.array
// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
if s, ok := v.slice.(*[]string); ok {
return *s
}
return nil
}
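
A hedged round-trip sketch of the new slice value types introduced here; nothing below comes from the diff itself.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Int64SliceValue([]int64{1, 2, 3})

	// The constructor copies its input, so later mutation of the caller's
	// slice does not affect the stored value.
	fmt.Println(v.Type())         // INT64SLICE
	fmt.Println(v.AsInt64Slice()) // [1 2 3]
	fmt.Println(v.Emit())         // [1 2 3]
}
```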
type unknownValueType struct{}
@ -160,16 +215,22 @@ type unknownValueType struct{}
// AsInterface returns Value's data as interface{}.
func (v Value) AsInterface() interface{} {
switch v.Type() {
case ARRAY:
return v.AsArray()
case BOOL:
return v.AsBool()
case BOOLSLICE:
return v.AsBoolSlice()
case INT64:
return v.AsInt64()
case INT64SLICE:
return v.AsInt64Slice()
case FLOAT64:
return v.AsFloat64()
case FLOAT64SLICE:
return v.AsFloat64Slice()
case STRING:
return v.stringly
case STRINGSLICE:
return v.AsStringSlice()
}
return unknownValueType{}
}
@ -177,14 +238,20 @@ func (v Value) AsInterface() interface{} {
// Emit returns a string representation of Value's data.
func (v Value) Emit() string {
switch v.Type() {
case ARRAY:
return fmt.Sprint(v.array)
case BOOLSLICE:
return fmt.Sprint(*(v.slice.(*[]bool)))
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
return fmt.Sprint(*(v.slice.(*[]int64)))
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
return fmt.Sprint(*(v.slice.(*[]float64)))
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
return fmt.Sprint(*(v.slice.(*[]string)))
case STRING:
return v.stringly
default:

562
vendor/go.opentelemetry.io/otel/baggage/baggage.go generated vendored Normal file
View File

@ -0,0 +1,562 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage"
import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"go.opentelemetry.io/otel/internal/baggage"
)
const (
maxMembers = 180
maxBytesPerMembers = 4096
maxBytesPerBaggageString = 8192
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)
var (
keyRe = regexp.MustCompile(`^` + keyDef + `$`)
valueRe = regexp.MustCompile(`^` + valueDef + `$`)
propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
errInvalidKey = errors.New("invalid key")
errInvalidValue = errors.New("invalid value")
errInvalidProperty = errors.New("invalid baggage list-member property")
errInvalidMember = errors.New("invalid baggage list-member")
errMemberNumber = errors.New("too many list-members in baggage-string")
errMemberBytes = errors.New("list-member too large")
errBaggageBytes = errors.New("baggage-string too large")
)
// Property is an additional metadata entry for a baggage list-member.
type Property struct {
key, value string
// hasValue indicates whether the Property has a value set at all. It
// distinguishes a value explicitly set to the zero-value from a value
// that was never set.
hasValue bool
// hasData indicates whether the created property contains data or not.
// Properties that do not contain data are invalid with no other check
// required.
hasData bool
}
// NewKeyProperty returns a new Property for key.
//
// If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) {
if !keyRe.MatchString(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
p := Property{key: key, hasData: true}
return p, nil
}
// NewKeyValueProperty returns a new Property for key with value.
//
// If key or value are invalid, an error will be returned.
func NewKeyValueProperty(key, value string) (Property, error) {
if !keyRe.MatchString(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
p := Property{
key: key,
value: value,
hasValue: true,
hasData: true,
}
return p, nil
}
func newInvalidProperty() Property {
return Property{}
}
// parseProperty attempts to decode a Property from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
func parseProperty(property string) (Property, error) {
if property == "" {
return newInvalidProperty(), nil
}
match := propertyRe.FindStringSubmatch(property)
if len(match) != 4 {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
p := Property{hasData: true}
if match[1] != "" {
p.key = match[1]
} else {
p.key = match[2]
p.value = match[3]
p.hasValue = true
}
return p, nil
}
// validate ensures p conforms to the W3C Baggage specification, returning an
// error otherwise.
func (p Property) validate() error {
errFunc := func(err error) error {
return fmt.Errorf("invalid property: %w", err)
}
if !p.hasData {
return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
}
if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
if p.hasValue && !valueRe.MatchString(p.value) {
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
}
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
return nil
}
// Key returns the Property key.
func (p Property) Key() string {
return p.key
}
// Value returns the Property value. Additionally, a boolean value is returned
// indicating whether the Property has a value set at all, distinguishing an
// empty value from an unset one.
func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
// String encodes Property into a string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
}
return p.key
}
type properties []Property
func fromInternalProperties(iProps []baggage.Property) properties {
if len(iProps) == 0 {
return nil
}
props := make(properties, len(iProps))
for i, p := range iProps {
props[i] = Property{
key: p.Key,
value: p.Value,
hasValue: p.HasValue,
}
}
return props
}
func (p properties) asInternal() []baggage.Property {
if len(p) == 0 {
return nil
}
iProps := make([]baggage.Property, len(p))
for i, prop := range p {
iProps[i] = baggage.Property{
Key: prop.key,
Value: prop.value,
HasValue: prop.hasValue,
}
}
return iProps
}
func (p properties) Copy() properties {
if len(p) == 0 {
return nil
}
props := make(properties, len(p))
copy(props, p)
return props
}
// validate ensures each Property in p conforms to the W3C Baggage
// specification, returning an error otherwise.
func (p properties) validate() error {
for _, prop := range p {
if err := prop.validate(); err != nil {
return err
}
}
return nil
}
// String encodes properties into a string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
for i, prop := range p {
props[i] = prop.String()
}
return strings.Join(props, propertyDelimiter)
}
// Member is a list-member of a baggage-string as defined by the W3C Baggage
// specification.
type Member struct {
key, value string
properties properties
// hasData indicates whether the created member contains data or not.
// Members that do not contain data are invalid with no other check
// required.
hasData bool
}
// NewMember returns a new Member from the passed arguments. An error is
// returned if the created Member would be invalid according to the W3C
// Baggage specification.
func NewMember(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
value: value,
properties: properties(props).Copy(),
hasData: true,
}
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
return m, nil
}
func newInvalidMember() Member {
return Member{}
}
// parseMember attempts to decode a Member from the passed string. It returns
// an error if the input is invalid according to the W3C Baggage
// specification.
func parseMember(member string) (Member, error) {
if n := len(member); n > maxBytesPerMembers {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
var (
key, value string
props properties
)
parts := strings.SplitN(member, propertyDelimiter, 2)
switch len(parts) {
case 2:
// Parse the member properties.
for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
}
props = append(props, p)
}
fallthrough
case 1:
// Parse the member key/value pair.
// Take into account a value can contain equal signs (=).
kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
if len(kv) != 2 {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(kv[0])
var err error
value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
default:
// This should never happen unless a developer has changed the string
// splitting somehow. Panic instead of failing silently and allowing
// the bug to slip past the CI checks.
panic("failed to parse baggage member")
}
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
// validate ensures m conforms to the W3C Baggage specification, returning an
// error otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
if !keyRe.MatchString(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
if !valueRe.MatchString(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
return m.properties.validate()
}
// Key returns the Member key.
func (m Member) Key() string { return m.key }
// Value returns the Member value.
func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
// A key is just an ASCII string, but a value is URL encoded UTF-8.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
return s
}
// Baggage is a list of baggage members representing the baggage-string as
// defined by the W3C Baggage specification.
type Baggage struct { //nolint:golint
list baggage.List
}
// New returns a new valid Baggage. It returns an error if it results in a
// Baggage exceeding limits set in that specification.
//
// It expects all the provided members to have already been validated.
func New(members ...Member) (Baggage, error) {
if len(members) == 0 {
return Baggage{}, nil
}
b := make(baggage.List)
for _, m := range members {
if !m.hasData {
return Baggage{}, errInvalidMember
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
}
// Check member numbers after deduplication.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
}
bag := Baggage{b}
if n := len(bag.String()); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
return bag, nil
}
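
Not part of the vendored file: a short hedged sketch of building Baggage programmatically with the constructors defined above. The keys and values are invented for illustration.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Members and properties are validated against the W3C Baggage grammar.
	prop, _ := baggage.NewKeyValueProperty("ttl", "60")
	m1, _ := baggage.NewMember("userId", "alice", prop)
	m2, _ := baggage.NewMember("tenant", "acme")

	bag, err := baggage.New(m1, m2)
	if err != nil {
		panic(err)
	}
	// Member order in the encoded string is not guaranteed.
	fmt.Println(bag.String())
}
```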
// Parse attempts to decode a baggage-string from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
//
// If there are duplicate list-members contained in baggage, the last one
// defined (reading left-to-right) will be the only one kept. This diverges
// from the W3C Baggage specification which allows duplicate list-members, but
// conforms to the OpenTelemetry Baggage specification.
func Parse(bStr string) (Baggage, error) {
if bStr == "" {
return Baggage{}, nil
}
if n := len(bStr); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
b := make(baggage.List)
for _, memberStr := range strings.Split(bStr, listDelimiter) {
m, err := parseMember(memberStr)
if err != nil {
return Baggage{}, err
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
}
// OpenTelemetry does not allow for duplicate list-members, but the W3C
// specification does. Now that we have deduplicated, ensure the baggage
// does not exceed list-member limits.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
}
return Baggage{b}, nil
}
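
A hedged sketch of decoding a baggage header string with Parse, using an invented example header value; duplicates resolve last-one-wins as described above.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	bag, err := baggage.Parse("userId=alice,region=eu-west-1;ttl=60,userId=bob")
	if err != nil {
		panic(err)
	}

	fmt.Println(bag.Len())                    // 2
	fmt.Println(bag.Member("userId").Value()) // bob
}
```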
// Member returns the baggage list-member identified by key.
//
// If there is no list-member matching the passed key the returned Member will
// be a zero-value Member.
// The returned member is not validated, as we assume the validation happened
// when it was added to the Baggage.
func (b Baggage) Member(key string) Member {
v, ok := b.list[key]
if !ok {
// We do not need to worry about distinguishing between the situation
// where a zero-valued Member is included in the Baggage because a
// zero-valued Member is invalid according to the W3C Baggage
// specification (it has an empty key).
return newInvalidMember()
}
return Member{
key: key,
value: v.Value,
properties: fromInternalProperties(v.Properties),
}
}
// Members returns all the baggage list-members.
// The order of the returned list-members does not have significance.
//
// The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage.
func (b Baggage) Members() []Member {
if len(b.list) == 0 {
return nil
}
members := make([]Member, 0, len(b.list))
for k, v := range b.list {
members = append(members, Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
})
}
return members
}
// SetMember returns a copy of the Baggage with the member included. If the
// baggage contains a Member with the same key the existing Member is
// replaced.
//
// If member is invalid according to the W3C Baggage specification, an error
// is returned with the original Baggage.
func (b Baggage) SetMember(member Member) (Baggage, error) {
if !member.hasData {
return b, errInvalidMember
}
n := len(b.list)
if _, ok := b.list[member.key]; !ok {
n++
}
list := make(baggage.List, n)
for k, v := range b.list {
// Do not copy if we are just going to overwrite.
if k == member.key {
continue
}
list[k] = v
}
list[member.key] = baggage.Item{
Value: member.value,
Properties: member.properties.asInternal(),
}
return Baggage{list: list}, nil
}
// DeleteMember returns a copy of the Baggage with the list-member identified
// by key removed.
func (b Baggage) DeleteMember(key string) Baggage {
n := len(b.list)
if _, ok := b.list[key]; ok {
n--
}
list := make(baggage.List, n)
for k, v := range b.list {
if k == key {
continue
}
list[k] = v
}
return Baggage{list: list}
}
// Len returns the number of list-members in the Baggage.
func (b Baggage) Len() int {
return len(b.list)
}
// String encodes Baggage into a string compliant with the W3C Baggage
// specification. The returned string will be invalid if the Baggage contains
// any invalid list-members.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
members = append(members, Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
}.String())
}
return strings.Join(members, listDelimiter)
}

39
vendor/go.opentelemetry.io/otel/baggage/context.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage"
import (
"context"
"go.opentelemetry.io/otel/internal/baggage"
)
// ContextWithBaggage returns a copy of parent with baggage.
func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
// Delegate so any hooks for the OpenTracing bridge are handled.
return baggage.ContextWithList(parent, b.list)
}
// ContextWithoutBaggage returns a copy of parent with no baggage.
func ContextWithoutBaggage(parent context.Context) context.Context {
// Delegate so any hooks for the OpenTracing bridge are handled.
return baggage.ContextWithList(parent, nil)
}
// FromContext returns the baggage contained in ctx.
func FromContext(ctx context.Context) Baggage {
// Delegate so any hooks for the OpenTracing bridge are handled.
return Baggage{list: baggage.ListFromContext(ctx)}
}
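
A brief hedged sketch of carrying Baggage through a context.Context with the helpers above; the member key and value are invented.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m, _ := baggage.NewMember("tenant", "acme")
	bag, _ := baggage.New(m)

	// Store the baggage on a context and read it back downstream.
	ctx := baggage.ContextWithBaggage(context.Background(), bag)
	fmt.Println(baggage.FromContext(ctx).Member("tenant").Value()) // acme
}
```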

20
vendor/go.opentelemetry.io/otel/baggage/doc.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package baggage provides functionality for storing and retrieving
baggage items in Go context. For propagating the baggage, see the
go.opentelemetry.io/otel/propagation package.
*/
package baggage // import "go.opentelemetry.io/otel/baggage"

View File

@ -15,10 +15,6 @@
/*
Package codes defines the canonical error codes used by OpenTelemetry.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track
the evolving OpenTelemetry specification and user feedback.
It conforms to [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
*/

View File

@ -16,10 +16,6 @@
Package otel provides global access to the OpenTelemetry API. The subpackages of
the otel package provide an implementation of the OpenTelemetry API.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
The provided API is used to instrument code and measure data about that code's
performance and operation. The measured data, by default, is not processed or
transmitted anywhere. An implementation of the OpenTelemetry SDK, like the

View File

@ -16,7 +16,23 @@ package otel // import "go.opentelemetry.io/otel"
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Handle handles any error deemed irremediable by an OpenTelemetry
// component.
Handle(error)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
// ErrorHandlerFunc is a convenience adapter to allow the use of a function
// as an ErrorHandler.
type ErrorHandlerFunc func(error)
var _ ErrorHandler = ErrorHandlerFunc(nil)
// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
func (f ErrorHandlerFunc) Handle(err error) {
f(err)
}
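
A hedged sketch of the adapter in use; it assumes the otel.SetErrorHandler and otel.Handle entry points available in this module.

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Route irremediable OpenTelemetry errors to the standard logger.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel error: %v", err)
	}))

	// Components report errors via otel.Handle, which delegates to the handler.
	otel.Handle(errors.New("example"))
}
```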

View File

@ -1,31 +0,0 @@
# OpenTelemetry Collector Go Exporter
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp)
This exporter exports OpenTelemetry spans and metrics to the OpenTelemetry Collector.
## Installation and Setup
The exporter can be installed using standard `go` functionality.
```bash
$ go get -u go.opentelemetry.io/otel/exporters/otlp
```
A new exporter can be created using the `NewExporter` function.
## Retries
The exporter will not, by default, retry failed requests to the collector.
However, it is configured in a way that it can be easily enabled.
To enable retries, the `GRPC_GO_RETRY` environment variable needs to be set to `on`. For example,
```
GRPC_GO_RETRY=on go run .
```
The [default service config](https://github.com/grpc/proposal/blob/master/A6-client-retries.md) used is defined to retry failed requests with exponential backoff (`0.3seconds * (2)^retry`) with [a max of `5` retries](https://github.com/open-telemetry/oteps/blob/be2a3fcbaa417ebbf5845cd485d34fdf0ab4a2a4/text/0035-opentelemetry-protocol.md#export-response).
These retries are only attempted for responses that are [deemed "retry-able" by the collector](https://github.com/grpc/proposal/blob/master/A6-client-retries.md#validation-of-retrypolicy).

View File

@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal contains common functionality for all OTLP exporters.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
import (
"fmt"
"path"
"strings"
)
// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
func CleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
tmp = fmt.Sprintf("/%s", tmp)
}
return tmp
}
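
A hedged illustration of CleanPath's behavior, written as an example function in the same (internal) package since it cannot be imported from outside the module; it is not part of the vendored file.

```go
package internal

import "fmt"

// ExampleCleanPath is an illustrative sketch of how CleanPath normalizes
// URL paths before they are used as OTLP endpoint paths.
func ExampleCleanPath() {
	fmt.Println(CleanPath("", "/v1/traces"))               // empty input falls back to the default
	fmt.Println(CleanPath("  /v1//traces ", "/v1/traces")) // spaces trimmed, duplicate slash collapsed
	fmt.Println(CleanPath("v1/metrics", "/v1/metrics"))    // leading slash added
	// Output:
	// /v1/traces
	// /v1/traces
	// /v1/metrics
}
```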

View File

@ -0,0 +1,148 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)
// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(string) ([]byte, error)
Namespace string
}
// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
for _, o := range opts {
o(e)
}
}
// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
return v, v != ""
}
// WithString retrieves the specified config and passes it to ConfigFn as a string.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(v)
}
}
}
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if d, err := strconv.Atoi(v); err == nil {
fn(time.Duration(d) * time.Millisecond)
}
}
}
}
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(stringToHeader(v))
}
}
}
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if u, err := url.Parse(v); err == nil {
fn(u)
}
}
}
}
// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config.
func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if b, err := e.ReadFile(v); err == nil {
if c, err := createTLSConfig(b); err == nil {
fn(c)
}
}
}
}
}
func keyWithNamespace(ns, key string) string {
if ns == "" {
return key
}
return fmt.Sprintf("%s_%s", ns, key)
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
func createTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
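
To show how the envconfig reader above is meant to be wired together, a hedged sketch written in the same package style; the environment variable names follow the OTLP exporter convention shown elsewhere in this diff, while the surrounding function and variables are invented.

```go
package envconfig

import (
	"os"
	"time"
)

// exampleApply is an illustrative sketch (not part of the vendored file) of
// composing EnvOptionsReader with the option helpers defined above.
func exampleApply() (endpoint string, timeout time.Duration, headers map[string]string) {
	e := EnvOptionsReader{
		GetEnv:    os.Getenv,
		ReadFile:  os.ReadFile,
		Namespace: "OTEL_EXPORTER_OTLP",
	}
	e.Apply(
		// Reads OTEL_EXPORTER_OTLP_ENDPOINT.
		WithString("ENDPOINT", func(v string) { endpoint = v }),
		// Reads OTEL_EXPORTER_OTLP_TIMEOUT, interpreted as milliseconds.
		WithDuration("TIMEOUT", func(d time.Duration) { timeout = d }),
		// Reads OTEL_EXPORTER_OTLP_HEADERS, e.g. "api-key=secret,tenant=acme".
		WithHeaders("HEADERS", func(h map[string]string) { headers = h }),
	)
	return endpoint, timeout, headers
}
```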

View File

@ -1,196 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/url"
"os"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel"
)
func ApplyGRPCEnvConfigs(cfg *Config) {
e := EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile,
}
e.ApplyGRPCEnvConfigs(cfg)
}
func ApplyHTTPEnvConfigs(cfg *Config) {
e := EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile,
}
e.ApplyHTTPEnvConfigs(cfg)
}
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(filename string) ([]byte, error)
}
func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts {
opt.ApplyHTTPOption(cfg)
}
}
func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts {
opt.ApplyGRPCOption(cfg)
}
}
func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption {
var opts []GenericOption
// Endpoint
if v, ok := e.getEnvValue("ENDPOINT"); ok {
opts = append(opts, WithEndpoint(v))
}
if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok {
opts = append(opts, WithTracesEndpoint(v))
}
if v, ok := e.getEnvValue("METRICS_ENDPOINT"); ok {
opts = append(opts, WithMetricsEndpoint(v))
}
// Certificate File
if path, ok := e.getEnvValue("CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTracesTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("METRICS_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithMetricsTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp metrics exporter certificate '%s': %w", path, err))
}
}
// Headers
if h, ok := e.getEnvValue("HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("TRACES_HEADERS"); ok {
opts = append(opts, WithTracesHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("METRICS_HEADERS"); ok {
opts = append(opts, WithMetricsHeaders(stringToHeader(h)))
}
// Compression
if c, ok := e.getEnvValue("COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok {
opts = append(opts, WithTracesCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("METRICS_COMPRESSION"); ok {
opts = append(opts, WithMetricsCompression(stringToCompression(c)))
}
// Timeout
if t, ok := e.getEnvValue("TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTracesTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("METRICS_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithMetricsTimeout(time.Duration(d)*time.Millisecond))
}
}
return opts
}
// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function.
// This function already prepends the OTLP prefix to all key lookups.
func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key)))
return v, v != ""
}
func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) {
b, err := e.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
func stringToCompression(value string) otlp.Compression {
switch value {
case "gzip":
return otlp.GzipCompression
}
return otlp.NoCompression
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
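
For reference, a minimal sketch (not part of the vendored code) of the environment variable shapes that GetOptionsFromEnv consumes. The values below are assumptions for illustration; only the variable names and formats follow the code above, with the OTEL_EXPORTER_OTLP_ prefix added by getEnvValue.

package main

import "os"

func main() {
	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "collector:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_HEADERS", "api-key=secret,team=observability") // comma-separated k=v pairs for stringToHeader
	os.Setenv("OTEL_EXPORTER_OTLP_TIMEOUT", "10000")                             // integer milliseconds, parsed with strconv.Atoi
	os.Setenv("OTEL_EXPORTER_OTLP_COMPRESSION", "gzip")                          // any other value falls back to no compression
}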

View File

@ -1,376 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel/exporters/otlp"
)
const (
// DefaultMaxAttempts describes how many times the driver
// should retry the sending of the payload in case of a
// retryable error.
DefaultMaxAttempts int = 5
// DefaultTracesPath is a default URL path for endpoint that
// receives spans.
DefaultTracesPath string = "/v1/traces"
// DefaultMetricsPath is a default URL path for endpoint that
// receives metrics.
DefaultMetricsPath string = "/v1/metrics"
// DefaultBackoff is a default base backoff time used in the
// exponential backoff strategy.
DefaultBackoff time.Duration = 300 * time.Millisecond
// DefaultTimeout is a default max waiting time for the backend to process
// each span or metrics batch.
DefaultTimeout time.Duration = 10 * time.Second
// DefaultServiceConfig is the gRPC service config used if none is
// provided by the user.
DefaultServiceConfig = `{
"methodConfig":[{
"name":[
{ "service":"opentelemetry.proto.collector.metrics.v1.MetricsService" },
{ "service":"opentelemetry.proto.collector.trace.v1.TraceService" }
],
"retryPolicy":{
"MaxAttempts":5,
"InitialBackoff":"0.3s",
"MaxBackoff":"5s",
"BackoffMultiplier":2,
"RetryableStatusCodes":[
"CANCELLED",
"DEADLINE_EXCEEDED",
"RESOURCE_EXHAUSTED",
"ABORTED",
"OUT_OF_RANGE",
"UNAVAILABLE",
"DATA_LOSS"
]
}
}]
}`
)
type (
SignalConfig struct {
Endpoint string
Insecure bool
TLSCfg *tls.Config
Headers map[string]string
Compression otlp.Compression
Timeout time.Duration
URLPath string
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
}
Config struct {
// Signal specific configurations
Metrics SignalConfig
Traces SignalConfig
// General configurations
MaxAttempts int
Backoff time.Duration
// HTTP configuration
Marshaler otlp.Marshaler
// gRPC configurations
ReconnectionPeriod time.Duration
ServiceConfig string
DialOptions []grpc.DialOption
}
)
func NewDefaultConfig() Config {
c := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", otlp.DefaultCollectorHost, otlp.DefaultCollectorPort),
URLPath: DefaultTracesPath,
Compression: otlp.NoCompression,
Timeout: DefaultTimeout,
},
Metrics: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", otlp.DefaultCollectorHost, otlp.DefaultCollectorPort),
URLPath: DefaultMetricsPath,
Compression: otlp.NoCompression,
Timeout: DefaultTimeout,
},
MaxAttempts: DefaultMaxAttempts,
Backoff: DefaultBackoff,
ServiceConfig: DefaultServiceConfig,
}
return c
}
type (
// GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface {
ApplyHTTPOption(*Config)
ApplyGRPCOption(*Config)
// A private method to prevent users from implementing the
// interface, so that future additions to it do not
// violate compatibility.
private()
}
// HTTPOption applies an option to the HTTP driver.
HTTPOption interface {
ApplyHTTPOption(*Config)
// A private method to prevent users from implementing the
// interface, so that future additions to it do not
// violate compatibility.
private()
}
// GRPCOption applies an option to the gRPC driver.
GRPCOption interface {
ApplyGRPCOption(*Config)
// A private method to prevent users from implementing the
// interface, so that future additions to it do not
// violate compatibility.
private()
}
)
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
fn func(*Config)
}
func (g *genericOption) ApplyGRPCOption(cfg *Config) {
g.fn(cfg)
}
func (g *genericOption) ApplyHTTPOption(cfg *Config) {
g.fn(cfg)
}
func (genericOption) private() {}
func newGenericOption(fn func(cfg *Config)) GenericOption {
return &genericOption{fn: fn}
}
// splitOption is an option that applies different logic
// for gRPC and HTTP.
type splitOption struct {
httpFn func(*Config)
grpcFn func(*Config)
}
func (g *splitOption) ApplyGRPCOption(cfg *Config) {
g.grpcFn(cfg)
}
func (g *splitOption) ApplyHTTPOption(cfg *Config) {
g.httpFn(cfg)
}
func (splitOption) private() {}
func newSplitOption(httpFn func(cfg *Config), grpcFn func(cfg *Config)) GenericOption {
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
fn func(*Config)
}
func (h *httpOption) ApplyHTTPOption(cfg *Config) {
h.fn(cfg)
}
func (httpOption) private() {}
func NewHTTPOption(fn func(cfg *Config)) HTTPOption {
return &httpOption{fn: fn}
}
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
fn func(*Config)
}
func (h *grpcOption) ApplyGRPCOption(cfg *Config) {
h.fn(cfg)
}
func (grpcOption) private() {}
func NewGRPCOption(fn func(cfg *Config)) GRPCOption {
return &grpcOption{fn: fn}
}
// Generic Options
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Endpoint = endpoint
cfg.Metrics.Endpoint = endpoint
})
}
func WithTracesEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Endpoint = endpoint
})
}
func WithMetricsEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Endpoint = endpoint
})
}
func WithCompression(compression otlp.Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Compression = compression
cfg.Metrics.Compression = compression
})
}
func WithTracesCompression(compression otlp.Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Compression = compression
})
}
func WithMetricsCompression(compression otlp.Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Compression = compression
})
}
func WithTracesURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.URLPath = urlPath
})
}
func WithMetricsURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.URLPath = urlPath
})
}
func WithMaxAttempts(maxAttempts int) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.MaxAttempts = maxAttempts
})
}
func WithBackoff(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Backoff = duration
})
}
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Traces.TLSCfg = tlsCfg.Clone()
cfg.Metrics.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithTracesTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Traces.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithMetricsTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Metrics.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithInsecure() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Insecure = true
cfg.Metrics.Insecure = true
})
}
func WithInsecureTraces() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Insecure = true
})
}
func WithInsecureMetrics() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Insecure = true
})
}
func WithHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Headers = headers
cfg.Metrics.Headers = headers
})
}
func WithTracesHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Headers = headers
})
}
func WithMetricsHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Headers = headers
})
}
func WithTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Timeout = duration
cfg.Metrics.Timeout = duration
})
}
func WithTracesTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Timeout = duration
})
}
func WithMetricsTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Timeout = duration
})
}
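
As an illustration only (this otlpconfig package is internal to the exporter, so outside code cannot import it), a hypothetical sketch of how these options compose onto a Config, written as if it lived in this package:

// exampleConfig is a hypothetical helper showing how the options above
// compose onto a Config produced by NewDefaultConfig.
func exampleConfig() Config {
	cfg := NewDefaultConfig()
	opts := []GenericOption{
		WithEndpoint("collector:4317"),               // applies to both signals
		WithTracesTimeout(5 * time.Second),           // traces only
		WithMetricsCompression(otlp.GzipCompression), // metrics only
	}
	for _, opt := range opts {
		opt.ApplyGRPCOption(&cfg) // the HTTP driver would use ApplyHTTPOption instead
	}
	return cfg
}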

View File

@ -1,69 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"crypto/tls"
"crypto/x509"
"errors"
"io/ioutil"
)
const (
WeakCertificate = `
-----BEGIN CERTIFICATE-----
MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
Lhnm4N/QDk5rek0=
-----END CERTIFICATE-----
`
WeakPrivateKey = `
-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
-----END PRIVATE KEY-----
`
)
// ReadTLSConfigFromFile reads a PEM certificate file and creates
// a tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
// CreateTLSConfig creates a tls.Config from raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
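
A short hypothetical sketch, written as if it lived in this package, of how the helpers above are exercised with the weak test certificate:

// exampleTLSConfig builds a client TLS config that trusts WeakCertificate.
// CreateTLSConfig only fills the RootCAs pool for server verification; it
// does not load any client key pair.
func exampleTLSConfig() (*tls.Config, error) {
	return CreateTLSConfig([]byte(WeakCertificate))
}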

View File

@ -0,0 +1,68 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
import "fmt"
// PartialSuccessDropKind indicates the kind of partial success error
// received by an OTLP exporter, which corresponds with the signal
// being exported.
type PartialSuccessDropKind string
const (
// TracingPartialSuccess indicates that some spans were rejected.
TracingPartialSuccess PartialSuccessDropKind = "spans"
// MetricsPartialSuccess indicates that some metric data points were rejected.
MetricsPartialSuccess PartialSuccessDropKind = "metric data points"
)
// PartialSuccess represents the underlying error for all handling of
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
ErrorMessage string
RejectedItems int64
RejectedKind PartialSuccessDropKind
}
var _ error = PartialSuccess{}
// Error implements the error interface.
func (ps PartialSuccess) Error() string {
msg := ps.ErrorMessage
if msg == "" {
msg = "empty message"
}
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}
// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
_, ok := err.(PartialSuccess)
return ok
}
// PartialSuccessToError produces an error suitable for passing to
// `otel.Handle()` out of the fields in a partial success response,
// independent of which signal produced the outcome.
func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error {
return PartialSuccess{
ErrorMessage: errorMessage,
RejectedItems: itemsRejected,
RejectedKind: kind,
}
}
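
A minimal sketch of how an OTel error handler can detect these reports, written as if it lived in this package (it would additionally need the "errors" import):

// handleExportErr is a hypothetical error-handler body.
func handleExportErr(err error) {
	if errors.Is(err, PartialSuccess{}) {
		// Some spans or metric data points were rejected by the collector.
		fmt.Println("partial export:", err) // err reports the rejected count and kind
	}
}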

View File

@ -0,0 +1,150 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
// explicit throttle responses received.
package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
import (
"context"
"fmt"
"time"
"github.com/cenkalti/backoff/v4"
)
// DefaultConfig are the recommended defaults to use.
var DefaultConfig = Config{
Enabled: true,
InitialInterval: 5 * time.Second,
MaxInterval: 30 * time.Second,
MaxElapsedTime: time.Minute,
}
// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
// Enabled indicates whether to retry sending batches in case of
// export failure.
Enabled bool
// InitialInterval is the time to wait after the first failure before
// retrying.
InitialInterval time.Duration
// MaxInterval is the upper bound on backoff interval. Once this value is
// reached the delay between consecutive retries will always be
// `MaxInterval`.
MaxInterval time.Duration
// MaxElapsedTime is the maximum amount of time (including retries) spent
// trying to send a request/batch. Once this value is reached, the data
// is discarded.
MaxElapsedTime time.Duration
}
// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error
// EvaluateFunc returns whether an error is retry-able and any explicit
// throttle duration included in the error that should be honored.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
if !c.Enabled {
return func(ctx context.Context, fn func(context.Context) error) error {
return fn(ctx)
}
}
// Do not use NewExponentialBackOff since it calls Reset and the code here
// must call Reset after changing the InitialInterval (this saves an
// unnecessary call to Now).
b := &backoff.ExponentialBackOff{
InitialInterval: c.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()
return func(ctx context.Context, fn func(context.Context) error) error {
for {
err := fn(ctx)
if err == nil {
return nil
}
retryable, throttle := evaluate(err)
if !retryable {
return err
}
bOff := b.NextBackOff()
if bOff == backoff.Stop {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
}
if err := waitFunc(ctx, delay); err != nil {
return err
}
}
}
}
// Allow override for testing.
var waitFunc = wait
func wait(ctx context.Context, delay time.Duration) error {
timer := time.NewTimer(delay)
defer timer.Stop()
select {
case <-ctx.Done():
// Handle the case where the timer and context deadline end
// simultaneously by prioritizing the timer expiration nil value
// response.
select {
case <-timer.C:
default:
return ctx.Err()
}
case <-timer.C:
}
return nil
}
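
A brief hypothetical sketch, written as if it lived in this package, of wrapping an assumed export call with the retry policy above:

// requestWithRetry retries export according to DefaultConfig.
func requestWithRetry(ctx context.Context, export func(context.Context) error) error {
	evaluate := func(err error) (bool, time.Duration) {
		// Assumption for illustration: every error is retryable and carries
		// no explicit throttle duration.
		return true, 0
	}
	do := DefaultConfig.RequestFunc(evaluate)
	return do(ctx, export)
}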

View File

@ -1,141 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
import (
"reflect"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
"go.opentelemetry.io/otel/sdk/resource"
)
// Attributes transforms a slice of KeyValues into a slice of OTLP attribute key-values.
func Attributes(attrs []attribute.KeyValue) []*commonpb.KeyValue {
if len(attrs) == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, len(attrs))
for _, kv := range attrs {
out = append(out, toAttribute(kv))
}
return out
}
// ResourceAttributes transforms a Resource into a slice of OTLP attribute key-values.
func ResourceAttributes(resource *resource.Resource) []*commonpb.KeyValue {
if resource.Len() == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, resource.Len())
for iter := resource.Iter(); iter.Next(); {
out = append(out, toAttribute(iter.Attribute()))
}
return out
}
func toAttribute(v attribute.KeyValue) *commonpb.KeyValue {
result := &commonpb.KeyValue{
Key: string(v.Key),
Value: new(commonpb.AnyValue),
}
switch v.Value.Type() {
case attribute.BOOL:
result.Value.Value = &commonpb.AnyValue_BoolValue{
BoolValue: v.Value.AsBool(),
}
case attribute.INT64:
result.Value.Value = &commonpb.AnyValue_IntValue{
IntValue: v.Value.AsInt64(),
}
case attribute.FLOAT64:
result.Value.Value = &commonpb.AnyValue_DoubleValue{
DoubleValue: v.Value.AsFloat64(),
}
case attribute.STRING:
result.Value.Value = &commonpb.AnyValue_StringValue{
StringValue: v.Value.AsString(),
}
case attribute.ARRAY:
result.Value.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: arrayValues(v),
},
}
default:
result.Value.Value = &commonpb.AnyValue_StringValue{
StringValue: "INVALID",
}
}
return result
}
func arrayValues(kv attribute.KeyValue) []*commonpb.AnyValue {
a := kv.Value.AsArray()
aType := reflect.TypeOf(a)
var valueFunc func(reflect.Value) *commonpb.AnyValue
switch aType.Elem().Kind() {
case reflect.Bool:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v.Bool(),
},
}
}
case reflect.Int, reflect.Int64:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v.Int(),
},
}
}
case reflect.Uintptr:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: int64(v.Uint()),
},
}
}
case reflect.Float64:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v.Float(),
},
}
}
case reflect.String:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v.String(),
},
}
}
}
results := make([]*commonpb.AnyValue, aType.Len())
for i, aValue := 0, reflect.ValueOf(a); i < aValue.Len(); i++ {
results[i] = valueFunc(aValue.Index(i))
}
return results
}
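
A hypothetical sketch of the mapping performed by Attributes, written as if it lived in this package (attribute.String and attribute.Int come from go.opentelemetry.io/otel/attribute):

// exampleAttributes converts two SDK attributes into OTLP key-values.
func exampleAttributes() []*commonpb.KeyValue {
	kvs := []attribute.KeyValue{
		attribute.String("service.name", "demo"), // becomes an AnyValue_StringValue
		attribute.Int("retry.count", 3),          // becomes an AnyValue_IntValue
	}
	return Attributes(kvs)
}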

View File

@ -1,631 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transform provides translations for opentelemetry-go concepts and
// structures to otlp structures.
package transform
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
)
var (
// ErrUnimplementedAgg is returned when a transformation of an unimplemented
// aggregator is attempted.
ErrUnimplementedAgg = errors.New("unimplemented aggregator")
// ErrIncompatibleAgg is returned when
// aggregation.Kind implies an interface conversion that has
// failed.
ErrIncompatibleAgg = errors.New("incompatible aggregation type")
// ErrUnknownValueType is returned when a transformation of an unknown value
// is attempted.
ErrUnknownValueType = errors.New("invalid value type")
// ErrContextCanceled is returned when a context cancellation halts a
// transformation.
ErrContextCanceled = errors.New("context canceled")
// ErrTransforming is returned when an unexpected error is encountered while transforming.
ErrTransforming = errors.New("transforming failed")
)
// result is the product of transforming Records into OTLP Metrics.
type result struct {
Resource *resource.Resource
InstrumentationLibrary instrumentation.Library
Metric *metricpb.Metric
Err error
}
// toNanos returns the number of nanoseconds since the UNIX epoch.
func toNanos(t time.Time) uint64 {
if t.IsZero() {
return 0
}
return uint64(t.UnixNano())
}
// CheckpointSet transforms all records contained in a checkpoint into
// batched OTLP ResourceMetrics.
func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
records, errc := source(ctx, exportSelector, cps)
// Start a fixed number of goroutines to transform records.
transformed := make(chan result)
var wg sync.WaitGroup
wg.Add(int(numWorkers))
for i := uint(0); i < numWorkers; i++ {
go func() {
defer wg.Done()
transformer(ctx, exportSelector, records, transformed)
}()
}
go func() {
wg.Wait()
close(transformed)
}()
// Synchronously collect the transformed records and transmit.
rms, err := sink(ctx, transformed)
if err != nil {
return nil, err
}
// source is complete, check for any errors.
if err := <-errc; err != nil {
return nil, err
}
return rms, nil
}
// source starts a goroutine that sends each one of the Records yielded by
// the CheckpointSet on the returned chan. Any error encountered will be sent
// on the returned error chan after seeding is complete.
func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
errc := make(chan error, 1)
out := make(chan export.Record)
// Seed records into process.
go func() {
defer close(out)
// No select is needed since errc is buffered.
errc <- cps.ForEach(exportSelector, func(r export.Record) error {
select {
case <-ctx.Done():
return ErrContextCanceled
case out <- r:
}
return nil
})
}()
return out, errc
}
// transformer transforms records read from the passed in chan into
// OTLP Metrics which are sent on the out chan.
func transformer(ctx context.Context, exportSelector export.ExportKindSelector, in <-chan export.Record, out chan<- result) {
for r := range in {
m, err := Record(exportSelector, r)
// Propagate errors, but do not send empty results.
if err == nil && m == nil {
continue
}
res := result{
Resource: r.Resource(),
InstrumentationLibrary: instrumentation.Library{
Name: r.Descriptor().InstrumentationName(),
Version: r.Descriptor().InstrumentationVersion(),
},
Metric: m,
Err: err,
}
select {
case <-ctx.Done():
return
case out <- res:
}
}
}
// sink collects transformed Records and batches them.
//
// Any errors encountered transforming input will be reported with an
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
// caller to handle any incorrect data in these ResourceMetrics.
func sink(ctx context.Context, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
var errStrings []string
type resourceBatch struct {
Resource *resourcepb.Resource
// Group by instrumentation library name and then the MetricDescriptor.
InstrumentationLibraryBatches map[instrumentation.Library]map[string]*metricpb.Metric
}
// group by unique Resource string.
grouped := make(map[attribute.Distinct]resourceBatch)
for res := range in {
if res.Err != nil {
errStrings = append(errStrings, res.Err.Error())
continue
}
rID := res.Resource.Equivalent()
rb, ok := grouped[rID]
if !ok {
rb = resourceBatch{
Resource: Resource(res.Resource),
InstrumentationLibraryBatches: make(map[instrumentation.Library]map[string]*metricpb.Metric),
}
grouped[rID] = rb
}
mb, ok := rb.InstrumentationLibraryBatches[res.InstrumentationLibrary]
if !ok {
mb = make(map[string]*metricpb.Metric)
rb.InstrumentationLibraryBatches[res.InstrumentationLibrary] = mb
}
mID := res.Metric.GetName()
m, ok := mb[mID]
if !ok {
mb[mID] = res.Metric
continue
}
switch res.Metric.Data.(type) {
case *metricpb.Metric_IntGauge:
m.GetIntGauge().DataPoints = append(m.GetIntGauge().DataPoints, res.Metric.GetIntGauge().DataPoints...)
case *metricpb.Metric_IntHistogram:
m.GetIntHistogram().DataPoints = append(m.GetIntHistogram().DataPoints, res.Metric.GetIntHistogram().DataPoints...)
case *metricpb.Metric_IntSum:
m.GetIntSum().DataPoints = append(m.GetIntSum().DataPoints, res.Metric.GetIntSum().DataPoints...)
case *metricpb.Metric_DoubleGauge:
m.GetDoubleGauge().DataPoints = append(m.GetDoubleGauge().DataPoints, res.Metric.GetDoubleGauge().DataPoints...)
case *metricpb.Metric_DoubleHistogram:
m.GetDoubleHistogram().DataPoints = append(m.GetDoubleHistogram().DataPoints, res.Metric.GetDoubleHistogram().DataPoints...)
case *metricpb.Metric_DoubleSum:
m.GetDoubleSum().DataPoints = append(m.GetDoubleSum().DataPoints, res.Metric.GetDoubleSum().DataPoints...)
default:
}
}
if len(grouped) == 0 {
return nil, nil
}
var rms []*metricpb.ResourceMetrics
for _, rb := range grouped {
rm := &metricpb.ResourceMetrics{Resource: rb.Resource}
for il, mb := range rb.InstrumentationLibraryBatches {
ilm := &metricpb.InstrumentationLibraryMetrics{
Metrics: make([]*metricpb.Metric, 0, len(mb)),
}
if il != (instrumentation.Library{}) {
ilm.InstrumentationLibrary = &commonpb.InstrumentationLibrary{
Name: il.Name,
Version: il.Version,
}
}
for _, m := range mb {
ilm.Metrics = append(ilm.Metrics, m)
}
rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
}
rms = append(rms, rm)
}
// Report any transform errors.
if len(errStrings) > 0 {
return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
}
return rms, nil
}
// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
// error is returned if the Record Aggregator is not supported.
func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricpb.Metric, error) {
agg := r.Aggregation()
switch agg.Kind() {
case aggregation.MinMaxSumCountKind:
mmsc, ok := agg.(aggregation.MinMaxSumCount)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return minMaxSumCount(r, mmsc)
case aggregation.HistogramKind:
h, ok := agg.(aggregation.Histogram)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return histogramPoint(r, exportSelector.ExportKindFor(r.Descriptor(), aggregation.HistogramKind), h)
case aggregation.SumKind:
s, ok := agg.(aggregation.Sum)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
sum, err := s.Sum()
if err != nil {
return nil, err
}
return sumPoint(r, sum, r.StartTime(), r.EndTime(), exportSelector.ExportKindFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic())
case aggregation.LastValueKind:
lv, ok := agg.(aggregation.LastValue)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
value, tm, err := lv.LastValue()
if err != nil {
return nil, err
}
return gaugePoint(r, value, time.Time{}, tm)
case aggregation.ExactKind:
e, ok := agg.(aggregation.Points)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
pts, err := e.Points()
if err != nil {
return nil, err
}
return gaugeArray(r, pts)
default:
return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg)
}
}
func gaugeArray(record export.Record, points []aggregation.Point) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
pbLabels := stringKeyValues(labels.Iter())
switch nk := desc.NumberKind(); nk {
case number.Int64Kind:
var pts []*metricpb.IntDataPoint
for _, s := range points {
pts = append(pts, &metricpb.IntDataPoint{
Labels: pbLabels,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: s.Number.CoerceToInt64(nk),
})
}
m.Data = &metricpb.Metric_IntGauge{
IntGauge: &metricpb.IntGauge{
DataPoints: pts,
},
}
case number.Float64Kind:
var pts []*metricpb.DoubleDataPoint
for _, s := range points {
pts = append(pts, &metricpb.DoubleDataPoint{
Labels: pbLabels,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: s.Number.CoerceToFloat64(nk),
})
}
m.Data = &metricpb.Metric_DoubleGauge{
DoubleGauge: &metricpb.DoubleGauge{
DataPoints: pts,
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, nk)
}
return m, nil
}
func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntGauge{
IntGauge: &metricpb.IntGauge{
DataPoints: []*metricpb.IntDataPoint{
{
Value: num.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleGauge{
DoubleGauge: &metricpb.DoubleGauge{
DataPoints: []*metricpb.DoubleDataPoint{
{
Value: num.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
func exportKindToTemporality(ek export.ExportKind) metricpb.AggregationTemporality {
switch ek {
case export.DeltaExportKind:
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
case export.CumulativeExportKind:
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
}
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
func sumPoint(record export.Record, num number.Number, start, end time.Time, ek export.ExportKind, monotonic bool) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntSum{
IntSum: &metricpb.IntSum{
IsMonotonic: monotonic,
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.IntDataPoint{
{
Value: num.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleSum{
DoubleSum: &metricpb.DoubleSum{
IsMonotonic: monotonic,
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.DoubleDataPoint{
{
Value: num.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
// minMaxSumCountValues returns the values of the MinMaxSumCount Aggregator
// as discrete values.
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum number.Number, count uint64, err error) {
if min, err = a.Min(); err != nil {
return
}
if max, err = a.Max(); err != nil {
return
}
if sum, err = a.Sum(); err != nil {
return
}
if count, err = a.Count(); err != nil {
return
}
return
}
// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
min, max, sum, count, err := minMaxSumCountValues(a)
if err != nil {
return nil, err
}
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
buckets := []uint64{min.AsRaw(), max.AsRaw()}
bounds := []float64{0.0, 100.0}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntHistogram{
IntHistogram: &metricpb.IntHistogram{
DataPoints: []*metricpb.IntHistogramDataPoint{
{
Sum: sum.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: buckets,
ExplicitBounds: bounds,
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleHistogram{
DoubleHistogram: &metricpb.DoubleHistogram{
DataPoints: []*metricpb.DoubleHistogramDataPoint{
{
Sum: sum.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: buckets,
ExplicitBounds: bounds,
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
var buckets aggregation.Buckets
if buckets, err = a.Histogram(); err != nil {
return
}
boundaries, counts = buckets.Boundaries, buckets.Counts
if len(counts) != len(boundaries)+1 {
err = ErrTransforming
return
}
return
}
// histogramPoint transforms a Histogram Aggregator into an OTLP Metric.
func histogramPoint(record export.Record, ek export.ExportKind, a aggregation.Histogram) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
boundaries, counts, err := histogramValues(a)
if err != nil {
return nil, err
}
count, err := a.Count()
if err != nil {
return nil, err
}
sum, err := a.Sum()
if err != nil {
return nil, err
}
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntHistogram{
IntHistogram: &metricpb.IntHistogram{
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.IntHistogramDataPoint{
{
Sum: sum.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: counts,
ExplicitBounds: boundaries,
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleHistogram{
DoubleHistogram: &metricpb.DoubleHistogram{
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.DoubleHistogramDataPoint{
{
Sum: sum.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: counts,
ExplicitBounds: boundaries,
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
// stringKeyValues transforms a label iterator into a slice of OTLP StringKeyValues.
func stringKeyValues(iter attribute.Iterator) []*commonpb.StringKeyValue {
l := iter.Len()
if l == 0 {
return nil
}
result := make([]*commonpb.StringKeyValue, 0, l)
for iter.Next() {
kv := iter.Label()
result = append(result, &commonpb.StringKeyValue{
Key: string(kv.Key),
Value: kv.Value.Emit(),
})
}
return result
}

View File

@ -1,45 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
import (
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
)
const (
// DefaultCollectorPort is the port the Exporter will attempt to connect to
// if no collector port is provided.
DefaultCollectorPort uint16 = 4317
// DefaultCollectorHost is the host address the Exporter will attempt to
// connect to if no collector address is provided.
DefaultCollectorHost string = "localhost"
)
// ExporterOption are setting options passed to an Exporter on creation.
type ExporterOption func(*config)
type config struct {
exportKindSelector metricsdk.ExportKindSelector
}
// WithMetricExportKindSelector defines the ExportKindSelector used
// for selecting AggregationTemporality (i.e., Cumulative vs. Delta
// aggregation). If not specified otherwise, the exporter will use a
// cumulative export kind selector.
func WithMetricExportKindSelector(selector metricsdk.ExportKindSelector) ExporterOption {
return func(cfg *config) {
cfg.exportKindSelector = selector
}
}
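
A hypothetical sketch of passing this option when constructing the exporter defined in the next file, written as if it lived in this package; the driver argument is assumed, and the selector shown is already the default:

func exampleExporterOptions(driver ProtocolDriver) *Exporter {
	return NewUnstartedExporter(driver,
		WithMetricExportKindSelector(metricsdk.CumulativeExportKindSelector()),
	)
}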

View File

@ -1,179 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
// Exporter is an OpenTelemetry exporter. It exports both traces and metrics
// from OpenTelemetry instrumented code using OpenTelemetry protocol
// buffers to a configurable receiver.
type Exporter struct {
cfg config
driver ProtocolDriver
mu sync.RWMutex
started bool
startOnce sync.Once
stopOnce sync.Once
}
var _ tracesdk.SpanExporter = (*Exporter)(nil)
var _ metricsdk.Exporter = (*Exporter)(nil)
// NewExporter constructs a new Exporter and starts it.
func NewExporter(ctx context.Context, driver ProtocolDriver, opts ...ExporterOption) (*Exporter, error) {
exp := NewUnstartedExporter(driver, opts...)
if err := exp.Start(ctx); err != nil {
return nil, err
}
return exp, nil
}
// NewUnstartedExporter constructs a new Exporter and does not start it.
func NewUnstartedExporter(driver ProtocolDriver, opts ...ExporterOption) *Exporter {
cfg := config{
// Note: the default ExportKindSelector is specified
// as Cumulative:
// https://github.com/open-telemetry/opentelemetry-specification/issues/731
exportKindSelector: metricsdk.CumulativeExportKindSelector(),
}
for _, opt := range opts {
opt(&cfg)
}
return &Exporter{
cfg: cfg,
driver: driver,
}
}
var (
errAlreadyStarted = errors.New("already started")
)
// Start establishes connections to the OpenTelemetry collector. Starting an
// already started exporter returns an error.
func (e *Exporter) Start(ctx context.Context) error {
var err = errAlreadyStarted
e.startOnce.Do(func() {
e.mu.Lock()
e.started = true
e.mu.Unlock()
err = e.driver.Start(ctx)
})
return err
}
// Shutdown closes all connections and releases resources currently being used
// by the exporter. If the exporter is not started this does nothing. A shut
// down exporter can't be started again. Shutting down an already shut down
// exporter does nothing.
func (e *Exporter) Shutdown(ctx context.Context) error {
e.mu.RLock()
started := e.started
e.mu.RUnlock()
if !started {
return nil
}
var err error
e.stopOnce.Do(func() {
err = e.driver.Stop(ctx)
e.mu.Lock()
e.started = false
e.mu.Unlock()
})
return err
}
// Export transforms and batches metric Records into OTLP Metrics and
// transmits them to the configured collector.
func (e *Exporter) Export(parent context.Context, cps metricsdk.CheckpointSet) error {
return e.driver.ExportMetrics(parent, cps, e.cfg.exportKindSelector)
}
// ExportKindFor reports back to the OpenTelemetry SDK sending this Exporter
// metric telemetry which export kind that telemetry needs to be provided in.
func (e *Exporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) metricsdk.ExportKind {
return e.cfg.exportKindSelector.ExportKindFor(desc, kind)
}
// ExportSpans transforms and batches trace SpanSnapshots into OTLP Trace and
// transmits them to the configured collector.
func (e *Exporter) ExportSpans(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
return e.driver.ExportTraces(ctx, ss)
}
// NewExportPipeline sets up a complete export pipeline
// with the recommended TracerProvider setup.
func NewExportPipeline(ctx context.Context, driver ProtocolDriver, exporterOpts ...ExporterOption) (*Exporter,
*sdktrace.TracerProvider, *basic.Controller, error) {
exp, err := NewExporter(ctx, driver, exporterOpts...)
if err != nil {
return nil, nil, nil, err
}
tracerProvider := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
)
cntr := basic.New(
processor.New(
simple.NewWithInexpensiveDistribution(),
exp,
),
)
return exp, tracerProvider, cntr, nil
}
// InstallNewPipeline instantiates a NewExportPipeline with the
// recommended configuration and registers it globally.
func InstallNewPipeline(ctx context.Context, driver ProtocolDriver, exporterOpts ...ExporterOption) (*Exporter,
*sdktrace.TracerProvider, *basic.Controller, error) {
exp, tp, cntr, err := NewExportPipeline(ctx, driver, exporterOpts...)
if err != nil {
return nil, nil, nil, err
}
otel.SetTracerProvider(tp)
err = cntr.Start(ctx)
if err != nil {
return nil, nil, nil, err
}
return exp, tp, cntr, err
}
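
For reference, a minimal sketch of how this (now removed) pipeline API was typically wired together with the gRPC driver defined later in this diff; the collector address and insecure transport are assumptions for the example.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp"
	"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
)

func main() {
	ctx := context.Background()
	// Assumes a collector reachable on the default localhost:4317 without TLS.
	driver := otlpgrpc.NewDriver(otlpgrpc.WithInsecure())
	exp, tp, cntr, err := otlp.InstallNewPipeline(ctx, driver)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		_ = cntr.Stop(ctx)    // stop the metrics controller
		_ = tp.Shutdown(ctx)  // flush and stop the tracer provider
		_ = exp.Shutdown(ctx) // close the exporter's connections
	}()
	// ... create tracers/meters and record telemetry here ...
}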

View File

@ -1,278 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc
import (
"context"
"math/rand"
"sync"
"sync/atomic"
"time"
"unsafe"
"google.golang.org/grpc/encoding/gzip"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type connection struct {
// Ensure pointer is 64-bit aligned for atomic operations on both 32 and 64 bit machines.
lastConnectErrPtr unsafe.Pointer
// mu protects the connection as it is accessed by the
// exporter goroutines and background connection goroutine
mu sync.Mutex
cc *grpc.ClientConn
// these fields are read-only after constructor is finished
cfg otlpconfig.Config
sCfg otlpconfig.SignalConfig
metadata metadata.MD
newConnectionHandler func(cc *grpc.ClientConn)
// these channels are created once
disconnectedCh chan bool
backgroundConnectionDoneCh chan struct{}
stopCh chan struct{}
// this is for tests, so they can replace the closing
// routine without a worry of modifying some global variable
// or changing it back to original after the test is done
closeBackgroundConnectionDoneCh func(ch chan struct{})
}
func newConnection(cfg otlpconfig.Config, sCfg otlpconfig.SignalConfig, handler func(cc *grpc.ClientConn)) *connection {
c := new(connection)
c.newConnectionHandler = handler
c.cfg = cfg
c.sCfg = sCfg
if len(c.sCfg.Headers) > 0 {
c.metadata = metadata.New(c.sCfg.Headers)
}
c.closeBackgroundConnectionDoneCh = func(ch chan struct{}) {
close(ch)
}
return c
}
func (c *connection) startConnection(ctx context.Context) {
c.stopCh = make(chan struct{})
c.disconnectedCh = make(chan bool, 1)
c.backgroundConnectionDoneCh = make(chan struct{})
if err := c.connect(ctx); err == nil {
c.setStateConnected()
} else {
c.setStateDisconnected(err)
}
go c.indefiniteBackgroundConnection()
}
func (c *connection) lastConnectError() error {
errPtr := (*error)(atomic.LoadPointer(&c.lastConnectErrPtr))
if errPtr == nil {
return nil
}
return *errPtr
}
func (c *connection) saveLastConnectError(err error) {
var errPtr *error
if err != nil {
errPtr = &err
}
atomic.StorePointer(&c.lastConnectErrPtr, unsafe.Pointer(errPtr))
}
func (c *connection) setStateDisconnected(err error) {
c.saveLastConnectError(err)
select {
case c.disconnectedCh <- true:
default:
}
c.newConnectionHandler(nil)
}
func (c *connection) setStateConnected() {
c.saveLastConnectError(nil)
}
func (c *connection) connected() bool {
return c.lastConnectError() == nil
}
const defaultConnReattemptPeriod = 10 * time.Second
func (c *connection) indefiniteBackgroundConnection() {
defer func() {
c.closeBackgroundConnectionDoneCh(c.backgroundConnectionDoneCh)
}()
connReattemptPeriod := c.cfg.ReconnectionPeriod
if connReattemptPeriod <= 0 {
connReattemptPeriod = defaultConnReattemptPeriod
}
// No strong seeding required, nano time can
// already help with pseudo uniqueness.
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
// maxJitterNanos: 70% of the connectionReattemptPeriod
maxJitterNanos := int64(0.7 * float64(connReattemptPeriod))
for {
// Otherwise, these are the normal scenarios that enable
// reconnection after the connection is lost:
// 1. If we've stopped, return entirely
// 2. Otherwise block until we are disconnected, and
// then retry connecting
select {
case <-c.stopCh:
return
case <-c.disconnectedCh:
// Quickly check if we haven't stopped at the
// same time.
select {
case <-c.stopCh:
return
default:
}
// Normal scenario that we'll wait for
}
if err := c.connect(context.Background()); err == nil {
c.setStateConnected()
} else {
// This code is unreachable in most cases because c.connect
// does not block to establish the connection.
c.setStateDisconnected(err)
}
// Apply some jitter to avoid lockstep retrials of other
// collector-exporters. Lockstep retrials could result in an
// innocent DDOS, by clogging the machine's resources and network.
jitter := time.Duration(rng.Int63n(maxJitterNanos))
select {
case <-c.stopCh:
return
case <-time.After(connReattemptPeriod + jitter):
}
}
}
func (c *connection) connect(ctx context.Context) error {
cc, err := c.dialToCollector(ctx)
if err != nil {
return err
}
c.setConnection(cc)
c.newConnectionHandler(cc)
return nil
}
// setConnection sets cc as the client connection and returns true if
// the connection state changed.
func (c *connection) setConnection(cc *grpc.ClientConn) bool {
c.mu.Lock()
defer c.mu.Unlock()
// If previous clientConn is same as the current then just return.
// This doesn't happen right now as this func is only called with new ClientConn.
// It is more about future-proofing.
if c.cc == cc {
return false
}
// If the previous clientConn was non-nil, close it
if c.cc != nil {
_ = c.cc.Close()
}
c.cc = cc
return true
}
func (c *connection) dialToCollector(ctx context.Context) (*grpc.ClientConn, error) {
dialOpts := []grpc.DialOption{}
if c.cfg.ServiceConfig != "" {
dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(c.cfg.ServiceConfig))
}
if c.sCfg.GRPCCredentials != nil {
dialOpts = append(dialOpts, grpc.WithTransportCredentials(c.sCfg.GRPCCredentials))
} else if c.sCfg.Insecure {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
if c.sCfg.Compression == otlp.GzipCompression {
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
if len(c.cfg.DialOptions) != 0 {
dialOpts = append(dialOpts, c.cfg.DialOptions...)
}
ctx, cancel := c.contextWithStop(ctx)
defer cancel()
ctx = c.contextWithMetadata(ctx)
return grpc.DialContext(ctx, c.sCfg.Endpoint, dialOpts...)
}
func (c *connection) contextWithMetadata(ctx context.Context) context.Context {
if c.metadata.Len() > 0 {
return metadata.NewOutgoingContext(ctx, c.metadata)
}
return ctx
}
func (c *connection) shutdown(ctx context.Context) error {
close(c.stopCh)
// Ensure that the backgroundConnector returns
select {
case <-c.backgroundConnectionDoneCh:
case <-ctx.Done():
return ctx.Err()
}
c.mu.Lock()
cc := c.cc
c.cc = nil
c.mu.Unlock()
if cc != nil {
return cc.Close()
}
return nil
}
func (c *connection) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
// Unify the parent context Done signal with the connection's
// stop channel.
ctx, cancel := context.WithCancel(ctx)
go func(ctx context.Context, cancel context.CancelFunc) {
select {
case <-ctx.Done():
// Nothing to do, either cancelled or deadline
// happened.
case <-c.stopCh:
cancel()
}
}(ctx, cancel)
return ctx, cancel
}

View File

@ -1,25 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package otlpgrpc provides an implementation of otlp.ProtocolDriver
that connects to the collector and sends traces and metrics using
gRPC.
This package is currently in a pre-GA phase. Backwards incompatible
changes may be introduced in subsequent minor version releases as we
work to track the evolving OpenTelemetry specification and user
feedback.
*/
package otlpgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"

View File

@ -1,195 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
import (
"context"
"errors"
"fmt"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/transform"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
type driver struct {
metricsDriver metricsDriver
tracesDriver tracesDriver
}
type metricsDriver struct {
connection *connection
lock sync.Mutex
metricsClient colmetricpb.MetricsServiceClient
}
type tracesDriver struct {
connection *connection
lock sync.Mutex
tracesClient coltracepb.TraceServiceClient
}
var (
errNoClient = errors.New("no client")
)
// NewDriver creates a new gRPC protocol driver.
func NewDriver(opts ...Option) otlp.ProtocolDriver {
cfg := otlpconfig.NewDefaultConfig()
otlpconfig.ApplyGRPCEnvConfigs(&cfg)
for _, opt := range opts {
opt.ApplyGRPCOption(&cfg)
}
d := &driver{}
d.tracesDriver = tracesDriver{
connection: newConnection(cfg, cfg.Traces, d.tracesDriver.handleNewConnection),
}
d.metricsDriver = metricsDriver{
connection: newConnection(cfg, cfg.Metrics, d.metricsDriver.handleNewConnection),
}
return d
}
func (md *metricsDriver) handleNewConnection(cc *grpc.ClientConn) {
md.lock.Lock()
defer md.lock.Unlock()
if cc != nil {
md.metricsClient = colmetricpb.NewMetricsServiceClient(cc)
} else {
md.metricsClient = nil
}
}
func (td *tracesDriver) handleNewConnection(cc *grpc.ClientConn) {
td.lock.Lock()
defer td.lock.Unlock()
if cc != nil {
td.tracesClient = coltracepb.NewTraceServiceClient(cc)
} else {
td.tracesClient = nil
}
}
// Start implements otlp.ProtocolDriver. It establishes a connection
// to the collector.
func (d *driver) Start(ctx context.Context) error {
d.tracesDriver.connection.startConnection(ctx)
d.metricsDriver.connection.startConnection(ctx)
return nil
}
// Stop implements otlp.ProtocolDriver. It shuts down the connection
// to the collector.
func (d *driver) Stop(ctx context.Context) error {
if err := d.tracesDriver.connection.shutdown(ctx); err != nil {
return err
}
return d.metricsDriver.connection.shutdown(ctx)
}
// ExportMetrics implements otlp.ProtocolDriver. It transforms metrics
// to protobuf binary format and sends the result to the collector.
func (d *driver) ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error {
if !d.metricsDriver.connection.connected() {
return fmt.Errorf("metrics exporter is disconnected from the server %s: %w", d.metricsDriver.connection.sCfg.Endpoint, d.metricsDriver.connection.lastConnectError())
}
ctx, cancel := d.metricsDriver.connection.contextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, d.metricsDriver.connection.sCfg.Timeout)
defer tCancel()
rms, err := transform.CheckpointSet(ctx, selector, cps, 1)
if err != nil {
return err
}
if len(rms) == 0 {
return nil
}
return d.metricsDriver.uploadMetrics(ctx, rms)
}
func (md *metricsDriver) uploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error {
ctx = md.connection.contextWithMetadata(ctx)
err := func() error {
md.lock.Lock()
defer md.lock.Unlock()
if md.metricsClient == nil {
return errNoClient
}
_, err := md.metricsClient.Export(ctx, &colmetricpb.ExportMetricsServiceRequest{
ResourceMetrics: protoMetrics,
})
return err
}()
if err != nil {
md.connection.setStateDisconnected(err)
}
return err
}
// ExportTraces implements otlp.ProtocolDriver. It transforms spans to
// protobuf binary format and sends the result to the collector.
func (d *driver) ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
if !d.tracesDriver.connection.connected() {
return fmt.Errorf("traces exporter is disconnected from the server %s: %w", d.tracesDriver.connection.sCfg.Endpoint, d.tracesDriver.connection.lastConnectError())
}
ctx, cancel := d.tracesDriver.connection.contextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, d.tracesDriver.connection.sCfg.Timeout)
defer tCancel()
protoSpans := transform.SpanData(ss)
if len(protoSpans) == 0 {
return nil
}
return d.tracesDriver.uploadTraces(ctx, protoSpans)
}
func (td *tracesDriver) uploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
ctx = td.connection.contextWithMetadata(ctx)
err := func() error {
td.lock.Lock()
defer td.lock.Unlock()
if td.tracesClient == nil {
return errNoClient
}
_, err := td.tracesClient.Export(ctx, &coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
})
return err
}()
if err != nil {
td.connection.setStateDisconnected(err)
}
return err
}

View File

@ -1,202 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc
import (
"fmt"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// Option applies an option to the gRPC driver.
type Option interface {
otlpconfig.GRPCOption
}
// WithInsecure disables client transport security for the exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecure() Option {
return otlpconfig.WithInsecure()
}
// WithTracesInsecure disables client transport security for the traces exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithTracesInsecure() Option {
return otlpconfig.WithInsecureTraces()
}
// WithInsecureMetrics disables client transport security for the metrics exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecureMetrics() Option {
return otlpconfig.WithInsecureMetrics()
}
// WithEndpoint allows one to set the endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithEndpoint(endpoint string) Option {
return otlpconfig.WithEndpoint(endpoint)
}
// WithTracesEndpoint allows one to set the traces endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithTracesEndpoint(endpoint string) Option {
return otlpconfig.WithTracesEndpoint(endpoint)
}
// WithMetricsEndpoint allows one to set the metrics endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithMetricsEndpoint(endpoint string) Option {
return otlpconfig.WithMetricsEndpoint(endpoint)
}
// WithReconnectionPeriod allows one to set the delay before the next connection attempt
// after failing to connect to the collector.
func WithReconnectionPeriod(rp time.Duration) otlpconfig.GRPCOption {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ReconnectionPeriod = rp
})
}
func compressorToCompression(compressor string) otlp.Compression {
switch compressor {
case "gzip":
return otlp.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return otlp.NoCompression
}
// WithCompressor will set the compressor for the gRPC client to use when sending requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithCompressor(compressor string) Option {
return otlpconfig.WithCompression(compressorToCompression(compressor))
}
// WithTracesCompression will set the compressor for the gRPC client to use when sending traces requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithTracesCompression(compressor string) Option {
return otlpconfig.WithTracesCompression(compressorToCompression(compressor))
}
// WithMetricsCompression will set the compressor for the gRPC client to use when sending metrics requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithMetricsCompression(compressor string) Option {
return otlpconfig.WithMetricsCompression(compressorToCompression(compressor))
}
// WithHeaders will send the provided headers with gRPC requests.
func WithHeaders(headers map[string]string) Option {
return otlpconfig.WithHeaders(headers)
}
// WithTracesHeaders will send the provided headers with gRPC traces requests.
func WithTracesHeaders(headers map[string]string) Option {
return otlpconfig.WithTracesHeaders(headers)
}
// WithMetricsHeaders will send the provided headers with gRPC metrics requests.
func WithMetricsHeaders(headers map[string]string) Option {
return otlpconfig.WithMetricsHeaders(headers)
}
// WithTLSCredentials allows the connection to use TLS credentials
// when talking to the server. It takes in grpc.TransportCredentials instead
// of say a Certificate file or a tls.Certificate, because the retrieving of
// these credentials can be done in many ways e.g. plain file, in code tls.Config
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Traces.GRPCCredentials = creds
cfg.Metrics.GRPCCredentials = creds
})
}
// WithTracesTLSCredentials allows the connection to use TLS credentials
// when talking to the traces server. It takes in grpc.TransportCredentials instead
// of say a Certificate file or a tls.Certificate, because the retrieving of
// these credentials can be done in many ways e.g. plain file, in code tls.Config
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithTracesTLSCredentials(creds credentials.TransportCredentials) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Traces.GRPCCredentials = creds
})
}
// WithMetricsTLSCredentials allows the connection to use TLS credentials
// when talking to the metrics server. It takes in grpc.TransportCredentials instead
// of say a Certificate file or a tls.Certificate, because the retrieving of
// these credentials can be done in many ways e.g. plain file, in code tls.Config
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithMetricsTLSCredentials(creds credentials.TransportCredentials) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Metrics.GRPCCredentials = creds
})
}
// WithServiceConfig defines the default gRPC service config used.
func WithServiceConfig(serviceConfig string) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ServiceConfig = serviceConfig
})
}
// WithDialOption opens support to any grpc.DialOption to be used. If it conflicts
// with other gRPC configuration specified for the collector connection, the options here
// take precedence since they are applied last.
func WithDialOption(opts ...grpc.DialOption) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.DialOptions = opts
})
}
// WithTimeout tells the driver the max waiting time for the backend to process
// each span or metric batch. If unset, the default will be 10 seconds.
func WithTimeout(duration time.Duration) Option {
return otlpconfig.WithTimeout(duration)
}
// WithTracesTimeout tells the driver the max waiting time for the backend to process
// each span batch. If unset, the default will be 10 seconds.
func WithTracesTimeout(duration time.Duration) Option {
return otlpconfig.WithTracesTimeout(duration)
}
// WithMetricsTimeout tells the driver the max waiting time for the backend to process
// each metric batch. If unset, the default will be 10 seconds.
func WithMetricsTimeout(duration time.Duration) Option {
return otlpconfig.WithMetricsTimeout(duration)
}

View File

@ -0,0 +1,51 @@
# OpenTelemetry-Go OTLP Span Exporter
[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation.
## Installation
```
go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
```
## Examples
- [Exporter setup and examples](./otlptracehttp/example_test.go)
- [Full example sending telemetry to a local collector](../../../example/otel-collector)
## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
This exporter is configured using a client satisfying the `otlptrace.Client` interface.
This client handles the transformation of data into wire format and the transmission of that data to the collector.
## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.
## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.
## Configuration
### Environment Variables
The following environment variables can be used (instead of options objects) to
override the default configuration. For more information about how each of
these environment variables is interpreted, see [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md).
| Environment variable | Option | Default value |
| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- |
| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] |
| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | |
| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | |
| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | |
| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` |
[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`.
Configuration using options has precedence over the environment variables.
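As a minimal end-to-end sketch (assuming a collector reachable on the default insecure gRPC endpoint), the exporter can be wired into an SDK tracer provider like this:
```go
package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    ctx := context.Background()

    // Create and start a gRPC OTLP span exporter (insecure, default localhost:4317).
    exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure())
    if err != nil {
        log.Fatalf("creating OTLP trace exporter: %v", err)
    }

    // Register the exporter with a batching tracer provider.
    tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
    defer func() { _ = tp.Shutdown(ctx) }()
    otel.SetTracerProvider(tp)

    // Spans created from otel.Tracer(...) are now exported via OTLP/gRPC.
}
```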

View File

@ -0,0 +1,54 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
import (
"context"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
// Client manages connections to the collector, handles the
// transformation of data into wire format, and the transmission of that
// data to the collector.
type Client interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Start should establish connection(s) to endpoint(s). It is
// called just once by the exporter, so the implementation
// does not need to worry about idempotence and locking.
Start(ctx context.Context) error
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Stop should close the connections. The function is called
// only once by the exporter, so the implementation does not
// need to worry about idempotence, but it may be called
// concurrently with UploadTraces, so proper
// locking is required. The function serves as a
// synchronization point - after the function returns, the
// process of closing connections is assumed to be finished.
Stop(ctx context.Context) error
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// UploadTraces should transform the passed traces to the wire
// format and send it to the collector. May be called
// concurrently.
UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
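Purely as an illustration of this contract (not part of the package), a minimal Client that accepts and discards every batch could look like the following sketch; the discardClient name is hypothetical:
```go
package example

import (
    "context"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
    tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// discardClient is a hypothetical otlptrace.Client that accepts and discards
// every batch. It exists only to show the Start/Stop/UploadTraces contract.
type discardClient struct{}

var _ otlptrace.Client = discardClient{}

func (discardClient) Start(ctx context.Context) error { return nil }
func (discardClient) Stop(ctx context.Context) error  { return nil }
func (discardClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
    return nil
}
```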

View File

@ -0,0 +1,113 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
var (
errAlreadyStarted = errors.New("already started")
)
// Exporter exports trace data in the OTLP wire format.
type Exporter struct {
client Client
mu sync.RWMutex
started bool
startOnce sync.Once
stopOnce sync.Once
}
// ExportSpans exports a batch of spans.
func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
protoSpans := tracetransform.Spans(ss)
if len(protoSpans) == 0 {
return nil
}
return e.client.UploadTraces(ctx, protoSpans)
}
// Start establishes a connection to the receiving endpoint.
func (e *Exporter) Start(ctx context.Context) error {
var err = errAlreadyStarted
e.startOnce.Do(func() {
e.mu.Lock()
e.started = true
e.mu.Unlock()
err = e.client.Start(ctx)
})
return err
}
// Shutdown flushes all exports and closes all connections to the receiving endpoint.
func (e *Exporter) Shutdown(ctx context.Context) error {
e.mu.RLock()
started := e.started
e.mu.RUnlock()
if !started {
return nil
}
var err error
e.stopOnce.Do(func() {
err = e.client.Stop(ctx)
e.mu.Lock()
e.started = false
e.mu.Unlock()
})
return err
}
var _ tracesdk.SpanExporter = (*Exporter)(nil)
// New constructs a new Exporter and starts it.
func New(ctx context.Context, client Client) (*Exporter, error) {
exp := NewUnstarted(client)
if err := exp.Start(ctx); err != nil {
return nil, err
}
return exp, nil
}
// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(client Client) *Exporter {
return &Exporter{
client: client,
}
}
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (e *Exporter) MarshalLog() interface{} {
return struct {
Type string
Client Client
}{
Type: "otlptrace",
Client: e.client,
}
}
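A brief usage sketch (assuming the otlptracegrpc client introduced later in this change): NewUnstarted lets the caller construct the exporter first and connect later.
```go
package example

import (
    "context"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

// startLater builds an Exporter without connecting, then starts it once the
// caller is ready. Error handling trimmed for brevity.
func startLater(ctx context.Context) (*otlptrace.Exporter, error) {
    exp := otlptrace.NewUnstarted(otlptracegrpc.NewClient())
    if err := exp.Start(ctx); err != nil {
        return nil, err
    }
    return exp, nil
}
```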

View File

@ -0,0 +1,127 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
import (
"crypto/tls"
"net/url"
"os"
"path"
"strings"
"time"
"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
)
// DefaultEnvOptionsReader is the default environments reader.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: os.ReadFile,
Namespace: "OTEL_EXPORTER_OTLP",
}
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
func ApplyGRPCEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts {
cfg = opt.ApplyGRPCOption(cfg)
}
return cfg
}
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
func ApplyHTTPEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
return cfg
}
func getOptionsFromEnv() []GenericOption {
opts := []GenericOption{}
DefaultEnvOptionsReader.Apply(
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Traces.Endpoint = u.Host
// For OTLP/HTTP endpoint URLs without a per-signal
// configuration, the passed endpoint is used as a base URL
// and the signals are sent to these paths relative to that.
cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
return cfg
}, withEndpointForGRPC(u)))
}),
envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Traces.Endpoint = u.Host
// For endpoint URLs for OTLP/HTTP per-signal variables, the
// URL MUST be used as-is without any modification. The only
// exception is that if an URL contains no path part, the root
// path / MUST be used.
path := u.Path
if path == "" {
path = "/"
}
cfg.Traces.URLPath = path
return cfg
}, withEndpointForGRPC(u)))
}),
envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
)
return opts
}
func withEndpointScheme(u *url.URL) GenericOption {
switch strings.ToLower(u.Scheme) {
case "http", "unix":
return WithInsecure()
default:
return WithSecure()
}
}
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
return func(cfg Config) Config {
// For OTLP/gRPC endpoints, this is the target to which the
// exporter is going to send telemetry.
cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
return cfg
}
}
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
cp := NoCompression
if v == "gzip" {
cp = GzipCompression
}
fn(cp)
}
}
}
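A hypothetical sketch of the resulting precedence, staying inside this internal package: values read from the environment seed the config, and options passed to NewGRPCConfig are applied afterwards and therefore win.
```go
package otlpconfig

import "os"

// exampleEndpointPrecedence is a hypothetical helper showing that options
// passed to NewGRPCConfig override values read from the environment, because
// ApplyGRPCEnvConfigs runs before the opts loop.
func exampleEndpointPrecedence() string {
    os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "https://env.example:4317")
    cfg := NewGRPCConfig(WithEndpoint("option.example:4317"))
    return cfg.Traces.Endpoint // "option.example:4317"
}
```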

View File

@ -0,0 +1,307 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/encoding/gzip"
"go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
)
const (
// DefaultTracesPath is a default URL path for the endpoint that
// receives spans.
DefaultTracesPath string = "/v1/traces"
// DefaultTimeout is a default max waiting time for the backend to process
// each span batch.
DefaultTimeout time.Duration = 10 * time.Second
)
type (
SignalConfig struct {
Endpoint string
Insecure bool
TLSCfg *tls.Config
Headers map[string]string
Compression Compression
Timeout time.Duration
URLPath string
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
}
Config struct {
// Signal specific configurations
Traces SignalConfig
RetryConfig retry.Config
// gRPC configurations
ReconnectionPeriod time.Duration
ServiceConfig string
DialOptions []grpc.DialOption
GRPCConn *grpc.ClientConn
}
)
// NewHTTPConfig returns a new Config with all settings applied from opts and
// any unset setting using the default HTTP config values.
func NewHTTPConfig(opts ...HTTPOption) Config {
cfg := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
URLPath: DefaultTracesPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
}
cfg = ApplyHTTPEnvConfigs(cfg)
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath)
return cfg
}
// NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values.
func NewGRPCConfig(opts ...GRPCOption) Config {
cfg := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
URLPath: DefaultTracesPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
}
cfg = ApplyGRPCEnvConfigs(cfg)
for _, opt := range opts {
cfg = opt.ApplyGRPCOption(cfg)
}
if cfg.ServiceConfig != "" {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
}
// Prioritize GRPCCredentials over Insecure (passing both is an error).
if cfg.Traces.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
} else if cfg.Traces.Insecure {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
} else {
// Default to using the host's root CA.
creds := credentials.NewTLS(nil)
cfg.Traces.GRPCCredentials = creds
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
}
if cfg.Traces.Compression == GzipCompression {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
if len(cfg.DialOptions) != 0 {
cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
}
if cfg.ReconnectionPeriod != 0 {
p := grpc.ConnectParams{
Backoff: backoff.DefaultConfig,
MinConnectTimeout: cfg.ReconnectionPeriod,
}
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
}
return cfg
}
type (
// GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface {
ApplyHTTPOption(Config) Config
ApplyGRPCOption(Config) Config
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
// HTTPOption applies an option to the HTTP driver.
HTTPOption interface {
ApplyHTTPOption(Config) Config
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
// GRPCOption applies an option to the gRPC driver.
GRPCOption interface {
ApplyGRPCOption(Config) Config
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
)
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
fn func(Config) Config
}
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
return g.fn(cfg)
}
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
return g.fn(cfg)
}
func (genericOption) private() {}
func newGenericOption(fn func(cfg Config) Config) GenericOption {
return &genericOption{fn: fn}
}
// splitOption is an option that applies different logics
// for gRPC and HTTP.
type splitOption struct {
httpFn func(Config) Config
grpcFn func(Config) Config
}
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
return g.grpcFn(cfg)
}
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
return g.httpFn(cfg)
}
func (splitOption) private() {}
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
fn func(Config) Config
}
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
return h.fn(cfg)
}
func (httpOption) private() {}
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
return &httpOption{fn: fn}
}
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
fn func(Config) Config
}
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
return h.fn(cfg)
}
func (grpcOption) private() {}
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
return &grpcOption{fn: fn}
}
// Generic Options
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Endpoint = endpoint
return cfg
})
}
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
return cfg
})
}
func WithURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.URLPath = urlPath
return cfg
})
}
func WithRetry(rc retry.Config) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.RetryConfig = rc
return cfg
})
}
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg Config) Config {
cfg.Traces.TLSCfg = tlsCfg.Clone()
return cfg
}, func(cfg Config) Config {
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
return cfg
})
}
func WithInsecure() GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Insecure = true
return cfg
})
}
func WithSecure() GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Insecure = false
return cfg
})
}
func WithHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Headers = headers
return cfg
})
}
func WithTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Timeout = duration
return cfg
})
}
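For instance, a split option such as WithTLSClientConfig configures the two transports differently; a hypothetical in-package sketch (helper name invented):
```go
package otlpconfig

import "crypto/tls"

// tlsOptionTargets is a hypothetical helper showing how the same split option
// lands in different fields per transport: the HTTP config keeps the
// *tls.Config, while the gRPC config wraps it in transport credentials.
func tlsOptionTargets(tlsCfg *tls.Config) (httpCfg, grpcCfg Config) {
    opt := WithTLSClientConfig(tlsCfg)
    httpCfg = opt.ApplyHTTPOption(Config{}) // sets Traces.TLSCfg
    grpcCfg = opt.ApplyGRPCOption(Config{}) // sets Traces.GRPCCredentials
    return httpCfg, grpcCfg
}
```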

View File

@ -12,7 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
const (
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
DefaultCollectorGRPCPort uint16 = 4317
// DefaultCollectorHTTPPort is the default HTTP port of the collector.
DefaultCollectorHTTPPort uint16 = 4318
// DefaultCollectorHost is the host address the Exporter will attempt
// to connect to if no collector address is provided.
DefaultCollectorHost string = "localhost"
)
// Compression describes the compression used for payloads sent to the
// collector.
@ -27,7 +37,7 @@ const (
GzipCompression
)
// Marshaler describes the kind of message format sent to the collector
// Marshaler describes the kind of message format sent to the collector.
type Marshaler int
const (

View File

@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
import (
"crypto/tls"
"crypto/x509"
"errors"
)
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
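A usage sketch, assuming a PEM-encoded CA bundle on disk (the path and helper name are hypothetical):
```go
package otlpconfig

import (
    "crypto/tls"
    "os"
)

// tlsFromFile is a hypothetical helper: it reads a PEM bundle and turns it
// into a *tls.Config that trusts only that CA, via CreateTLSConfig.
func tlsFromFile(path string) (*tls.Config, error) {
    pem, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }
    return CreateTLSConfig(pem)
}
```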

View File

@ -0,0 +1,158 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
if len(attrs) == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, len(attrs))
for _, kv := range attrs {
out = append(out, KeyValue(kv))
}
return out
}
// Iterator transforms an attribute iterator into OTLP key-values.
func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
l := iter.Len()
if l == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, l)
for iter.Next() {
out = append(out, KeyValue(iter.Attribute()))
}
return out
}
// ResourceAttributes transforms a Resource into OTLP key-values.
func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
return Iterator(res.Iter())
}
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
}
// Value transforms an attribute Value into an OTLP AnyValue.
func Value(v attribute.Value) *commonpb.AnyValue {
av := new(commonpb.AnyValue)
switch v.Type() {
case attribute.BOOL:
av.Value = &commonpb.AnyValue_BoolValue{
BoolValue: v.AsBool(),
}
case attribute.BOOLSLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: boolSliceValues(v.AsBoolSlice()),
},
}
case attribute.INT64:
av.Value = &commonpb.AnyValue_IntValue{
IntValue: v.AsInt64(),
}
case attribute.INT64SLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: int64SliceValues(v.AsInt64Slice()),
},
}
case attribute.FLOAT64:
av.Value = &commonpb.AnyValue_DoubleValue{
DoubleValue: v.AsFloat64(),
}
case attribute.FLOAT64SLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: float64SliceValues(v.AsFloat64Slice()),
},
}
case attribute.STRING:
av.Value = &commonpb.AnyValue_StringValue{
StringValue: v.AsString(),
}
case attribute.STRINGSLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: stringSliceValues(v.AsStringSlice()),
},
}
default:
av.Value = &commonpb.AnyValue_StringValue{
StringValue: "INVALID",
}
}
return av
}
func boolSliceValues(vals []bool) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v,
},
}
}
return converted
}
func int64SliceValues(vals []int64) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v,
},
}
}
return converted
}
func float64SliceValues(vals []float64) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v,
},
}
}
return converted
}
func stringSliceValues(vals []string) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v,
},
}
}
return converted
}
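As an illustration of the mapping, a slice-valued attribute becomes an ArrayValue of scalar AnyValues; a hypothetical in-package sketch:
```go
package tracetransform

import "go.opentelemetry.io/otel/attribute"

// exampleKeyValue is a hypothetical helper showing the shape produced by
// KeyValue for a slice-typed attribute: key "features", value an ArrayValue
// holding the string AnyValues "a" and "b".
func exampleKeyValue() {
    kv := attribute.StringSlice("features", []string{"a", "b"})
    pb := KeyValue(kv)
    _ = pb.GetValue().GetArrayValue() // two string AnyValue entries
}
```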

View File

@ -12,19 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
"go.opentelemetry.io/otel/sdk/instrumentation"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)
func instrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary {
if il == (instrumentation.Library{}) {
func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
if il == (instrumentation.Scope{}) {
return nil
}
return &commonpb.InstrumentationLibrary{
return &commonpb.InstrumentationScope{
Name: il.Name,
Version: il.Version,
}

View File

@ -12,12 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"go.opentelemetry.io/otel/sdk/resource"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
)
// Resource transforms a Resource into an OTLP Resource.

View File

@ -12,36 +12,31 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"go.opentelemetry.io/otel/sdk/instrumentation"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
const (
maxMessageEventsPerSpan = 128
)
// SpanData transforms a slice of SpanSnapshot into a slice of OTLP
// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
// ResourceSpans.
func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
if len(sdl) == 0 {
return nil
}
rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
type ilsKey struct {
type key struct {
r attribute.Distinct
il instrumentation.Library
is instrumentation.Scope
}
ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans)
ssm := make(map[key]*tracepb.ScopeSpans)
var resources int
for _, sd := range sdl {
@ -49,29 +44,31 @@ func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
continue
}
rKey := sd.Resource.Equivalent()
iKey := ilsKey{
rKey := sd.Resource().Equivalent()
k := key{
r: rKey,
il: sd.InstrumentationLibrary,
is: sd.InstrumentationScope(),
}
ils, iOk := ilsm[iKey]
scopeSpan, iOk := ssm[k]
if !iOk {
// Either the resource or instrumentation library were unknown.
ils = &tracepb.InstrumentationLibrarySpans{
InstrumentationLibrary: instrumentationLibrary(sd.InstrumentationLibrary),
Spans: []*tracepb.Span{},
// Either the resource or instrumentation scope were unknown.
scopeSpan = &tracepb.ScopeSpans{
Scope: InstrumentationScope(sd.InstrumentationScope()),
Spans: []*tracepb.Span{},
SchemaUrl: sd.InstrumentationScope().SchemaURL,
}
}
ils.Spans = append(ils.Spans, span(sd))
ilsm[iKey] = ils
scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
ssm[k] = scopeSpan
rs, rOk := rsm[rKey]
if !rOk {
resources++
// The resource was unknown.
rs = &tracepb.ResourceSpans{
Resource: Resource(sd.Resource),
InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils},
Resource: Resource(sd.Resource()),
ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
SchemaUrl: sd.Resource().SchemaURL(),
}
rsm[rKey] = rs
continue
@ -81,9 +78,9 @@ func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
// library lookup was unknown because if so we need to add it to the
// ResourceSpans. Otherwise, the instrumentation library has already
// been seen and the append we did above will have included it in the
// InstrumentationLibrarySpans reference.
// ScopeSpans reference.
if !iOk {
rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils)
rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
}
}
@ -96,32 +93,32 @@ func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
}
// span transforms a Span into an OTLP span.
func span(sd *tracesdk.SpanSnapshot) *tracepb.Span {
func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
if sd == nil {
return nil
}
tid := sd.SpanContext.TraceID()
sid := sd.SpanContext.SpanID()
tid := sd.SpanContext().TraceID()
sid := sd.SpanContext().SpanID()
s := &tracepb.Span{
TraceId: tid[:],
SpanId: sid[:],
TraceState: sd.SpanContext.TraceState().String(),
Status: status(sd.StatusCode, sd.StatusMessage),
StartTimeUnixNano: uint64(sd.StartTime.UnixNano()),
EndTimeUnixNano: uint64(sd.EndTime.UnixNano()),
Links: links(sd.Links),
Kind: spanKind(sd.SpanKind),
Name: sd.Name,
Attributes: Attributes(sd.Attributes),
Events: spanEvents(sd.MessageEvents),
DroppedAttributesCount: uint32(sd.DroppedAttributeCount),
DroppedEventsCount: uint32(sd.DroppedMessageEventCount),
DroppedLinksCount: uint32(sd.DroppedLinkCount),
TraceState: sd.SpanContext().TraceState().String(),
Status: status(sd.Status().Code, sd.Status().Description),
StartTimeUnixNano: uint64(sd.StartTime().UnixNano()),
EndTimeUnixNano: uint64(sd.EndTime().UnixNano()),
Links: links(sd.Links()),
Kind: spanKind(sd.SpanKind()),
Name: sd.Name(),
Attributes: KeyValues(sd.Attributes()),
Events: spanEvents(sd.Events()),
DroppedAttributesCount: uint32(sd.DroppedAttributes()),
DroppedEventsCount: uint32(sd.DroppedEvents()),
DroppedLinksCount: uint32(sd.DroppedLinks()),
}
if psid := sd.Parent.SpanID(); psid.IsValid() {
if psid := sd.Parent().SpanID(); psid.IsValid() {
s.ParentSpanId = psid[:]
}
@ -132,10 +129,12 @@ func span(sd *tracesdk.SpanSnapshot) *tracepb.Span {
func status(status codes.Code, message string) *tracepb.Status {
var c tracepb.Status_StatusCode
switch status {
case codes.Ok:
c = tracepb.Status_STATUS_CODE_OK
case codes.Error:
c = tracepb.Status_STATUS_CODE_ERROR
default:
c = tracepb.Status_STATUS_CODE_OK
c = tracepb.Status_STATUS_CODE_UNSET
}
return &tracepb.Status{
Code: c,
@ -144,7 +143,7 @@ func status(status codes.Code, message string) *tracepb.Status {
}
// links transforms span Links to OTLP span links.
func links(links []trace.Link) []*tracepb.Span_Link {
func links(links []tracesdk.Link) []*tracepb.Span_Link {
if len(links) == 0 {
return nil
}
@ -155,47 +154,35 @@ func links(links []trace.Link) []*tracepb.Span_Link {
// being reused -- in short we need a new otLink per iteration.
otLink := otLink
tid := otLink.TraceID()
sid := otLink.SpanID()
tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
sl = append(sl, &tracepb.Span_Link{
TraceId: tid[:],
SpanId: sid[:],
Attributes: Attributes(otLink.Attributes),
TraceId: tid[:],
SpanId: sid[:],
Attributes: KeyValues(otLink.Attributes),
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
})
}
return sl
}
// spanEvents transforms span Events to an OTLP span events.
func spanEvents(es []trace.Event) []*tracepb.Span_Event {
func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
if len(es) == 0 {
return nil
}
evCount := len(es)
if evCount > maxMessageEventsPerSpan {
evCount = maxMessageEventsPerSpan
}
events := make([]*tracepb.Span_Event, 0, evCount)
messageEvents := 0
events := make([]*tracepb.Span_Event, len(es))
// Transform message events
for _, e := range es {
if messageEvents >= maxMessageEventsPerSpan {
break
for i := 0; i < len(es); i++ {
events[i] = &tracepb.Span_Event{
Name: es[i].Name,
TimeUnixNano: uint64(es[i].Time.UnixNano()),
Attributes: KeyValues(es[i].Attributes),
DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
}
messageEvents++
events = append(events,
&tracepb.Span_Event{
Name: e.Name,
TimeUnixNano: uint64(e.Time.UnixNano()),
Attributes: Attributes(e.Attributes),
// TODO (rghetia) : Add Drop Counts when supported.
},
)
}
return events
}
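With this change, only explicit codes.Ok and codes.Error map to OK and ERROR; any other code, including codes.Unset, now maps to UNSET instead of OK. A hypothetical in-package sketch of the mapping:
```go
package tracetransform

import "go.opentelemetry.io/otel/codes"

// statusMappingExamples is a hypothetical helper spelling out the mapping
// implemented by status() above: Ok -> STATUS_CODE_OK, Error ->
// STATUS_CODE_ERROR, anything else (e.g. Unset) -> STATUS_CODE_UNSET.
func statusMappingExamples() {
    okCode := status(codes.Ok, "").Code       // tracepb.Status_STATUS_CODE_OK
    errCode := status(codes.Error, "x").Code  // tracepb.Status_STATUS_CODE_ERROR
    unsetCode := status(codes.Unset, "").Code // tracepb.Status_STATUS_CODE_UNSET
    _, _, _ = okCode, errCode, unsetCode
}
```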

View File

@ -0,0 +1,295 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"context"
"errors"
"sync"
"time"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
type client struct {
endpoint string
dialOpts []grpc.DialOption
metadata metadata.MD
exportTimeout time.Duration
requestFunc retry.RequestFunc
// stopCtx is used as a parent context for all exports. Therefore, when it
// is canceled with the stopFunc all exports are canceled.
stopCtx context.Context
// stopFunc cancels stopCtx, stopping any active exports.
stopFunc context.CancelFunc
// ourConn keeps track of where conn was created: true if created here on
// Start, or false if passed with an option. This is important on Shutdown
// as the conn should only be closed if created here on start. Otherwise,
// it is up to the processes that passed the conn to close it.
ourConn bool
conn *grpc.ClientConn
tscMu sync.RWMutex
tsc coltracepb.TraceServiceClient
}
// Compile time check *client implements otlptrace.Client.
var _ otlptrace.Client = (*client)(nil)
// NewClient creates a new gRPC trace client.
func NewClient(opts ...Option) otlptrace.Client {
return newClient(opts...)
}
func newClient(opts ...Option) *client {
cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
ctx, cancel := context.WithCancel(context.Background())
c := &client{
endpoint: cfg.Traces.Endpoint,
exportTimeout: cfg.Traces.Timeout,
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
dialOpts: cfg.DialOptions,
stopCtx: ctx,
stopFunc: cancel,
conn: cfg.GRPCConn,
}
if len(cfg.Traces.Headers) > 0 {
c.metadata = metadata.New(cfg.Traces.Headers)
}
return c
}
// Start establishes a gRPC connection to the collector.
func (c *client) Start(ctx context.Context) error {
if c.conn == nil {
// If the caller did not provide a ClientConn when the client was
// created, create one using the configuration they did provide.
conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
if err != nil {
return err
}
// Keep track that we own the lifecycle of this conn and need to close
// it on Shutdown.
c.ourConn = true
c.conn = conn
}
// The otlptrace.Client interface states this method is called just once,
// so no need to check if already started.
c.tscMu.Lock()
c.tsc = coltracepb.NewTraceServiceClient(c.conn)
c.tscMu.Unlock()
return nil
}
var errAlreadyStopped = errors.New("the client is already stopped")
// Stop shuts down the client.
//
// Any active connections to a remote endpoint are closed if they were created
// by the client. Any gRPC connection passed during creation using
// WithGRPCConn will not be closed. It is the caller's responsibility to
// handle cleanup of that resource.
//
// This method synchronizes with the UploadTraces method of the client. It
// will wait for any active calls to that method to complete unimpeded, or it
// will cancel any active calls if ctx expires. If ctx expires, the context
// error will be forwarded as the returned error. All client held resources
// will still be released in this situation.
//
// If the client has already stopped, an error will be returned describing
// this.
func (c *client) Stop(ctx context.Context) error {
// Acquire the c.tscMu lock within the ctx lifetime.
acquired := make(chan struct{})
go func() {
c.tscMu.Lock()
close(acquired)
}()
var err error
select {
case <-ctx.Done():
// The Stop timeout is reached. Kill any remaining exports to force
// the clear of the lock and save the timeout error to return and
// signal the shutdown timed out before cleanly stopping.
c.stopFunc()
err = ctx.Err()
// To ensure the client is not left in a dirty state c.tsc needs to be
// set to nil. To avoid the race condition when doing this, ensure
// that all the exports are killed (initiated by c.stopFunc).
<-acquired
case <-acquired:
}
// Hold the tscMu lock for the rest of the function to ensure no new
// exports are started.
defer c.tscMu.Unlock()
// The otlptrace.Client interface states this method is called only
// once, but there is no guarantee it is called after Start. Ensure the
// client is started before doing anything and let the caller know if they
// made a mistake.
if c.tsc == nil {
return errAlreadyStopped
}
// Clear c.tsc to signal the client is stopped.
c.tsc = nil
if c.ourConn {
closeErr := c.conn.Close()
// A context timeout error takes precedence over this error.
if err == nil && closeErr != nil {
err = closeErr
}
}
return err
}
var errShutdown = errors.New("the client is shutdown")
// UploadTraces sends a batch of spans.
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
// Hold a read lock to ensure a shut down initiated after this starts does
// not abandon the export. This read lock acquire has less priority than a
// write lock acquire (i.e. Stop), meaning if the client is shutting down
// this will come after the shut down.
c.tscMu.RLock()
defer c.tscMu.RUnlock()
if c.tsc == nil {
return errShutdown
}
ctx, cancel := c.exportContext(ctx)
defer cancel()
return c.requestFunc(ctx, func(iCtx context.Context) error {
resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
})
if resp != nil && resp.PartialSuccess != nil {
otel.Handle(internal.PartialSuccessToError(
internal.TracingPartialSuccess,
resp.PartialSuccess.RejectedSpans,
resp.PartialSuccess.ErrorMessage,
))
}
// nil is converted to OK.
if status.Code(err) == codes.OK {
// Success.
return nil
}
return err
})
}
// exportContext returns a copy of parent with an appropriate deadline and
// cancellation function.
//
// It is the caller's responsibility to cancel the returned context once its
// use is complete, via the parent or directly with the returned CancelFunc, to
// ensure all resources are correctly released.
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
var (
ctx context.Context
cancel context.CancelFunc
)
if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
} else {
ctx, cancel = context.WithCancel(parent)
}
if c.metadata.Len() > 0 {
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
}
// Unify the client stopCtx with the parent.
go func() {
select {
case <-ctx.Done():
case <-c.stopCtx.Done():
// Cancel the export as the shutdown has timed out.
cancel()
}
}()
return ctx, cancel
}
// retryable returns if err identifies a request that can be retried and a
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
s := status.Convert(err)
switch s.Code() {
case codes.Canceled,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Aborted,
codes.OutOfRange,
codes.Unavailable,
codes.DataLoss:
return true, throttleDelay(s)
}
// Not a retry-able error.
return false, 0
}
// throttleDelay returns a duration to wait for if an explicit throttle time
// is included in the response status.
func throttleDelay(s *status.Status) time.Duration {
for _, detail := range s.Details() {
if t, ok := detail.(*errdetails.RetryInfo); ok {
return t.RetryDelay.AsDuration()
}
}
return 0
}
// MarshalLog is the marshaling function used by the logging system to represent this Client.
func (c *client) MarshalLog() interface{} {
return struct {
Type string
Endpoint string
}{
Type: "otlphttpgrpc",
Endpoint: c.endpoint,
}
}
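For reference, a hypothetical sketch of how a throttled status carries the delay that throttleDelay extracts (built with the gRPC status and errdetails packages):
```go
package otlptracegrpc

import (
    "time"

    "google.golang.org/genproto/googleapis/rpc/errdetails"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/types/known/durationpb"
)

// throttledStatusExample is a hypothetical helper: it builds the kind of
// ResourceExhausted status a collector may return and shows that retryable
// reports it as retryable with the server-provided delay.
func throttledStatusExample() (bool, time.Duration) {
    st, _ := status.New(codes.ResourceExhausted, "slow down").WithDetails(
        &errdetails.RetryInfo{RetryDelay: durationpb.New(2 * time.Second)},
    )
    return retryable(st.Err()) // true, 2s
}
```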

View File

@ -0,0 +1,31 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"context"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
)
// New constructs a new Exporter and starts it.
func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
return otlptrace.New(ctx, NewClient(opts...))
}
// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(opts ...Option) *otlptrace.Exporter {
return otlptrace.NewUnstarted(NewClient(opts...))
}
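A usage sketch, assuming the caller already owns a *grpc.ClientConn (helper name hypothetical):
```go
package example

import (
    "context"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    "google.golang.org/grpc"
)

// newExporterOnConn reuses an existing gRPC connection for the exporter.
// Per WithGRPCConn, closing conn stays the caller's responsibility.
func newExporterOnConn(ctx context.Context, conn *grpc.ClientConn) (*otlptrace.Exporter, error) {
    return otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
}
```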

View File

@ -0,0 +1,189 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
)
// Option applies an option to the gRPC driver.
type Option interface {
applyGRPCOption(otlpconfig.Config) otlpconfig.Config
}
func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
converted := make([]otlpconfig.GRPCOption, len(opts))
for i, o := range opts {
converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
}
return converted
}
// RetryConfig defines configuration for retrying export of span batches that
// failed to be received by the target endpoint.
//
// This configuration does not define any network retry strategy. That is
// entirely handled by the gRPC ClientConn.
type RetryConfig retry.Config
type wrappedOption struct {
otlpconfig.GRPCOption
}
func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
return w.ApplyGRPCOption(cfg)
}
// WithInsecure disables client transport security for the exporter's gRPC
// connection just like grpc.WithInsecure()
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
// default, client security is required unless WithInsecure is used.
//
// This option has no effect if WithGRPCConn is used.
func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
// WithEndpoint sets the target endpoint the exporter will connect to. If
// unset, localhost:4317 will be used as a default.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
// WithReconnectionPeriod sets the minimum amount of time between connection
// attempts to the target endpoint.
//
// This option has no effect if WithGRPCConn is used.
func WithReconnectionPeriod(rp time.Duration) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.ReconnectionPeriod = rp
return cfg
})}
}
func compressorToCompression(compressor string) otlpconfig.Compression {
if compressor == "gzip" {
return otlpconfig.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return otlpconfig.NoCompression
}
// WithCompressor sets the compressor for the gRPC client to use when sending
// requests. It is the responsibility of the caller to ensure that the
// compressor set has been registered with google.golang.org/grpc/encoding.
// This can be done by encoding.RegisterCompressor. Some compressors
// auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
//
// This option has no effect if WithGRPCConn is used.
func WithCompressor(compressor string) Option {
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
// WithHeaders will send the provided headers with each gRPC request.
func WithHeaders(headers map[string]string) Option {
return wrappedOption{otlpconfig.WithHeaders(headers)}
}
// WithTLSCredentials allows the connection to use TLS credentials when
// talking to the server. It takes grpc.TransportCredentials instead of, say,
// a certificate file or a tls.Certificate because these credentials can be
// retrieved in many ways (e.g. from a plain file, an in-code tls.Config, or
// via certificate rotation), so it is up to the caller to decide what to use.
//
// This option has no effect if WithGRPCConn is used.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.Traces.GRPCCredentials = creds
return cfg
})}
}
// WithServiceConfig defines the default gRPC service config used.
//
// This option has no effect if WithGRPCConn is used.
func WithServiceConfig(serviceConfig string) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.ServiceConfig = serviceConfig
return cfg
})}
}
// WithDialOption sets explicit grpc.DialOptions to use when making a
// connection. The options here are appended to the internal grpc.DialOptions
// used, so they take precedence over any internal grpc.DialOptions they might
// conflict with.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.DialOptions = opts
return cfg
})}
}
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
//
// This option takes precedence over any other option that relates to
// establishing or persisting a gRPC connection to a target endpoint. Any
// other option of those types passed will be ignored.
//
// It is the caller's responsibility to close the passed conn. The client
// Shutdown method will not close this connection.
func WithGRPCConn(conn *grpc.ClientConn) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.GRPCConn = conn
return cfg
})}
}
// WithTimeout sets the max amount of time a client will attempt to export a
// batch of spans. This takes precedence over any retry settings defined with
// WithRetry; once this time limit has been reached, the export is abandoned
// and the batch of spans is dropped.
//
// If unset, the default timeout will be set to 10 seconds.
func WithTimeout(duration time.Duration) Option {
return wrappedOption{otlpconfig.WithTimeout(duration)}
}
// WithRetry sets the retry policy for transient retryable errors that may be
// returned by the target endpoint when exporting a batch of spans.
//
// If the target endpoint responds with not only a retryable error but also
// an explicit backoff time in the response, that time will take precedence
// over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
// after each error for no more than a total time of 1 minute.
func WithRetry(settings RetryConfig) Option {
return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
}
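// Hedged usage sketch (editor's illustration, not part of the vendored file):
// composing several of the options above. The certificate path, endpoint,
// and timeout are assumptions; gzip must be registered by importing
// google.golang.org/grpc/encoding/gzip, as the WithCompressor comment notes.
package main

import (
	"context"
	"time"

	"google.golang.org/grpc/credentials"
	_ "google.golang.org/grpc/encoding/gzip" // registers the gzip compressor

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	creds, err := credentials.NewClientTLSFromFile("/etc/otel/ca.pem", "")
	if err != nil {
		panic(err)
	}
	_, err = otlptracegrpc.New(context.Background(),
		otlptracegrpc.WithEndpoint("collector.example:4317"),
		otlptracegrpc.WithTLSCredentials(creds),
		otlptracegrpc.WithCompressor("gzip"),
		otlptracegrpc.WithTimeout(30*time.Second),
	)
	if err != nil {
		panic(err)
	}
}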

View File

@ -1,145 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
import (
"context"
"sync"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
// ProtocolDriver is an interface used by OTLP exporter. It's
// responsible for connecting to and disconnecting from the collector,
// and for transforming traces and metrics into wire format and
// transmitting them to the collector.
type ProtocolDriver interface {
// Start should establish connection(s) to endpoint(s). It is
// called just once by the exporter, so the implementation
// does not need to worry about idempotence and locking.
Start(ctx context.Context) error
// Stop should close the connections. The function is called
// only once by the exporter, so the implementation does not
// need to worry about idempotence, but it may be called
// concurrently with ExportMetrics or ExportTraces, so proper
// locking is required. The function serves as a
// synchronization point - after the function returns, the
// process of closing connections is assumed to be finished.
Stop(ctx context.Context) error
// ExportMetrics should transform the passed metrics to the
// wire format and send it to the collector. May be called
// concurrently with ExportTraces, so the manager needs to
// take this into account by doing proper locking.
ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error
// ExportTraces should transform the passed traces to the wire
// format and send it to the collector. May be called
// concurrently with ExportMetrics, so the manager needs to
// take this into account by doing proper locking.
ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error
}
// SplitConfig is used to configure a split driver.
type SplitConfig struct {
// ForMetrics driver will be used for sending metrics to the
// collector.
ForMetrics ProtocolDriver
// ForTraces driver will be used for sending spans to the
// collector.
ForTraces ProtocolDriver
}
type splitDriver struct {
metric ProtocolDriver
trace ProtocolDriver
}
var _ ProtocolDriver = (*splitDriver)(nil)
// NewSplitDriver creates a protocol driver which contains two other
// protocol drivers and will forward traces to one of them and metrics
// to another.
func NewSplitDriver(cfg SplitConfig) ProtocolDriver {
return &splitDriver{
metric: cfg.ForMetrics,
trace: cfg.ForTraces,
}
}
// Start implements ProtocolDriver. It starts both drivers at the same
// time.
func (d *splitDriver) Start(ctx context.Context) error {
wg := sync.WaitGroup{}
wg.Add(2)
var (
metricErr error
traceErr error
)
go func() {
defer wg.Done()
metricErr = d.metric.Start(ctx)
}()
go func() {
defer wg.Done()
traceErr = d.trace.Start(ctx)
}()
wg.Wait()
if metricErr != nil {
return metricErr
}
if traceErr != nil {
return traceErr
}
return nil
}
// Stop implements ProtocolDriver. It stops both drivers at the same
// time.
func (d *splitDriver) Stop(ctx context.Context) error {
wg := sync.WaitGroup{}
wg.Add(2)
var (
metricErr error
traceErr error
)
go func() {
defer wg.Done()
metricErr = d.metric.Stop(ctx)
}()
go func() {
defer wg.Done()
traceErr = d.trace.Stop(ctx)
}()
wg.Wait()
if metricErr != nil {
return metricErr
}
if traceErr != nil {
return traceErr
}
return nil
}
// ExportMetrics implements ProtocolDriver. It forwards the call to
// the driver used for sending metrics.
func (d *splitDriver) ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error {
return d.metric.ExportMetrics(ctx, cps, selector)
}
// ExportTraces implements ProtocolDriver. It forwards the call to the
// driver used for sending spans.
func (d *splitDriver) ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
return d.trace.ExportTraces(ctx, ss)
}

View File

@ -18,7 +18,6 @@ import (
"log"
"os"
"sync"
"sync/atomic"
)
var (
@ -26,64 +25,73 @@ var (
// throughout an OpenTelemetry instrumented project. When a user-specified
// ErrorHandler is registered (`SetErrorHandler`), all calls to `Handle` will
// be delegated to the registered ErrorHandler.
globalErrorHandler = &loggingErrorHandler{
l: log.New(os.Stderr, "", log.LstdFlags),
}
globalErrorHandler = defaultErrorHandler()
// delegateErrorHandlerOnce ensures that a user provided ErrorHandler is
// only ever registered once.
delegateErrorHandlerOnce sync.Once
// Comiple time check that loggingErrorHandler implements ErrorHandler.
_ ErrorHandler = (*loggingErrorHandler)(nil)
// Compile-time check that delegator implements ErrorHandler.
_ ErrorHandler = (*delegator)(nil)
// Compile-time check that errLogger implements ErrorHandler.
_ ErrorHandler = (*errLogger)(nil)
)
// loggingErrorHandler logs all errors to STDERR.
type loggingErrorHandler struct {
delegate atomic.Value
type delegator struct {
lock *sync.RWMutex
eh ErrorHandler
}
func (d *delegator) Handle(err error) {
d.lock.RLock()
defer d.lock.RUnlock()
d.eh.Handle(err)
}
// setDelegate sets the ErrorHandler delegate.
func (d *delegator) setDelegate(eh ErrorHandler) {
d.lock.Lock()
defer d.lock.Unlock()
d.eh = eh
}
func defaultErrorHandler() *delegator {
return &delegator{
lock: &sync.RWMutex{},
eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)},
}
}
// errLogger logs errors if no delegate is set, otherwise they are delegated.
type errLogger struct {
l *log.Logger
}
// setDelegate sets the ErrorHandler delegate if one is not already set.
func (h *loggingErrorHandler) setDelegate(d ErrorHandler) {
if h.delegate.Load() != nil {
// Delegate already registered
return
}
h.delegate.Store(d)
}
// Handle implements ErrorHandler.
func (h *loggingErrorHandler) Handle(err error) {
if d := h.delegate.Load(); d != nil {
d.(ErrorHandler).Handle(err)
return
}
// Handle logs err if no delegate is set, otherwise it is delegated.
func (h *errLogger) Handle(err error) {
h.l.Print(err)
}
// GetErrorHandler returns the global ErrorHandler instance. If no ErrorHandler
// instance has been set (`SetErrorHandler`), the default ErrorHandler which
// logs errors to STDERR is returned.
// GetErrorHandler returns the global ErrorHandler instance.
//
// The default ErrorHandler instance returned will log all errors to STDERR
// until an override ErrorHandler is set with SetErrorHandler. All
// ErrorHandler returned prior to this will automatically forward errors to
// the set instance instead of logging.
//
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
func GetErrorHandler() ErrorHandler {
return globalErrorHandler
}
// SetErrorHandler sets the global ErrorHandler to be h.
// SetErrorHandler sets the global ErrorHandler to h.
//
// The first time this is called all ErrorHandler previously returned from
// GetErrorHandler will send errors to h instead of the default logging
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) {
delegateErrorHandlerOnce.Do(func() {
current := GetErrorHandler()
if current == h {
return
}
if internalHandler, ok := current.(*loggingErrorHandler); ok {
internalHandler.setDelegate(h)
}
})
globalErrorHandler.setDelegate(h)
}
// Handle is a convience function for ErrorHandler().Handle(err)
// Handle is a convenience function for GetErrorHandler().Handle(err).
func Handle(err error) {
GetErrorHandler().Handle(err)
}
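// Hedged illustration (editor's sketch, not part of the vendored file): a
// custom ErrorHandler registered through the public otel package; the
// delegator above forwards all subsequent Handle calls to it. The handler
// type and message format are made up for illustration.
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

type logHandler struct{ l *log.Logger }

// Handle implements otel.ErrorHandler.
func (h logHandler) Handle(err error) { h.l.Printf("otel error: %v", err) }

func main() {
	otel.SetErrorHandler(logHandler{l: log.Default()})
	otel.Handle(errors.New("example failure")) // routed to logHandler
}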

View File

@ -12,327 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package baggage provides types and functions to manage W3C Baggage.
package baggage
/*
Package baggage provides base types and functionality to store and retrieve
baggage in Go context. This package exists because the OpenTracing bridge to
OpenTelemetry needs to synchronize state whenever baggage for a context is
modified and that context contains an OpenTracing span. If it were not for
this need this package would not need to exist and the
`go.opentelemetry.io/otel/baggage` package would be the singular place where
W3C baggage is handled.
*/
package baggage // import "go.opentelemetry.io/otel/internal/baggage"
import (
"context"
// List is the collection of baggage members. The W3C allows for duplicates,
// but OpenTelemetry does not, therefore, this is represented as a map.
type List map[string]Item
"go.opentelemetry.io/otel/attribute"
)
type rawMap map[attribute.Key]attribute.Value
type keySet map[attribute.Key]struct{}
// Map is an immutable storage for correlations.
type Map struct {
m rawMap
// Item is the value and metadata properties part of a list-member.
type Item struct {
Value string
Properties []Property
}
// MapUpdate contains information about correlation changes to be
// made.
type MapUpdate struct {
// DropSingleK contains a single key to be dropped from
// correlations. Use this to avoid an overhead of a slice
// allocation if there is only one key to drop.
DropSingleK attribute.Key
// DropMultiK contains all the keys to be dropped from
// correlations.
DropMultiK []attribute.Key
// Property is a metadata entry for a list-member.
type Property struct {
Key, Value string
// SingleKV contains a single key-value pair to be added to
// correlations. Use this to avoid an overhead of a slice
// allocation if there is only one key-value pair to add.
SingleKV attribute.KeyValue
// MultiKV contains all the key-value pairs to be added to
// correlations.
MultiKV []attribute.KeyValue
}
func newMap(raw rawMap) Map {
return Map{
m: raw,
}
}
// NewEmptyMap creates an empty correlations map.
func NewEmptyMap() Map {
return newMap(nil)
}
// NewMap creates a map with the contents of the update applied. In
// this function, having an update with DropSingleK or DropMultiK
// makes no sense - those fields are effectively ignored.
func NewMap(update MapUpdate) Map {
return NewEmptyMap().Apply(update)
}
// Apply creates a copy of the map with the contents of the update
// applied. Apply will first drop the keys from DropSingleK and
// DropMultiK, then add key-value pairs from SingleKV and MultiKV.
func (m Map) Apply(update MapUpdate) Map {
delSet, addSet := getModificationSets(update)
mapSize := getNewMapSize(m.m, delSet, addSet)
r := make(rawMap, mapSize)
for k, v := range m.m {
// do not copy items we want to drop
if _, ok := delSet[k]; ok {
continue
}
// do not copy items we would overwrite
if _, ok := addSet[k]; ok {
continue
}
r[k] = v
}
if update.SingleKV.Key.Defined() {
r[update.SingleKV.Key] = update.SingleKV.Value
}
for _, kv := range update.MultiKV {
r[kv.Key] = kv.Value
}
if len(r) == 0 {
r = nil
}
return newMap(r)
}
func getModificationSets(update MapUpdate) (delSet, addSet keySet) {
deletionsCount := len(update.DropMultiK)
if update.DropSingleK.Defined() {
deletionsCount++
}
if deletionsCount > 0 {
delSet = make(map[attribute.Key]struct{}, deletionsCount)
for _, k := range update.DropMultiK {
delSet[k] = struct{}{}
}
if update.DropSingleK.Defined() {
delSet[update.DropSingleK] = struct{}{}
}
}
additionsCount := len(update.MultiKV)
if update.SingleKV.Key.Defined() {
additionsCount++
}
if additionsCount > 0 {
addSet = make(map[attribute.Key]struct{}, additionsCount)
for _, k := range update.MultiKV {
addSet[k.Key] = struct{}{}
}
if update.SingleKV.Key.Defined() {
addSet[update.SingleKV.Key] = struct{}{}
}
}
return
}
func getNewMapSize(m rawMap, delSet, addSet keySet) int {
mapSizeDiff := 0
for k := range addSet {
if _, ok := m[k]; !ok {
mapSizeDiff++
}
}
for k := range delSet {
if _, ok := m[k]; ok {
if _, inAddSet := addSet[k]; !inAddSet {
mapSizeDiff--
}
}
}
return len(m) + mapSizeDiff
}
// Value gets a value from correlations map and returns a boolean
// value indicating whether the key exist in the map.
func (m Map) Value(k attribute.Key) (attribute.Value, bool) {
value, ok := m.m[k]
return value, ok
}
// HasValue returns a boolean value indicating whether the key exist
// in the map.
func (m Map) HasValue(k attribute.Key) bool {
_, has := m.Value(k)
return has
}
// Len returns a length of the map.
func (m Map) Len() int {
return len(m.m)
}
// Foreach calls a passed callback once on each key-value pair until
// all the key-value pairs of the map were iterated or the callback
// returns false, whichever happens first.
func (m Map) Foreach(f func(attribute.KeyValue) bool) {
for k, v := range m.m {
if !f(attribute.KeyValue{
Key: k,
Value: v,
}) {
return
}
}
}
type correlationsType struct{}
// SetHookFunc describes a type of a callback that is called when
// storing baggage in the context.
type SetHookFunc func(context.Context) context.Context
// GetHookFunc describes a type of a callback that is called when
// getting baggage from the context.
type GetHookFunc func(context.Context, Map) Map
// value under this key is either of type Map or correlationsData
var correlationsKey = &correlationsType{}
type correlationsData struct {
m Map
setHook SetHookFunc
getHook GetHookFunc
}
func (d correlationsData) isHookless() bool {
return d.setHook == nil && d.getHook == nil
}
type hookKind int
const (
hookKindSet hookKind = iota
hookKindGet
)
func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) {
switch kind {
case hookKindSet:
d.setHook = setHook
case hookKindGet:
d.getHook = getHook
}
}
// ContextWithSetHook installs a hook function that will be invoked
// every time ContextWithMap is called. To avoid unnecessary callback
// invocations (recursive or not), the callback can temporarily clear
// the hooks from the context with the ContextWithNoHooks function.
//
// Note that NewContext also calls ContextWithMap, so the hook will be
// invoked.
//
// Passing nil SetHookFunc creates a context with no set hook to call.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context {
return contextWithHook(ctx, hookKindSet, hook, nil)
}
// ContextWithGetHook installs a hook function that will be invoked
// every time MapFromContext is called. To avoid unnecessary callback
// invocations (recursive or not), the callback can temporarily clear
// the hooks from the context with the ContextWithNoHooks function.
//
// Note that NewContext also calls MapFromContext, so the hook will be
// invoked.
//
// Passing nil GetHookFunc creates a context with no get hook to call.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context {
return contextWithHook(ctx, hookKindGet, nil, hook)
}
func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
v.overrideHook(kind, setHook, getHook)
if v.isHookless() {
return context.WithValue(ctx, correlationsKey, v.m)
}
return context.WithValue(ctx, correlationsKey, v)
case Map:
return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v)
default:
m := NewEmptyMap()
return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m)
}
}
func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context {
d := correlationsData{m: m}
d.overrideHook(kind, setHook, getHook)
if d.isHookless() {
return ctx
}
return context.WithValue(ctx, correlationsKey, d)
}
// ContextWithNoHooks creates a context with all the hooks
// disabled. Also returns old set and get hooks. This function can be
// used to temporarily clear the context from hooks and then reinstate
// them by calling ContextWithSetHook and ContextWithGetHook functions
// passing the hooks returned by this function.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook
default:
return ctx, nil, nil
}
}
// ContextWithMap returns a context with the Map entered into it.
func ContextWithMap(ctx context.Context, m Map) context.Context {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
v.m = m
ctx = context.WithValue(ctx, correlationsKey, v)
if v.setHook != nil {
ctx = v.setHook(ctx)
}
return ctx
default:
return context.WithValue(ctx, correlationsKey, m)
}
}
// ContextWithNoCorrelationData returns a context stripped of correlation
// data.
func ContextWithNoCorrelationData(ctx context.Context) context.Context {
return context.WithValue(ctx, correlationsKey, nil)
}
// NewContext returns a context with the map from passed context
// updated with the passed key-value pairs.
func NewContext(ctx context.Context, keyvalues ...attribute.KeyValue) context.Context {
return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{
MultiKV: keyvalues,
}))
}
// MapFromContext gets the current Map from a Context.
func MapFromContext(ctx context.Context) Map {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
if v.getHook != nil {
return v.getHook(ctx, v.m)
}
return v.m
case Map:
return v
default:
return NewEmptyMap()
}
// HasValue indicates if a zero-value value means the property does not
// have a value or if it was the zero-value.
HasValue bool
}

View File

@ -0,0 +1,92 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/internal/baggage"
import "context"
type baggageContextKeyType int
const baggageKey baggageContextKeyType = iota
// SetHookFunc is a callback called when storing baggage in the context.
type SetHookFunc func(context.Context, List) context.Context
// GetHookFunc is a callback called when getting baggage from the context.
type GetHookFunc func(context.Context, List) List
type baggageState struct {
list List
setHook SetHookFunc
getHook GetHookFunc
}
// ContextWithSetHook returns a copy of parent with hook configured to be
// invoked every time ContextWithBaggage is called.
//
// Passing nil SetHookFunc creates a context with no set hook to call.
func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
var s baggageState
if v, ok := parent.Value(baggageKey).(baggageState); ok {
s = v
}
s.setHook = hook
return context.WithValue(parent, baggageKey, s)
}
// ContextWithGetHook returns a copy of parent with hook configured to be
// invoked every time FromContext is called.
//
// Passing nil GetHookFunc creates a context with no get hook to call.
func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
var s baggageState
if v, ok := parent.Value(baggageKey).(baggageState); ok {
s = v
}
s.getHook = hook
return context.WithValue(parent, baggageKey, s)
}
// ContextWithList returns a copy of parent with baggage. Passing nil list
// returns a context without any baggage.
func ContextWithList(parent context.Context, list List) context.Context {
var s baggageState
if v, ok := parent.Value(baggageKey).(baggageState); ok {
s = v
}
s.list = list
ctx := context.WithValue(parent, baggageKey, s)
if s.setHook != nil {
ctx = s.setHook(ctx, list)
}
return ctx
}
// ListFromContext returns the baggage contained in ctx.
func ListFromContext(ctx context.Context) List {
switch v := ctx.Value(baggageKey).(type) {
case baggageState:
if v.getHook != nil {
return v.getHook(ctx, v.list)
}
return v.list
default:
return nil
}
}
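// Hedged illustration (editor's sketch, not part of the vendored file): how a
// get hook interacts with ListFromContext, written as it might appear inside
// this package (it is internal and cannot be imported from outside the otel
// module). The key and value below are made up for illustration.
func sketchGetHook() {
	// Install a get hook that observes (and could rewrite) the stored list.
	ctx := ContextWithGetHook(context.Background(), func(ctx context.Context, l List) List {
		return l // a real hook might synchronize state with another API here
	})
	ctx = ContextWithList(ctx, List{"user-id": {Value: "42"}})
	_ = ListFromContext(ctx) // invokes the get hook before returning the list
}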

View File

@ -0,0 +1,63 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
"os"
"sync"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
)
// globalLogger is the logging interface used within the otel API and SDK to provide details of the internals.
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
var globalLoggerLock = &sync.RWMutex{}
// SetLogger overrides the globalLogger with l.
//
// To see Info messages use a logger with `l.V(1).Enabled() == true`.
// To see Debug messages use a logger with `l.V(5).Enabled() == true`.
func SetLogger(l logr.Logger) {
globalLoggerLock.Lock()
defer globalLoggerLock.Unlock()
globalLogger = l
}
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
globalLoggerLock.RLock()
defer globalLoggerLock.RUnlock()
globalLogger.V(1).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
globalLoggerLock.RLock()
defer globalLoggerLock.RUnlock()
globalLogger.Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
globalLoggerLock.RLock()
defer globalLoggerLock.RUnlock()
globalLogger.V(5).Info(msg, keysAndValues...)
}
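// Hedged illustration (editor's sketch, not part of the vendored file): route
// internal otel logs to stdr and raise its verbosity so the Info (V(1)) and
// Debug (V(5)) messages described above are emitted. The prefix is arbitrary.
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	stdr.SetVerbosity(5) // enable up to Debug-level messages
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}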

View File

@ -1,348 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global
import (
"context"
"sync"
"sync/atomic"
"unsafe"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/registry"
)
// This file contains the forwarding implementation of MeterProvider used as
// the default global instance. Metric events using instruments provided by
// this implementation are no-ops until the first Meter implementation is set
// as the global provider.
//
// The implementation here uses Mutexes to maintain a list of active Meters in
// the MeterProvider and Instruments in each Meter, under the assumption that
// these interfaces are not performance-critical.
//
// We have the invariant that setDelegate() will be called before a new
// MeterProvider implementation is registered as the global provider. Mutexes
// in the MeterProvider and Meters ensure that each instrument has a delegate
// before the global provider is set.
//
// Bound instrument operations are implemented by delegating to the
// instrument after it is registered, with a sync.Once initializer to
// protect against races with Release().
//
// Metric uniqueness checking is implemented by calling the exported
// methods of the api/metric/registry package.
type meterKey struct {
Name, Version string
}
type meterProvider struct {
delegate metric.MeterProvider
// lock protects `delegate` and `meters`.
lock sync.Mutex
// meters maintains a unique entry for every named Meter
// that has been registered through the global instance.
meters map[meterKey]*meterEntry
}
type meterImpl struct {
delegate unsafe.Pointer // (*metric.MeterImpl)
lock sync.Mutex
syncInsts []*syncImpl
asyncInsts []*asyncImpl
}
type meterEntry struct {
unique metric.MeterImpl
impl meterImpl
}
type instrument struct {
descriptor metric.Descriptor
}
type syncImpl struct {
delegate unsafe.Pointer // (*metric.SyncImpl)
instrument
}
type asyncImpl struct {
delegate unsafe.Pointer // (*metric.AsyncImpl)
instrument
runner metric.AsyncRunner
}
// SyncImpler is implemented by all of the sync metric
// instruments.
type SyncImpler interface {
SyncImpl() metric.SyncImpl
}
// AsyncImpler is implemented by all of the async
// metric instruments.
type AsyncImpler interface {
AsyncImpl() metric.AsyncImpl
}
type syncHandle struct {
delegate unsafe.Pointer // (*metric.BoundInstrumentImpl)
inst *syncImpl
labels []attribute.KeyValue
initialize sync.Once
}
var _ metric.MeterProvider = &meterProvider{}
var _ metric.MeterImpl = &meterImpl{}
var _ metric.InstrumentImpl = &syncImpl{}
var _ metric.BoundSyncImpl = &syncHandle{}
var _ metric.AsyncImpl = &asyncImpl{}
func (inst *instrument) Descriptor() metric.Descriptor {
return inst.descriptor
}
// MeterProvider interface and delegation
func newMeterProvider() *meterProvider {
return &meterProvider{
meters: map[meterKey]*meterEntry{},
}
}
func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
p.lock.Lock()
defer p.lock.Unlock()
p.delegate = provider
for key, entry := range p.meters {
entry.impl.setDelegate(key.Name, key.Version, provider)
}
p.meters = nil
}
func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
p.lock.Lock()
defer p.lock.Unlock()
if p.delegate != nil {
return p.delegate.Meter(instrumentationName, opts...)
}
key := meterKey{
Name: instrumentationName,
Version: metric.NewMeterConfig(opts...).InstrumentationVersion,
}
entry, ok := p.meters[key]
if !ok {
entry = &meterEntry{}
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
p.meters[key] = entry
}
return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
}
// Meter interface and delegation
func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
m.lock.Lock()
defer m.lock.Unlock()
d := new(metric.MeterImpl)
*d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
m.delegate = unsafe.Pointer(d)
for _, inst := range m.syncInsts {
inst.setDelegate(*d)
}
m.syncInsts = nil
for _, obs := range m.asyncInsts {
obs.setDelegate(*d)
}
m.asyncInsts = nil
}
func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewSyncInstrument(desc)
}
inst := &syncImpl{
instrument: instrument{
descriptor: desc,
},
}
m.syncInsts = append(m.syncInsts, inst)
return inst, nil
}
// Synchronous delegation
func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.SyncImpl)
var err error
*implPtr, err = d.NewSyncInstrument(inst.descriptor)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
}
func (inst *syncImpl) Implementation() interface{} {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return inst
}
func (inst *syncImpl) Bind(labels []attribute.KeyValue) metric.BoundSyncImpl {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Bind(labels)
}
return &syncHandle{
inst: inst,
labels: labels,
}
}
func (bound *syncHandle) Unbind() {
bound.initialize.Do(func() {})
implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
if implPtr == nil {
return
}
(*implPtr).Unbind()
}
// Async delegation
func (m *meterImpl) NewAsyncInstrument(
desc metric.Descriptor,
runner metric.AsyncRunner,
) (metric.AsyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewAsyncInstrument(desc, runner)
}
inst := &asyncImpl{
instrument: instrument{
descriptor: desc,
},
runner: runner,
}
m.asyncInsts = append(m.asyncInsts, inst)
return inst, nil
}
func (obs *asyncImpl) Implementation() interface{} {
if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return obs
}
func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.AsyncImpl)
var err error
*implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
}
// Metric updates
func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...metric.Measurement) {
if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
(*delegatePtr).RecordBatch(ctx, labels, measurements...)
}
}
func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) {
if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
(*instPtr).RecordOne(ctx, number, labels)
}
}
// Bound instrument initialization
func (bound *syncHandle) RecordOne(ctx context.Context, number number.Number) {
instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
if instPtr == nil {
return
}
var implPtr *metric.BoundSyncImpl
bound.initialize.Do(func() {
implPtr = new(metric.BoundSyncImpl)
*implPtr = (*instPtr).Bind(bound.labels)
atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
})
if implPtr == nil {
implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
}
// This may still be nil if instrument was created and bound
// without a delegate, then the instrument was set to have a
// delegate and unbound.
if implPtr == nil {
return
}
(*implPtr).RecordOne(ctx, number)
}
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
"meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate),
"syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate),
"asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate),
"syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate),
}
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"context"

View File

@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"errors"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@ -28,10 +28,6 @@ type (
tp trace.TracerProvider
}
meterProviderHolder struct {
mp metric.MeterProvider
}
propagatorsHolder struct {
tm propagation.TextMapPropagator
}
@ -39,10 +35,8 @@ type (
var (
globalTracer = defaultTracerValue()
globalMeter = defaultMeterValue()
globalPropagators = defaultPropagatorsValue()
delegateMeterOnce sync.Once
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
)
@ -54,43 +48,28 @@ func TracerProvider() trace.TracerProvider {
// SetTracerProvider is the internal implementation for global.SetTracerProvider.
func SetTracerProvider(tp trace.TracerProvider) {
current := TracerProvider()
if _, cOk := current.(*tracerProvider); cOk {
if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
// Do not assign the default delegating TracerProvider to delegate
// to itself.
Error(
errors.New("no delegate configured in tracer provider"),
"Setting tracer provider to it's current value. No delegate will be configured",
)
return
}
}
delegateTraceOnce.Do(func() {
current := TracerProvider()
if current == tp {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid TracerProvider, the global instance cannot be reinstalled")
} else if def, ok := current.(*tracerProvider); ok {
if def, ok := current.(*tracerProvider); ok {
def.setDelegate(tp)
}
})
globalTracer.Store(tracerProviderHolder{tp: tp})
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeter.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
delegateMeterOnce.Do(func() {
current := MeterProvider()
if current == mp {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid MeterProvider, the global instance cannot be reinstalled")
} else if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeter.Store(meterProviderHolder{mp: mp})
}
// TextMapPropagator is the internal implementation for global.TextMapPropagator.
func TextMapPropagator() propagation.TextMapPropagator {
return globalPropagators.Load().(propagatorsHolder).tm
@ -98,15 +77,24 @@ func TextMapPropagator() propagation.TextMapPropagator {
// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
func SetTextMapPropagator(p propagation.TextMapPropagator) {
current := TextMapPropagator()
if _, cOk := current.(*textMapPropagator); cOk {
if _, pOk := p.(*textMapPropagator); pOk && current == p {
// Do not assign the default delegating TextMapPropagator to
// delegate to itself.
Error(
errors.New("no delegate configured in text map propagator"),
"Setting text map propagator to it's current value. No delegate will be configured",
)
return
}
}
// For the textMapPropagator already returned by TextMapPropagator
// delegate to p.
delegateTextMapPropagatorOnce.Do(func() {
if current := TextMapPropagator(); current == p {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid TextMapPropagator, the global instance cannot be reinstalled")
} else if def, ok := current.(*textMapPropagator); ok {
if def, ok := current.(*textMapPropagator); ok {
def.SetDelegate(p)
}
})
@ -120,24 +108,8 @@ func defaultTracerValue() *atomic.Value {
return v
}
func defaultMeterValue() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: newMeterProvider()})
return v
}
func defaultPropagatorsValue() *atomic.Value {
v := &atomic.Value{}
v.Store(propagatorsHolder{tm: newTextMapPropagator()})
return v
}
// ResetForTest restores the initial global state, for testing purposes.
func ResetForTest() {
globalTracer = defaultTracerValue()
globalMeter = defaultMeterValue()
globalPropagators = defaultPropagatorsValue()
delegateMeterOnce = sync.Once{}
delegateTraceOnce = sync.Once{}
delegateTextMapPropagatorOnce = sync.Once{}
}
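// Hedged illustration (editor's sketch, not part of the vendored file): with
// the guard added above, re-installing the default delegating provider no
// longer panics; it is reported through the global ErrorHandler and ignored.
package main

import "go.opentelemetry.io/otel"

func main() {
	current := otel.GetTracerProvider() // the default delegating provider
	otel.SetTracerProvider(current)     // logged as an error, not a panic
}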

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global
package global // import "go.opentelemetry.io/otel/internal/global"
/*
This file contains the forwarding implementation of the TracerProvider used as
@ -36,7 +36,8 @@ import (
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/internal/trace/noop"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
)
@ -89,9 +90,10 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
// At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map.
c := trace.NewTracerConfig(opts...)
key := il{
name: name,
version: trace.NewTracerConfig(opts...).InstrumentationVersion,
version: c.InstrumentationVersion(),
}
if p.tracers == nil {
@ -102,7 +104,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return val
}
t := &tracer{name: name, opts: opts}
t := &tracer{name: name, opts: opts, provider: p}
p.tracers[key] = t
return t
}
@ -117,8 +119,9 @@ type il struct {
// All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct {
name string
opts []trace.TracerOption
name string
opts []trace.TracerOption
provider *tracerProvider
delegate atomic.Value
}
@ -138,10 +141,52 @@ func (t *tracer) setDelegate(provider trace.TracerProvider) {
// Start implements trace.Tracer by forwarding the call to t.delegate if
// set, otherwise it forwards the call to a NoopTracer.
func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) {
func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
delegate := t.delegate.Load()
if delegate != nil {
return delegate.(trace.Tracer).Start(ctx, name, opts...)
}
return noop.Tracer.Start(ctx, name, opts...)
s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
ctx = trace.ContextWithSpan(ctx, s)
return ctx, s
}
// nonRecordingSpan is a minimal implementation of a Span that wraps a
// SpanContext. It performs no operations other than to return the wrapped
// SpanContext.
type nonRecordingSpan struct {
sc trace.SpanContext
tracer *tracer
}
var _ trace.Span = nonRecordingSpan{}
// SpanContext returns the wrapped SpanContext.
func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
// IsRecording always returns false.
func (nonRecordingSpan) IsRecording() bool { return false }
// SetStatus does nothing.
func (nonRecordingSpan) SetStatus(codes.Code, string) {}
// SetError does nothing.
func (nonRecordingSpan) SetError(bool) {}
// SetAttributes does nothing.
func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
// End does nothing.
func (nonRecordingSpan) End(...trace.SpanEndOption) {}
// RecordError does nothing.
func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
// SetName does nothing.
func (nonRecordingSpan) SetName(string) {}
func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
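// Hedged illustration (editor's sketch, not part of the vendored file): before
// an SDK TracerProvider is installed, the global tracer now returns a
// non-recording span that still carries the SpanContext from the context.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	ctx, span := otel.Tracer("example").Start(context.Background(), "op")
	defer span.End()
	fmt.Println(span.IsRecording()) // false: no SDK has been installed
	_ = ctx                         // the returned context carries the non-recording span
}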

View File

@ -1,148 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"errors"
"fmt"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
)
var ErrInvalidAsyncRunner = errors.New("unknown async runner type")
// AsyncCollector is an interface used between the MeterImpl and the
// AsyncInstrumentState helper below. This interface is implemented by
// the SDK to provide support for running observer callbacks.
type AsyncCollector interface {
// CollectAsync passes a batch of observations to the MeterImpl.
CollectAsync(labels []attribute.KeyValue, observation ...metric.Observation)
}
// AsyncInstrumentState manages an ordered set of asynchronous
// instruments and the distinct runners, taking into account batch
// observer callbacks.
type AsyncInstrumentState struct {
lock sync.Mutex
// errorOnce will use the otel.Handler to report an error
// once in case of an invalid runner attempting to run.
errorOnce sync.Once
// runnerMap keeps the set of runners that will run each
// collection interval. Singletons are entered with a real
// instrument each, batch observers are entered with a nil
// instrument, ensuring that when a singleton callback is used
// repeatedly, it is executed repeatedly in the interval, while
// when a batch callback is used repeatedly, it only executes
// once per interval.
runnerMap map[asyncRunnerPair]struct{}
// runners maintains the set of runners in the order they were
// registered.
runners []asyncRunnerPair
// instruments maintains the set of instruments in the order
// they were registered.
instruments []metric.AsyncImpl
}
// asyncRunnerPair is a map entry for Observer callback runners.
type asyncRunnerPair struct {
// runner is used as a map key here. The API ensures
// that all callbacks are pointers for this reason.
runner metric.AsyncRunner
// inst refers to a non-nil instrument when `runner` is a
// AsyncSingleRunner.
inst metric.AsyncImpl
}
// NewAsyncInstrumentState returns a new *AsyncInstrumentState, for
// use by MeterImpl to manage running the set of observer callbacks in
// the correct order.
func NewAsyncInstrumentState() *AsyncInstrumentState {
return &AsyncInstrumentState{
runnerMap: map[asyncRunnerPair]struct{}{},
}
}
// Instruments returns the asynchronous instruments managed by this
// object, the set that should be checkpointed after observers are
// run.
func (a *AsyncInstrumentState) Instruments() []metric.AsyncImpl {
a.lock.Lock()
defer a.lock.Unlock()
return a.instruments
}
// Register adds a new asynchronous instrument to by managed by this
// object. This should be called during NewAsyncInstrument() and
// assumes that errors (e.g., duplicate registration) have already
// been checked.
func (a *AsyncInstrumentState) Register(inst metric.AsyncImpl, runner metric.AsyncRunner) {
a.lock.Lock()
defer a.lock.Unlock()
a.instruments = append(a.instruments, inst)
// asyncRunnerPair reflects this callback in the asyncRunners
// list. If this is a batch runner, the instrument is nil.
// If this is a single-Observer runner, the instrument is
// included. This ensures that batch callbacks are called
// once and single callbacks are called once per instrument.
rp := asyncRunnerPair{
runner: runner,
}
if _, ok := runner.(metric.AsyncSingleRunner); ok {
rp.inst = inst
}
if _, ok := a.runnerMap[rp]; !ok {
a.runnerMap[rp] = struct{}{}
a.runners = append(a.runners, rp)
}
}
// Run executes the complete set of observer callbacks.
func (a *AsyncInstrumentState) Run(ctx context.Context, collector AsyncCollector) {
a.lock.Lock()
runners := a.runners
a.lock.Unlock()
for _, rp := range runners {
// The runner must be a single or batch runner, no
// other implementations are possible because the
// interface has un-exported methods.
if singleRunner, ok := rp.runner.(metric.AsyncSingleRunner); ok {
singleRunner.Run(ctx, rp.inst, collector.CollectAsync)
continue
}
if multiRunner, ok := rp.runner.(metric.AsyncBatchRunner); ok {
multiRunner.Run(ctx, collector.CollectAsync)
continue
}
a.errorOnce.Do(func() {
otel.Handle(fmt.Errorf("%w: type %T (reported once)", ErrInvalidAsyncRunner, rp))
})
}
}

View File

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
package internal // import "go.opentelemetry.io/otel/internal"
import (
"math"
"unsafe"
)
func BoolToRaw(b bool) uint64 {
func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
if b {
return 1
}

View File

@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/sdk/metric"
package otel // import "go.opentelemetry.io/otel"
import "unsafe"
import (
"github.com/go-logr/logr"
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),
"record.updateCount": unsafe.Offsetof(record{}.updateCount),
}
"go.opentelemetry.io/otel/internal/global"
)
// SetLogger configures the logger used internally to opentelemetry.
func SetLogger(logger logr.Logger) {
global.SetLogger(logger)
}

View File

@ -14,85 +14,26 @@
package metric // import "go.opentelemetry.io/otel/metric"
import (
"go.opentelemetry.io/otel/unit"
)
// InstrumentConfig contains options for metric instrument descriptors.
type InstrumentConfig struct {
// Description describes the instrument in human-readable terms.
Description string
// Unit describes the measurement unit for a instrument.
Unit unit.Unit
// InstrumentationName is the name of the library providing
// instrumentation.
InstrumentationName string
// InstrumentationVersion is the version of the library providing
// instrumentation.
InstrumentationVersion string
}
// InstrumentOption is an interface for applying metric instrument options.
type InstrumentOption interface {
// ApplyMeter is used to set a InstrumentOption value of a
// InstrumentConfig.
ApplyInstrument(*InstrumentConfig)
}
// NewInstrumentConfig creates a new InstrumentConfig
// and applies all the given options.
func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig {
var config InstrumentConfig
for _, o := range opts {
o.ApplyInstrument(&config)
}
return config
}
// WithDescription applies provided description.
func WithDescription(desc string) InstrumentOption {
return descriptionOption(desc)
}
type descriptionOption string
func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) {
config.Description = string(d)
}
// WithUnit applies provided unit.
func WithUnit(unit unit.Unit) InstrumentOption {
return unitOption(unit)
}
type unitOption unit.Unit
func (u unitOption) ApplyInstrument(config *InstrumentConfig) {
config.Unit = unit.Unit(u)
}
// WithInstrumentationName sets the instrumentation name.
func WithInstrumentationName(name string) InstrumentOption {
return instrumentationNameOption(name)
}
type instrumentationNameOption string
func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) {
config.InstrumentationName = string(i)
}
// MeterConfig contains options for Meters.
type MeterConfig struct {
// InstrumentationVersion is the version of the library providing
// instrumentation.
InstrumentationVersion string
instrumentationVersion string
schemaURL string
}
// InstrumentationVersion is the version of the library providing instrumentation.
func (cfg MeterConfig) InstrumentationVersion() string {
return cfg.instrumentationVersion
}
// SchemaURL is the schema_url of the library providing instrumentation.
func (cfg MeterConfig) SchemaURL() string {
return cfg.schemaURL
}
// MeterOption is an interface for applying Meter options.
type MeterOption interface {
// ApplyMeter is used to set a MeterOption value of a MeterConfig.
ApplyMeter(*MeterConfig)
// applyMeter is used to set a MeterOption value of a MeterConfig.
applyMeter(MeterConfig) MeterConfig
}
// NewMeterConfig creates a new MeterConfig and applies
@ -100,29 +41,29 @@ type MeterOption interface {
func NewMeterConfig(opts ...MeterOption) MeterConfig {
var config MeterConfig
for _, o := range opts {
o.ApplyMeter(&config)
config = o.applyMeter(config)
}
return config
}
// InstrumentationOption is an interface for applying instrumentation specific
// options.
type InstrumentationOption interface {
InstrumentOption
MeterOption
type meterOptionFunc func(MeterConfig) MeterConfig
func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
return fn(cfg)
}
// WithInstrumentationVersion sets the instrumentation version.
func WithInstrumentationVersion(version string) InstrumentationOption {
return instrumentationVersionOption(version)
func WithInstrumentationVersion(version string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.instrumentationVersion = version
return config
})
}
type instrumentationVersionOption string
func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) {
config.InstrumentationVersion = string(i)
}
func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) {
config.InstrumentationVersion = string(i)
// WithSchemaURL sets the schema URL.
func WithSchemaURL(schemaURL string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.schemaURL = schemaURL
return config
})
}
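// Hedged illustration (editor's sketch, not part of the vendored file): the
// reworked functional options applied and read back through the accessors.
// The version and schema URL values are made up for illustration.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	cfg := metric.NewMeterConfig(
		metric.WithInstrumentationVersion("1.2.3"),
		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.12.0"),
	)
	fmt.Println(cfg.InstrumentationVersion(), cfg.SchemaURL())
}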

View File

@ -19,49 +19,5 @@ OpenTelemetry API.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
Measurements can be made about an operation being performed or the state of a
system in general. These measurements can be crucial to the reliable operation
of code and provide valuable insights about the inner workings of a system.
Measurements are made using instruments provided by this package. The type of
instrument used will depend on the type of measurement being made and of what
part of a system is being measured.
Instruments are categorized as Synchronous or Asynchronous and independently
as Adding or Grouping. Synchronous instruments are called by the user with a
Context. Asynchronous instruments are called by the SDK during collection.
Additive instruments are semantically intended for capturing a sum. Grouping
instruments are intended for capturing a distribution.
Additive instruments may be monotonic, in which case they are non-decreasing
and naturally define a rate.
The synchronous instrument names are:
Counter: additive, monotonic
UpDownCounter: additive
ValueRecorder: grouping
and the asynchronous instruments are:
SumObserver: additive, monotonic
UpDownSumObserver: additive
ValueObserver: grouping
All instruments are provided with support for either float64 or int64 input
values.
An instrument is created using a Meter. Additionally, a Meter is used to
record batches of synchronous measurements or asynchronous observations. A
Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to
the instrumentation it instruments and must be named and versioned when
created with a MeterProvider with the name and version of the instrumentation
library.
Instrumentation should be designed to accept a MeterProvider from which it can
create its own unique Meter. Alternatively, the registered global
MeterProvider from the go.opentelemetry.io/otel package can be used as a
default.
*/
package metric // import "go.opentelemetry.io/otel/metric"
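As a hedged sketch of the guidance above (instrumentation should accept a MeterProvider and may fall back to the global registration), a hypothetical library constructor might look like this; the package, type, and instrument names are illustrative, and the global package used is the one shown later in this change.
package mylib // hypothetical instrumented library

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

// Thing is an instrumented component holding its own uniquely named Meter.
type Thing struct {
	meter metric.Meter
}

// New creates the component's Meter from the supplied MeterProvider, or from
// the registered global provider when none is given.
func New(mp metric.MeterProvider) *Thing {
	if mp == nil {
		mp = global.MeterProvider()
	}
	return &Thing{
		meter: mp.Meter("example.com/mylib", metric.WithInstrumentationVersion("0.1.0")),
	}
}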

View File

@ -15,31 +15,24 @@
package global // import "go.opentelemetry.io/otel/metric/global"
import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/internal/global"
)
// Meter creates an implementation of the Meter interface from the global
// MeterProvider. The instrumentationName must be the name of the library
// providing instrumentation. This name may be the same as the instrumented
// code only if that code provides built-in instrumentation. If the
// instrumentationName is empty, then an implementation defined default name
// will be used instead.
// Meter returns a Meter from the global MeterProvider. The
// instrumentationName must be the name of the library providing
// instrumentation. This name may be the same as the instrumented code only if
// that code provides built-in instrumentation. If the instrumentationName is
// empty, then an implementation defined default name will be used instead.
//
// This is short for MeterProvider().Meter(name)
// This is short for MeterProvider().Meter(name).
func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
return GetMeterProvider().Meter(instrumentationName, opts...)
return MeterProvider().Meter(instrumentationName, opts...)
}
// GetMeterProvider returns the registered global meter provider. If
// none is registered then a default meter provider is returned that
// forwards the Meter interface to the first registered Meter.
//
// Use the meter provider to create a named meter. E.g.
// meter := global.MeterProvider().Meter("example.com/foo")
// or
// meter := global.Meter("example.com/foo")
func GetMeterProvider() metric.MeterProvider {
// MeterProvider returns the registered global meter provider.
// If none is registered then a No-op MeterProvider is returned.
func MeterProvider() metric.MeterProvider {
return global.MeterProvider()
}
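A minimal usage sketch of the two functions above (mirroring the example retired from the old comment); the meter name is illustrative.
package main

import "go.opentelemetry.io/otel/metric/global"

func main() {
	// The two calls below are equivalent; Meter is shorthand for
	// MeterProvider().Meter(name).
	m1 := global.MeterProvider().Meter("example.com/foo")
	m2 := global.Meter("example.com/foo")
	_, _ = m1, m2
}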

View File

@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Gauge creates an instrument for recording the current value.
Gauge(name string, opts ...instrument.Option) (Gauge, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// Gauge is an instrument that records independent readings.
type Gauge interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
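A hedged sketch of how these asynchronous instruments are meant to be used, relying on the Meter.AsyncFloat64 and Meter.RegisterCallback methods defined later in this change; the meter and instrument names are illustrative and errors are surfaced via panic only for brevity.
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
)

func main() {
	meter := global.Meter("example.com/foo")

	// Asynchronous instruments may only be observed inside a registered callback.
	depth, err := meter.AsyncFloat64().Gauge("queue.depth",
		instrument.WithDescription("current depth of the work queue"))
	if err != nil {
		panic(err)
	}
	if err := meter.RegisterCallback([]instrument.Asynchronous{depth},
		func(ctx context.Context) {
			depth.Observe(ctx, 42) // report the state sampled at collection time
		}); err != nil {
		panic(err)
	}
}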

View File

@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Gauge creates an instrument for recording the current value.
Gauge(name string, opts ...instrument.Option) (Gauge, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// Gauge is an instrument that records independent readings.
type Gauge interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}

View File

@ -0,0 +1,69 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package instrument // import "go.opentelemetry.io/otel/metric/instrument"
import "go.opentelemetry.io/otel/metric/unit"
// Config contains options for metric instrument descriptors.
type Config struct {
description string
unit unit.Unit
}
// Description describes the instrument in human-readable terms.
func (cfg Config) Description() string {
return cfg.description
}
// Unit describes the measurement unit for an instrument.
func (cfg Config) Unit() unit.Unit {
return cfg.unit
}
// Option is an interface for applying metric instrument options.
type Option interface {
applyInstrument(Config) Config
}
// NewConfig creates a new Config and applies all the given options.
func NewConfig(opts ...Option) Config {
var config Config
for _, o := range opts {
config = o.applyInstrument(config)
}
return config
}
type optionFunc func(Config) Config
func (fn optionFunc) applyInstrument(cfg Config) Config {
return fn(cfg)
}
// WithDescription applies provided description.
func WithDescription(desc string) Option {
return optionFunc(func(cfg Config) Config {
cfg.description = desc
return cfg
})
}
// WithUnit applies provided unit.
func WithUnit(u unit.Unit) Option {
return optionFunc(func(cfg Config) Config {
cfg.unit = u
return cfg
})
}
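A small sketch of the options above being applied to a Config; unit.Bytes is assumed to exist in the go.opentelemetry.io/otel/metric/unit package, which is not part of this hunk.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/instrument"
	"go.opentelemetry.io/otel/metric/unit"
)

func main() {
	// Options are applied in order to a zero-value Config.
	cfg := instrument.NewConfig(
		instrument.WithDescription("bytes read from disk"),
		instrument.WithUnit(unit.Bytes), // assumed constant from the unit package
	)
	fmt.Println(cfg.Description(), cfg.Unit())
}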

View File

@ -0,0 +1,30 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package instrument // import "go.opentelemetry.io/otel/metric/instrument"
// Asynchronous instruments are instruments that are updated within a Callback.
// If an instrument is observed outside of its callback, it should be an error.
//
// This interface is used as a grouping mechanism.
type Asynchronous interface {
asynchronous()
}
// Synchronous instruments are updated in line with application code.
//
// This interface is used as a grouping mechanism.
type Synchronous interface {
synchronous()
}
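The delegating instruments added later in this change satisfy these grouping interfaces simply by embedding them; a minimal hypothetical example of the same trick:
package example

import "go.opentelemetry.io/otel/metric/instrument"

// myCounter is a hypothetical instrument. Embedding instrument.Synchronous is
// enough to satisfy the grouping interface, because synchronous() is
// unexported and can only be obtained from the instrument package.
type myCounter struct {
	instrument.Synchronous
}

var _ instrument.Synchronous = myCounter{}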

View File

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Histogram creates an instrument for recording a distribution of values.
Histogram(name string, opts ...instrument.Option) (Histogram, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// Histogram is an instrument that records a distribution of values.
type Histogram interface {
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
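A hedged usage sketch for these synchronous instruments, obtained through the Meter.SyncFloat64 namespace defined later in this change; names are illustrative and error handling is shortened to panic.
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	ctx := context.Background()
	meter := global.Meter("example.com/foo")

	// Synchronous instruments are called inline with application code.
	requests, err := meter.SyncFloat64().Counter("requests.total")
	if err != nil {
		panic(err)
	}
	latency, err := meter.SyncFloat64().Histogram("request.duration")
	if err != nil {
		panic(err)
	}
	requests.Add(ctx, 1)
	latency.Record(ctx, 0.123)
}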

View File

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Histogram creates an instrument for recording a distribution of values.
Histogram(name string, opts ...instrument.Option) (Histogram, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// Histogram is an instrument that records a distribution of values.
type Histogram interface {
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
instrument.Synchronous
}

View File

@ -1,28 +0,0 @@
// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
package metric
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ValueRecorderInstrumentKind-0]
_ = x[ValueObserverInstrumentKind-1]
_ = x[CounterInstrumentKind-2]
_ = x[UpDownCounterInstrumentKind-3]
_ = x[SumObserverInstrumentKind-4]
_ = x[UpDownSumObserverInstrumentKind-5]
}
const _InstrumentKind_name = "ValueRecorderInstrumentKindValueObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindSumObserverInstrumentKindUpDownSumObserverInstrumentKind"
var _InstrumentKind_index = [...]uint8{0, 27, 54, 75, 102, 127, 158}
func (i InstrumentKind) String() string {
if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) {
return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
}

View File

@ -0,0 +1,360 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
import (
"context"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
)
type afCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncfloat64.Counter
instrument.Asynchronous
}
func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...)
}
}
func (i *afCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncfloat64.Counter)
}
return nil
}
type afUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncfloat64.UpDownCounter
instrument.Asynchronous
}
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...)
}
}
func (i *afUpDownCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncfloat64.UpDownCounter)
}
return nil
}
type afGauge struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncfloat64.Gauge
instrument.Asynchronous
}
func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...)
}
}
func (i *afGauge) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncfloat64.Gauge)
}
return nil
}
type aiCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncint64.Counter
instrument.Asynchronous
}
func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncInt64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncint64.Counter).Observe(ctx, x, attrs...)
}
}
func (i *aiCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncint64.Counter)
}
return nil
}
type aiUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncint64.UpDownCounter
instrument.Asynchronous
}
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...)
}
}
func (i *aiUpDownCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncint64.UpDownCounter)
}
return nil
}
type aiGauge struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncint64.Gauge
instrument.Asynchronous
}
func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...)
}
}
func (i *aiGauge) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncint64.Gauge)
}
return nil
}
// Sync Instruments.
type sfCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncfloat64.Counter
instrument.Synchronous
}
func (i *sfCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncFloat64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...)
}
}
type sfUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncfloat64.UpDownCounter
instrument.Synchronous
}
func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...)
}
}
type sfHistogram struct {
name string
opts []instrument.Option
delegate atomic.Value //syncfloat64.Histogram
instrument.Synchronous
}
func (i *sfHistogram) setDelegate(m metric.Meter) {
ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...)
}
}
type siCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncint64.Counter
instrument.Synchronous
}
func (i *siCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncInt64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncint64.Counter).Add(ctx, x, attrs...)
}
}
type siUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncint64.UpDownCounter
instrument.Synchronous
}
func (i *siUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...)
}
}
type siHistogram struct {
name string
opts []instrument.Option
delegate atomic.Value //syncint64.Histogram
instrument.Synchronous
}
func (i *siHistogram) setDelegate(m metric.Meter) {
ctr, err := m.SyncInt64().Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncint64.Histogram).Record(ctx, x, attrs...)
}
}
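Every type in this file repeats the same write-once delegation idiom. Reduced to its essentials (hypothetical names), it looks like the sketch below: measurements are silently dropped until a delegate is stored, after which they are forwarded through a type assertion.
package example

import "sync/atomic"

// lazyRecorder is a stripped-down version of the pattern above.
type lazyRecorder struct {
	delegate atomic.Value // func(float64)
}

// setDelegate stores the real recording function; the callers above guarantee
// this happens only once.
func (l *lazyRecorder) setDelegate(fn func(float64)) { l.delegate.Store(fn) }

// record drops the value until a delegate exists, then forwards to it.
func (l *lazyRecorder) record(x float64) {
	if fn := l.delegate.Load(); fn != nil {
		fn.(func(float64))(x)
	}
}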

View File

@ -0,0 +1,347 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
import (
"context"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
)
// meterProvider is a placeholder for a configured SDK MeterProvider.
//
// All MeterProvider functionality is forwarded to a delegate once
// configured.
type meterProvider struct {
mtx sync.Mutex
meters map[il]*meter
delegate metric.MeterProvider
}
type il struct {
name string
version string
}
// setDelegate configures p to delegate all MeterProvider functionality to
// provider.
//
// All Meters provided prior to this function call are switched out to be
// Meters provided by provider. All instruments and callbacks are recreated and
// delegated.
//
// It is guaranteed by the caller that this happens only once.
func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
p.mtx.Lock()
defer p.mtx.Unlock()
p.delegate = provider
if len(p.meters) == 0 {
return
}
for _, meter := range p.meters {
meter.setDelegate(provider)
}
p.meters = nil
}
// Meter implements MeterProvider.
func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
p.mtx.Lock()
defer p.mtx.Unlock()
if p.delegate != nil {
return p.delegate.Meter(name, opts...)
}
// At this moment it is guaranteed that no SDK is installed; save the meter in the meters map.
c := metric.NewMeterConfig(opts...)
key := il{
name: name,
version: c.InstrumentationVersion(),
}
if p.meters == nil {
p.meters = make(map[il]*meter)
}
if val, ok := p.meters[key]; ok {
return val
}
t := &meter{name: name, opts: opts}
p.meters[key] = t
return t
}
// meter is a placeholder for a metric.Meter.
//
// All Meter functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopMeter.
type meter struct {
name string
opts []metric.MeterOption
mtx sync.Mutex
instruments []delegatedInstrument
callbacks []delegatedCallback
delegate atomic.Value // metric.Meter
}
type delegatedInstrument interface {
setDelegate(metric.Meter)
}
// setDelegate configures m to delegate all Meter functionality to Meters
// created by provider.
//
// All subsequent calls to the Meter methods will be passed to the delegate.
//
// It is guaranteed by the caller that this happens only once.
func (m *meter) setDelegate(provider metric.MeterProvider) {
meter := provider.Meter(m.name, m.opts...)
m.delegate.Store(meter)
m.mtx.Lock()
defer m.mtx.Unlock()
for _, inst := range m.instruments {
inst.setDelegate(meter)
}
for _, callback := range m.callbacks {
callback.setDelegate(meter)
}
m.instruments = nil
m.callbacks = nil
}
// AsyncInt64 is the namespace for the Asynchronous Integer instruments.
//
// To Observe data with these instruments, they must be registered in a callback.
func (m *meter) AsyncInt64() asyncint64.InstrumentProvider {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.AsyncInt64()
}
return (*aiInstProvider)(m)
}
// AsyncFloat64 is the namespace for the Asynchronous Float instruments.
//
// To Observe data with these instruments, they must be registered in a callback.
func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.AsyncFloat64()
}
return (*afInstProvider)(m)
}
// RegisterCallback captures the function that will be called during Collect.
//
// It is only valid to call Observe within the scope of the passed function,
// and only on the instruments that were registered with this call.
func (m *meter) RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error {
if del, ok := m.delegate.Load().(metric.Meter); ok {
insts = unwrapInstruments(insts)
return del.RegisterCallback(insts, function)
}
m.mtx.Lock()
defer m.mtx.Unlock()
m.callbacks = append(m.callbacks, delegatedCallback{
instruments: insts,
function: function,
})
return nil
}
type wrapped interface {
unwrap() instrument.Asynchronous
}
func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous {
out := make([]instrument.Asynchronous, 0, len(instruments))
for _, inst := range instruments {
if in, ok := inst.(wrapped); ok {
out = append(out, in.unwrap())
} else {
out = append(out, inst)
}
}
return out
}
// SyncInt64 is the namespace for the Synchronous Integer instruments.
func (m *meter) SyncInt64() syncint64.InstrumentProvider {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.SyncInt64()
}
return (*siInstProvider)(m)
}
// SyncFloat64 is the namespace for the Synchronous Float instruments.
func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.SyncFloat64()
}
return (*sfInstProvider)(m)
}
type delegatedCallback struct {
instruments []instrument.Asynchronous
function func(context.Context)
}
func (c *delegatedCallback) setDelegate(m metric.Meter) {
insts := unwrapInstruments(c.instruments)
err := m.RegisterCallback(insts, c.function)
if err != nil {
otel.Handle(err)
}
}
type afInstProvider meter
// Counter creates an instrument for recording increasing values.
func (ip *afInstProvider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &afCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// UpDownCounter creates an instrument for recording changes of a value.
func (ip *afInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &afUpDownCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// Gauge creates an instrument for recording the current value.
func (ip *afInstProvider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &afGauge{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
type aiInstProvider meter
// Counter creates an instrument for recording increasing values.
func (ip *aiInstProvider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &aiCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// UpDownCounter creates an instrument for recording changes of a value.
func (ip *aiInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &aiUpDownCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// Gauge creates an instrument for recording the current value.
func (ip *aiInstProvider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &aiGauge{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
type sfInstProvider meter
// Counter creates an instrument for recording increasing values.
func (ip *sfInstProvider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &sfCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// UpDownCounter creates an instrument for recording changes of a value.
func (ip *sfInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &sfUpDownCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// Histogram creates an instrument for recording a distribution of values.
func (ip *sfInstProvider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &sfHistogram{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
type siInstProvider meter
// Counter creates an instrument for recording increasing values.
func (ip *siInstProvider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &siCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// UpDownCounter creates an instrument for recording changes of a value.
func (ip *siInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &siUpDownCounter{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
// Histogram creates an instrument for recording a distribution of values.
func (ip *siInstProvider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
ip.mtx.Lock()
defer ip.mtx.Unlock()
ctr := &siHistogram{name: name, opts: opts}
ip.instruments = append(ip.instruments, ctr)
return ctr, nil
}
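Seen from user code, the effect of this delegation is that instruments created before an SDK is installed are placeholders that start forwarding once a provider is set. A hedged sketch; global.SetMeterProvider and the SDK constructor are assumed here and are not part of this change.
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric/global"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric" // assumed SDK package
)

func main() {
	ctx := context.Background()

	// Created against the default delegating MeterProvider.
	ctr, _ := global.Meter("example.com/foo").SyncInt64().Counter("early.counter")
	ctr.Add(ctx, 1) // dropped: no delegate is configured yet

	// Installing an SDK swaps in the delegate; the placeholder above is
	// recreated on the real Meter and later calls are recorded.
	global.SetMeterProvider(sdkmetric.NewMeterProvider()) // assumed entry point
	ctr.Add(ctx, 1)                                       // now forwarded
}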

View File

@ -0,0 +1,68 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
import (
"errors"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
)
var (
globalMeterProvider = defaultMeterProvider()
delegateMeterOnce sync.Once
)
type meterProviderHolder struct {
mp metric.MeterProvider
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeterProvider.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
current := MeterProvider()
if _, cOk := current.(*meterProvider); cOk {
if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
// Do not assign the default delegating MeterProvider to delegate
// to itself.
global.Error(
errors.New("no delegate configured in meter provider"),
"Setting meter provider to it's current value. No delegate will be configured",
)
return
}
}
delegateMeterOnce.Do(func() {
if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
func defaultMeterProvider() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: &meterProvider{}})
return v
}
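The guard in SetMeterProvider above exists to catch one specific misuse: registering the current default delegating provider as its own delegate. A hedged sketch of what that looks like through the public global package (whose SetMeterProvider wrapper is assumed and not shown in this change):
package main

import "go.opentelemetry.io/otel/metric/global"

func main() {
	// Before any SDK is installed, MeterProvider() returns the delegating
	// default. Passing it straight back would make it delegate to itself, so
	// the call is rejected and an error goes to the global error handler.
	global.SetMeterProvider(global.MeterProvider())
}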

60
vendor/go.opentelemetry.io/otel/metric/meter.go generated vendored Normal file
View File

@ -0,0 +1,60 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
)
// MeterProvider provides access to named Meter instances, for instrumenting
// an application or library.
type MeterProvider interface {
// Meter creates an instance of a `Meter` interface. The instrumentationName
// must be the name of the library providing instrumentation. This name may
// be the same as the instrumented code only if that code provides built-in
// instrumentation. If the instrumentationName is empty, then an
// implementation defined default name will be used instead.
Meter(instrumentationName string, opts ...MeterOption) Meter
}
// Meter provides access to instrument instances for recording metrics.
type Meter interface {
// AsyncInt64 is the namespace for the Asynchronous Integer instruments.
//
// To Observe data with these instruments, they must be registered in a callback.
AsyncInt64() asyncint64.InstrumentProvider
// AsyncFloat64 is the namespace for the Asynchronous Float instruments.
//
// To Observe data with these instruments, they must be registered in a callback.
AsyncFloat64() asyncfloat64.InstrumentProvider
// RegisterCallback captures the function that will be called during Collect.
//
// It is only valid to call Observe within the scope of the passed function,
// and only on the instruments that were registered with this call.
RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error
// SyncInt64 is the namespace for the Synchronous Integer instruments.
SyncInt64() syncint64.InstrumentProvider
// SyncFloat64 is the namespace for the Synchronous Float instruments.
SyncFloat64() syncfloat64.InstrumentProvider
}
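A hedged sketch tying the Meter namespaces together: two asynchronous instruments of different number kinds are registered under a single callback. The function and the sampling helpers are hypothetical.
package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/instrument"
)

// collectRuntimeStats wires instruments from two namespaces of the same Meter
// into one callback; readMemBytes and readCPUSeconds are placeholders.
func collectRuntimeStats(meter metric.Meter) error {
	mem, err := meter.AsyncInt64().Gauge("process.memory.usage")
	if err != nil {
		return err
	}
	cpu, err := meter.AsyncFloat64().Counter("process.cpu.time")
	if err != nil {
		return err
	}
	return meter.RegisterCallback(
		[]instrument.Asynchronous{mem, cpu},
		func(ctx context.Context) {
			mem.Observe(ctx, readMemBytes())
			cpu.Observe(ctx, readCPUSeconds())
		},
	)
}

func readMemBytes() int64     { return 0 } // hypothetical sampling helper
func readCPUSeconds() float64 { return 0 } // hypothetical sampling helper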

View File

@ -1,577 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/unit"
)
// MeterProvider supports named Meter instances.
type MeterProvider interface {
// Meter creates an implementation of the Meter interface.
// The instrumentationName must be the name of the library providing
// instrumentation. This name may be the same as the instrumented code
// only if that code provides built-in instrumentation. If the
// instrumentationName is empty, then an implementation defined default
// name will be used instead.
Meter(instrumentationName string, opts ...MeterOption) Meter
}
// Meter is the creator of metric instruments.
//
// An uninitialized Meter is a no-op implementation.
type Meter struct {
impl MeterImpl
name, version string
}
// RecordBatch atomically records a batch of measurements.
func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) {
if m.impl == nil {
return
}
m.impl.RecordBatch(ctx, ls, ms...)
}
// NewBatchObserver creates a new BatchObserver that supports
// making batches of observations for multiple instruments.
func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver {
return BatchObserver{
meter: m,
runner: newBatchAsyncRunner(callback),
}
}
// NewInt64Counter creates a new integer Counter instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) {
return wrapInt64CounterInstrument(
m.newSync(name, CounterInstrumentKind, number.Int64Kind, options))
}
// NewFloat64Counter creates a new floating point Counter with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) {
return wrapFloat64CounterInstrument(
m.newSync(name, CounterInstrumentKind, number.Float64Kind, options))
}
// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) {
return wrapInt64UpDownCounterInstrument(
m.newSync(name, UpDownCounterInstrumentKind, number.Int64Kind, options))
}
// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) {
return wrapFloat64UpDownCounterInstrument(
m.newSync(name, UpDownCounterInstrumentKind, number.Float64Kind, options))
}
// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64ValueRecorder(name string, opts ...InstrumentOption) (Int64ValueRecorder, error) {
return wrapInt64ValueRecorderInstrument(
m.newSync(name, ValueRecorderInstrumentKind, number.Int64Kind, opts))
}
// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64ValueRecorder(name string, opts ...InstrumentOption) (Float64ValueRecorder, error) {
return wrapFloat64ValueRecorderInstrument(
m.newSync(name, ValueRecorderInstrumentKind, number.Float64Kind, opts))
}
// NewInt64ValueObserver creates a new integer ValueObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewInt64ValueObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64ValueObserver, error) {
if callback == nil {
return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64ValueObserverInstrument(
m.newAsync(name, ValueObserverInstrumentKind, number.Int64Kind, opts,
newInt64AsyncRunner(callback)))
}
// NewFloat64ValueObserver creates a new floating point ValueObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64ValueObserver, error) {
if callback == nil {
return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64ValueObserverInstrument(
m.newAsync(name, ValueObserverInstrumentKind, number.Float64Kind, opts,
newFloat64AsyncRunner(callback)))
}
// NewInt64SumObserver creates a new integer SumObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) {
if callback == nil {
return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64SumObserverInstrument(
m.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts,
newInt64AsyncRunner(callback)))
}
// NewFloat64SumObserver creates a new floating point SumObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewFloat64SumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64SumObserver, error) {
if callback == nil {
return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64SumObserverInstrument(
m.newAsync(name, SumObserverInstrumentKind, number.Float64Kind, opts,
newFloat64AsyncRunner(callback)))
}
// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
if callback == nil {
return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64UpDownSumObserverInstrument(
m.newAsync(name, UpDownSumObserverInstrumentKind, number.Int64Kind, opts,
newInt64AsyncRunner(callback)))
}
// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
if callback == nil {
return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64UpDownSumObserverInstrument(
m.newAsync(name, UpDownSumObserverInstrumentKind, number.Float64Kind, opts,
newFloat64AsyncRunner(callback)))
}
// NewInt64ValueObserver creates a new integer ValueObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) {
if b.runner == nil {
return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64ValueObserverInstrument(
b.meter.newAsync(name, ValueObserverInstrumentKind, number.Int64Kind, opts, b.runner))
}
// NewFloat64ValueObserver creates a new floating point ValueObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewFloat64ValueObserver(name string, opts ...InstrumentOption) (Float64ValueObserver, error) {
if b.runner == nil {
return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64ValueObserverInstrument(
b.meter.newAsync(name, ValueObserverInstrumentKind, number.Float64Kind, opts,
b.runner))
}
// NewInt64SumObserver creates a new integer SumObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) {
if b.runner == nil {
return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64SumObserverInstrument(
b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner))
}
// NewFloat64SumObserver creates a new floating point SumObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) {
if b.runner == nil {
return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64SumObserverInstrument(
b.meter.newAsync(name, SumObserverInstrumentKind, number.Float64Kind, opts,
b.runner))
}
// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
if b.runner == nil {
return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64UpDownSumObserverInstrument(
b.meter.newAsync(name, UpDownSumObserverInstrumentKind, number.Int64Kind, opts, b.runner))
}
// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewFloat64UpDownSumObserver(name string, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
if b.runner == nil {
return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64UpDownSumObserverInstrument(
b.meter.newAsync(name, UpDownSumObserverInstrumentKind, number.Float64Kind, opts,
b.runner))
}
// MeterImpl returns the underlying MeterImpl of this Meter.
func (m Meter) MeterImpl() MeterImpl {
return m.impl
}
// newAsync constructs one new asynchronous instrument.
func (m Meter) newAsync(
name string,
mkind InstrumentKind,
nkind number.Kind,
opts []InstrumentOption,
runner AsyncRunner,
) (
AsyncImpl,
error,
) {
if m.impl == nil {
return NoopAsync{}, nil
}
desc := NewDescriptor(name, mkind, nkind, opts...)
desc.config.InstrumentationName = m.name
desc.config.InstrumentationVersion = m.version
return m.impl.NewAsyncInstrument(desc, runner)
}
// newSync constructs one new synchronous instrument.
func (m Meter) newSync(
name string,
metricKind InstrumentKind,
numberKind number.Kind,
opts []InstrumentOption,
) (
SyncImpl,
error,
) {
if m.impl == nil {
return NoopSync{}, nil
}
desc := NewDescriptor(name, metricKind, numberKind, opts...)
desc.config.InstrumentationName = m.name
desc.config.InstrumentationVersion = m.version
return m.impl.NewSyncInstrument(desc)
}
// MeterMust is a wrapper for Meter interfaces that panics when any
// instrument constructor encounters an error.
type MeterMust struct {
meter Meter
}
// BatchObserverMust is a wrapper for BatchObserver that panics when
// any instrument constructor encounters an error.
type BatchObserverMust struct {
batch BatchObserver
}
// Must constructs a MeterMust implementation from a Meter, allowing
// the application to panic when any instrument constructor yields an
// error.
func Must(meter Meter) MeterMust {
return MeterMust{meter: meter}
}
// NewInt64Counter calls `Meter.NewInt64Counter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter {
if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter {
if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter {
if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter {
if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...InstrumentOption) Int64ValueRecorder {
if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...InstrumentOption) Float64ValueRecorder {
if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64ValueObserver calls `Meter.NewInt64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64ValueObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64ValueObserver {
if inst, err := mm.meter.NewInt64ValueObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64ValueObserver calls `Meter.NewFloat64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64ValueObserver {
if inst, err := mm.meter.NewFloat64ValueObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64SumObserver calls `Meter.NewInt64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64SumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64SumObserver {
if inst, err := mm.meter.NewInt64SumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64SumObserver calls `Meter.NewFloat64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64SumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64SumObserver {
if inst, err := mm.meter.NewFloat64SumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64UpDownSumObserver calls `Meter.NewInt64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownSumObserver {
if inst, err := mm.meter.NewInt64UpDownSumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64UpDownSumObserver calls `Meter.NewFloat64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownSumObserver {
if inst, err := mm.meter.NewFloat64UpDownSumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewBatchObserver returns a wrapper around BatchObserver that panics
// when any instrument constructor returns an error.
func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust {
return BatchObserverMust{
batch: mm.meter.NewBatchObserver(callback),
}
}
// NewInt64ValueObserver calls `BatchObserver.NewInt64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewInt64ValueObserver(name string, oos ...InstrumentOption) Int64ValueObserver {
if inst, err := bm.batch.NewInt64ValueObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64ValueObserver calls `BatchObserver.NewFloat64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewFloat64ValueObserver(name string, oos ...InstrumentOption) Float64ValueObserver {
if inst, err := bm.batch.NewFloat64ValueObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64SumObserver calls `BatchObserver.NewInt64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewInt64SumObserver(name string, oos ...InstrumentOption) Int64SumObserver {
if inst, err := bm.batch.NewInt64SumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64SumObserver calls `BatchObserver.NewFloat64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewFloat64SumObserver(name string, oos ...InstrumentOption) Float64SumObserver {
if inst, err := bm.batch.NewFloat64SumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64UpDownSumObserver calls `BatchObserver.NewInt64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewInt64UpDownSumObserver(name string, oos ...InstrumentOption) Int64UpDownSumObserver {
if inst, err := bm.batch.NewInt64UpDownSumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64UpDownSumObserver calls `BatchObserver.NewFloat64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewFloat64UpDownSumObserver(name string, oos ...InstrumentOption) Float64UpDownSumObserver {
if inst, err := bm.batch.NewFloat64UpDownSumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// Descriptor contains all the settings that describe an instrument,
// including its name, metric kind, number kind, and the configurable
// options.
type Descriptor struct {
name string
instrumentKind InstrumentKind
numberKind number.Kind
config InstrumentConfig
}
// NewDescriptor returns a Descriptor with the given contents.
func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, opts ...InstrumentOption) Descriptor {
return Descriptor{
name: name,
instrumentKind: ikind,
numberKind: nkind,
config: NewInstrumentConfig(opts...),
}
}
// Name returns the metric instrument's name.
func (d Descriptor) Name() string {
return d.name
}
// InstrumentKind returns the specific kind of instrument.
func (d Descriptor) InstrumentKind() InstrumentKind {
return d.instrumentKind
}
// Description provides a human-readable description of the metric
// instrument.
func (d Descriptor) Description() string {
return d.config.Description
}
// Unit describes the units of the metric instrument. Unitless
// metrics return the empty string.
func (d Descriptor) Unit() unit.Unit {
return d.config.Unit
}
// NumberKind returns whether this instrument is declared over int64,
// float64, or uint64 values.
func (d Descriptor) NumberKind() number.Kind {
return d.numberKind
}
// InstrumentationName returns the name of the library that provided
// instrumentation for this instrument.
func (d Descriptor) InstrumentationName() string {
return d.config.InstrumentationName
}
// InstrumentationVersion returns the version of the library that provided
// instrumentation for this instrument.
func (d Descriptor) InstrumentationVersion() string {
return d.config.InstrumentationVersion
}
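
For reference, a minimal sketch (not part of the diff) of the Descriptor helpers above as used in the pre-rebase metric API that this commit removes; the instrument name is illustrative.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
)

func main() {
	// Build a Descriptor for a hypothetical int64 counter and read the
	// settings back through the accessors defined above.
	desc := metric.NewDescriptor("example.requests", metric.CounterInstrumentKind, number.Int64Kind)
	fmt.Println(desc.Name())           // example.requests
	fmt.Println(desc.InstrumentKind()) // CounterInstrumentKind
	fmt.Println(desc.NumberKind())     // Int64Kind
}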

View File

@ -1,777 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate stringer -type=InstrumentKind
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"errors"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/number"
)
// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil.
var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation")
// InstrumentKind describes the kind of instrument.
type InstrumentKind int8
const (
// ValueRecorderInstrumentKind indicates a ValueRecorder instrument.
ValueRecorderInstrumentKind InstrumentKind = iota
	// ValueObserverInstrumentKind indicates a ValueObserver instrument.
ValueObserverInstrumentKind
// CounterInstrumentKind indicates a Counter instrument.
CounterInstrumentKind
	// UpDownCounterInstrumentKind indicates an UpDownCounter instrument.
UpDownCounterInstrumentKind
// SumObserverInstrumentKind indicates a SumObserver instrument.
SumObserverInstrumentKind
	// UpDownSumObserverInstrumentKind indicates an UpDownSumObserver
// instrument.
UpDownSumObserverInstrumentKind
)
// Synchronous returns whether this is a synchronous kind of instrument.
func (k InstrumentKind) Synchronous() bool {
switch k {
case CounterInstrumentKind, UpDownCounterInstrumentKind, ValueRecorderInstrumentKind:
return true
}
return false
}
// Asynchronous returns whether this is an asynchronous kind of instrument.
func (k InstrumentKind) Asynchronous() bool {
return !k.Synchronous()
}
// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
func (k InstrumentKind) Adding() bool {
switch k {
case CounterInstrumentKind, UpDownCounterInstrumentKind, SumObserverInstrumentKind, UpDownSumObserverInstrumentKind:
return true
}
return false
}
// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
func (k InstrumentKind) Grouping() bool {
return !k.Adding()
}
// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
func (k InstrumentKind) Monotonic() bool {
switch k {
case CounterInstrumentKind, SumObserverInstrumentKind:
return true
}
return false
}
// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
func (k InstrumentKind) PrecomputedSum() bool {
return k.Adding() && k.Asynchronous()
}
// Observation is used for reporting an asynchronous batch of metric
// values. Instances of this type should be created by asynchronous
// instruments (e.g., Int64ValueObserver.Observation()).
type Observation struct {
// number needs to be aligned for 64-bit atomic operations.
number number.Number
instrument AsyncImpl
}
// Int64ObserverFunc is a type of callback that integral
// observers run.
type Int64ObserverFunc func(context.Context, Int64ObserverResult)
// Float64ObserverFunc is a type of callback that floating point
// observers run.
type Float64ObserverFunc func(context.Context, Float64ObserverResult)
// BatchObserverFunc is a callback argument for use with any
// Observer instrument that will be reported as a batch of
// observations.
type BatchObserverFunc func(context.Context, BatchObserverResult)
// Int64ObserverResult is passed to an observer callback to capture
// observations for one asynchronous integer metric instrument.
type Int64ObserverResult struct {
instrument AsyncImpl
function func([]attribute.KeyValue, ...Observation)
}
// Float64ObserverResult is passed to an observer callback to capture
// observations for one asynchronous floating point metric instrument.
type Float64ObserverResult struct {
instrument AsyncImpl
function func([]attribute.KeyValue, ...Observation)
}
// BatchObserverResult is passed to a batch observer callback to
// capture observations for multiple asynchronous instruments.
type BatchObserverResult struct {
function func([]attribute.KeyValue, ...Observation)
}
// Observe captures a single integer value from the associated
// instrument callback, with the given labels.
func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) {
ir.function(labels, Observation{
instrument: ir.instrument,
number: number.NewInt64Number(value),
})
}
// Observe captures a single floating point value from the associated
// instrument callback, with the given labels.
func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) {
fr.function(labels, Observation{
instrument: fr.instrument,
number: number.NewFloat64Number(value),
})
}
// Observe captures multiple observations from the associated batch
// instrument callback, with the given labels.
func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observation) {
br.function(labels, obs...)
}
// AsyncRunner is expected to convert into an AsyncSingleRunner or an
// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
// does not satisfy one of these interfaces.
type AsyncRunner interface {
// AnyRunner() is a non-exported method with no functional use
// other than to make this a non-empty interface.
AnyRunner()
}
// AsyncSingleRunner is an interface implemented by single-observer
// callbacks.
type AsyncSingleRunner interface {
// Run accepts a single instrument and function for capturing
// observations of that instrument. Each call to the function
// receives one captured observation. (The function accepts
// multiple observations so the same implementation can be
// used for batch runners.)
Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation))
AsyncRunner
}
// AsyncBatchRunner is an interface implemented by batch-observer
// callbacks.
type AsyncBatchRunner interface {
// Run accepts a function for capturing observations of
// multiple instruments.
Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation))
AsyncRunner
}
var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil)
var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil)
var _ AsyncBatchRunner = (*BatchObserverFunc)(nil)
// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments.
func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner {
return &c
}
// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments.
func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner {
return &c
}
// newBatchAsyncRunner returns a batch-observer callback for use with multiple Observer instruments.
func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner {
return &c
}
// AnyRunner implements AsyncRunner.
func (*Int64ObserverFunc) AnyRunner() {}
// AnyRunner implements AsyncRunner.
func (*Float64ObserverFunc) AnyRunner() {}
// AnyRunner implements AsyncRunner.
func (*BatchObserverFunc) AnyRunner() {}
// Run implements AsyncSingleRunner.
func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]attribute.KeyValue, ...Observation)) {
(*i)(ctx, Int64ObserverResult{
instrument: impl,
function: function,
})
}
// Run implements AsyncSingleRunner.
func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]attribute.KeyValue, ...Observation)) {
(*f)(ctx, Float64ObserverResult{
instrument: impl,
function: function,
})
}
// Run implements AsyncBatchRunner.
func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.KeyValue, ...Observation)) {
(*b)(ctx, BatchObserverResult{
function: function,
})
}
// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver.
func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64ValueObserver{asyncInstrument: common}, err
}
// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver.
func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64ValueObserver{asyncInstrument: common}, err
}
// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver.
func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64SumObserver{asyncInstrument: common}, err
}
// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver.
func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64SumObserver{asyncInstrument: common}, err
}
// wrapInt64UpDownSumObserverInstrument converts an AsyncImpl into Int64UpDownSumObserver.
func wrapInt64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownSumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64UpDownSumObserver{asyncInstrument: common}, err
}
// wrapFloat64UpDownSumObserverInstrument converts an AsyncImpl into Float64UpDownSumObserver.
func wrapFloat64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownSumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64UpDownSumObserver{asyncInstrument: common}, err
}
// BatchObserver represents an Observer callback that can report
// observations for multiple instruments.
type BatchObserver struct {
meter Meter
runner AsyncBatchRunner
}
// Int64ValueObserver is a metric that captures a set of int64 values at a
// point in time.
type Int64ValueObserver struct {
asyncInstrument
}
// Float64ValueObserver is a metric that captures a set of float64 values
// at a point in time.
type Float64ValueObserver struct {
asyncInstrument
}
// Int64SumObserver is a metric that captures a precomputed sum of
// int64 values at a point in time.
type Int64SumObserver struct {
asyncInstrument
}
// Float64SumObserver is a metric that captures a precomputed sum of
// float64 values at a point in time.
type Float64SumObserver struct {
asyncInstrument
}
// Int64UpDownSumObserver is a metric that captures a precomputed sum of
// int64 values at a point in time.
type Int64UpDownSumObserver struct {
asyncInstrument
}
// Float64UpDownSumObserver is a metric that captures a precomputed sum of
// float64 values at a point in time.
type Float64UpDownSumObserver struct {
asyncInstrument
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64ValueObserver) Observation(v int64) Observation {
return Observation{
number: number.NewInt64Number(v),
instrument: i.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64ValueObserver) Observation(v float64) Observation {
return Observation{
number: number.NewFloat64Number(v),
instrument: f.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64SumObserver) Observation(v int64) Observation {
return Observation{
number: number.NewInt64Number(v),
instrument: i.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64SumObserver) Observation(v float64) Observation {
return Observation{
number: number.NewFloat64Number(v),
instrument: f.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64UpDownSumObserver) Observation(v int64) Observation {
return Observation{
number: number.NewInt64Number(v),
instrument: i.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64UpDownSumObserver) Observation(v float64) Observation {
return Observation{
number: number.NewFloat64Number(v),
instrument: f.instrument,
}
}
// Measurement is used for reporting a synchronous batch of metric
// values. Instances of this type should be created by synchronous
// instruments (e.g., Int64Counter.Measurement()).
type Measurement struct {
// number needs to be aligned for 64-bit atomic operations.
number number.Number
instrument SyncImpl
}
// syncInstrument contains a SyncImpl.
type syncInstrument struct {
instrument SyncImpl
}
// syncBoundInstrument contains a BoundSyncImpl.
type syncBoundInstrument struct {
boundInstrument BoundSyncImpl
}
// asyncInstrument contains an AsyncImpl.
type asyncInstrument struct {
instrument AsyncImpl
}
// SyncImpl returns the instrument that created this measurement.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (m Measurement) SyncImpl() SyncImpl {
return m.instrument
}
// Number returns a number recorded in this measurement.
func (m Measurement) Number() number.Number {
return m.number
}
// AsyncImpl returns the instrument that created this observation.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (m Observation) AsyncImpl() AsyncImpl {
return m.instrument
}
// Number returns a number recorded in this observation.
func (m Observation) Number() number.Number {
return m.number
}
// AsyncImpl implements AsyncImpl.
func (a asyncInstrument) AsyncImpl() AsyncImpl {
return a.instrument
}
// SyncImpl returns the implementation object for synchronous instruments.
func (s syncInstrument) SyncImpl() SyncImpl {
return s.instrument
}
func (s syncInstrument) bind(labels []attribute.KeyValue) syncBoundInstrument {
return newSyncBoundInstrument(s.instrument.Bind(labels))
}
func (s syncInstrument) float64Measurement(value float64) Measurement {
return newMeasurement(s.instrument, number.NewFloat64Number(value))
}
func (s syncInstrument) int64Measurement(value int64) Measurement {
return newMeasurement(s.instrument, number.NewInt64Number(value))
}
func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) {
s.instrument.RecordOne(ctx, number, labels)
}
func (h syncBoundInstrument) directRecord(ctx context.Context, number number.Number) {
h.boundInstrument.RecordOne(ctx, number)
}
// Unbind calls SyncImpl.Unbind.
func (h syncBoundInstrument) Unbind() {
h.boundInstrument.Unbind()
}
// checkNewAsync receives an AsyncImpl and potential
// error, and returns the same types, checking for and ensuring that
// the returned interface is not nil.
func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) {
if instrument == nil {
if err == nil {
err = ErrSDKReturnedNilImpl
}
instrument = NoopAsync{}
}
return asyncInstrument{
instrument: instrument,
}, err
}
// checkNewSync receives a SyncImpl and potential
// error, and returns the same types, checking for and ensuring that
// the returned interface is not nil.
func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) {
if instrument == nil {
if err == nil {
err = ErrSDKReturnedNilImpl
}
// Note: an alternate behavior would be to synthesize a new name
// or group all duplicately-named instruments of a certain type
// together and use a tag for the original name, e.g.,
// name = 'invalid.counter.int64'
// label = 'original-name=duplicate-counter-name'
instrument = NoopSync{}
}
return syncInstrument{
instrument: instrument,
}, err
}
func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument {
return syncBoundInstrument{
boundInstrument: boundInstrument,
}
}
func newMeasurement(instrument SyncImpl, number number.Number) Measurement {
return Measurement{
instrument: instrument,
number: number,
}
}
// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) {
common, err := checkNewSync(syncInst, err)
return Int64Counter{syncInstrument: common}, err
}
// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) {
common, err := checkNewSync(syncInst, err)
return Float64Counter{syncInstrument: common}, err
}
// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) {
common, err := checkNewSync(syncInst, err)
return Int64UpDownCounter{syncInstrument: common}, err
}
// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) {
common, err := checkNewSync(syncInst, err)
return Float64UpDownCounter{syncInstrument: common}, err
}
// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder.
func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
common, err := checkNewSync(syncInst, err)
return Int64ValueRecorder{syncInstrument: common}, err
}
// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder.
func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
common, err := checkNewSync(syncInst, err)
return Float64ValueRecorder{syncInstrument: common}, err
}
// Float64Counter is a metric that accumulates float64 values.
type Float64Counter struct {
syncInstrument
}
// Int64Counter is a metric that accumulates int64 values.
type Int64Counter struct {
syncInstrument
}
// BoundFloat64Counter is a bound instrument for Float64Counter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64Counter struct {
syncBoundInstrument
}
// BoundInt64Counter is a bound instrument for Int64Counter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64Counter struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Float64Counter) Bind(labels ...attribute.KeyValue) (h BoundFloat64Counter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Int64Counter) Bind(labels ...attribute.KeyValue) (h BoundInt64Counter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64Counter) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64Counter) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Float64Counter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) {
c.directRecord(ctx, number.NewFloat64Number(value), labels)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Int64Counter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) {
c.directRecord(ctx, number.NewInt64Number(value), labels)
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundFloat64Counter) Add(ctx context.Context, value float64) {
b.directRecord(ctx, number.NewFloat64Number(value))
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundInt64Counter) Add(ctx context.Context, value int64) {
b.directRecord(ctx, number.NewInt64Number(value))
}
// Float64UpDownCounter is a metric instrument that sums floating
// point values.
type Float64UpDownCounter struct {
syncInstrument
}
// Int64UpDownCounter is a metric instrument that sums integer values.
type Int64UpDownCounter struct {
syncInstrument
}
// BoundFloat64UpDownCounter is a bound instrument for Float64UpDownCounter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64UpDownCounter struct {
syncBoundInstrument
}
// BoundInt64UpDownCounter is a bound instrument for Int64UpDownCounter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64UpDownCounter struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Float64UpDownCounter) Bind(labels ...attribute.KeyValue) (h BoundFloat64UpDownCounter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Int64UpDownCounter) Bind(labels ...attribute.KeyValue) (h BoundInt64UpDownCounter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64UpDownCounter) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64UpDownCounter) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) {
c.directRecord(ctx, number.NewFloat64Number(value), labels)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) {
c.directRecord(ctx, number.NewInt64Number(value), labels)
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) {
b.directRecord(ctx, number.NewFloat64Number(value))
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) {
b.directRecord(ctx, number.NewInt64Number(value))
}
// Float64ValueRecorder is a metric that records float64 values.
type Float64ValueRecorder struct {
syncInstrument
}
// Int64ValueRecorder is a metric that records int64 values.
type Int64ValueRecorder struct {
syncInstrument
}
// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64ValueRecorder struct {
syncBoundInstrument
}
// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64ValueRecorder struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this ValueRecorder. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Float64ValueRecorder) Bind(labels ...attribute.KeyValue) (h BoundFloat64ValueRecorder) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this ValueRecorder. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Int64ValueRecorder) Bind(labels ...attribute.KeyValue) (h BoundInt64ValueRecorder) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64ValueRecorder) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64ValueRecorder) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Record adds a new value to the ValueRecorder's distribution. The
// labels should contain the keys and values to be associated with
// this value.
func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...attribute.KeyValue) {
c.directRecord(ctx, number.NewFloat64Number(value), labels)
}
// Record adds a new value to the ValueRecorder's distribution. The
// labels should contain the keys and values to be associated with
// this value.
func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...attribute.KeyValue) {
c.directRecord(ctx, number.NewInt64Number(value), labels)
}
// Record adds a new value to the ValueRecorder's distribution using the labels
// previously bound to the ValueRecorder via Bind().
func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
b.directRecord(ctx, number.NewFloat64Number(value))
}
// Record adds a new value to the ValueRecorder's distribution using the labels
// previously bound to the ValueRecorder via Bind().
func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
b.directRecord(ctx, number.NewInt64Number(value))
}
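
For reviewers, a minimal sketch (not part of the diff) of how the synchronous and asynchronous instruments deleted above were typically driven in the pre-rebase API; the no-op provider keeps it side-effect free, and the metric names and attributes are illustrative.

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	// The old API's zero-value NoopMeterProvider hands back a Meter whose
	// instruments record nothing.
	meter := metric.NoopMeterProvider{}.Meter("example")

	// Synchronous path: a ValueRecorder captures individual measurements.
	rec, err := meter.NewFloat64ValueRecorder("request.duration")
	if err != nil {
		panic(err)
	}
	rec.Record(ctx, 0.42, attribute.String("route", "/healthz"))

	// Asynchronous path: a ValueObserver reports values from a callback.
	_, err = meter.NewInt64ValueObserver("queue.length",
		func(_ context.Context, result metric.Int64ObserverResult) {
			result.Observe(7, attribute.String("queue", "default"))
		})
	if err != nil {
		panic(err)
	}
}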

View File

@ -1,59 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/number"
)
type NoopMeterProvider struct{}
type noopInstrument struct{}
type noopBoundInstrument struct{}
type NoopSync struct{ noopInstrument }
type NoopAsync struct{ noopInstrument }
var _ MeterProvider = NoopMeterProvider{}
var _ SyncImpl = NoopSync{}
var _ BoundSyncImpl = noopBoundInstrument{}
var _ AsyncImpl = NoopAsync{}
func (NoopMeterProvider) Meter(_ string, _ ...MeterOption) Meter {
return Meter{}
}
func (noopInstrument) Implementation() interface{} {
return nil
}
func (noopInstrument) Descriptor() Descriptor {
return Descriptor{}
}
func (noopBoundInstrument) RecordOne(context.Context, number.Number) {
}
func (noopBoundInstrument) Unbind() {
}
func (NoopSync) Bind([]attribute.KeyValue) BoundSyncImpl {
return noopBoundInstrument{}
}
func (NoopSync) RecordOne(context.Context, number.Number, []attribute.KeyValue) {
}

View File

@ -1,95 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/number"
)
// MeterImpl is the interface an SDK must implement to supply a Meter
// implementation.
type MeterImpl interface {
// RecordBatch atomically records a batch of measurements.
RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement)
// NewSyncInstrument returns a newly constructed
// synchronous instrument implementation or an error, should
// one occur.
NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
// NewAsyncInstrument returns a newly constructed
// asynchronous instrument implementation or an error, should
// one occur.
NewAsyncInstrument(
descriptor Descriptor,
runner AsyncRunner,
) (AsyncImpl, error)
}
// InstrumentImpl is a common interface for synchronous and
// asynchronous instruments.
type InstrumentImpl interface {
// Implementation returns the underlying implementation of the
// instrument, which allows the implementation to gain access
// to its own representation especially from a `Measurement`.
Implementation() interface{}
// Descriptor returns a copy of the instrument's Descriptor.
Descriptor() Descriptor
}
// SyncImpl is the implementation-level interface to a generic
// synchronous instrument (e.g., ValueRecorder and Counter instruments).
type SyncImpl interface {
InstrumentImpl
// Bind creates an implementation-level bound instrument,
// binding a label set with this instrument implementation.
Bind(labels []attribute.KeyValue) BoundSyncImpl
// RecordOne captures a single synchronous metric event.
RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue)
}
// BoundSyncImpl is the implementation-level interface to a
// generic bound synchronous instrument
type BoundSyncImpl interface {
// RecordOne captures a single synchronous metric event.
RecordOne(ctx context.Context, number number.Number)
// Unbind frees the resources associated with this bound instrument. It
// does not affect the metric this bound instrument was created through.
Unbind()
}
// AsyncImpl is an implementation-level interface to an
// asynchronous instrument (e.g., Observer instruments).
type AsyncImpl interface {
InstrumentImpl
}
// WrapMeterImpl constructs a `Meter` implementation from a
// `MeterImpl` implementation.
func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
return Meter{
impl: impl,
name: instrumentationName,
version: NewMeterConfig(opts...).InstrumentationVersion,
}
}
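
For orientation, a minimal sketch (not part of the diff) of a custom MeterImpl satisfying the interface removed above; it only counts instrument creations and delegates to the old no-op instrument types, so it is illustrative rather than representative of the real SDK.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// countingImpl is a hypothetical MeterImpl that counts instrument creations
// and otherwise behaves like a no-op.
type countingImpl struct {
	created int
}

func (c *countingImpl) RecordBatch(context.Context, []attribute.KeyValue, ...metric.Measurement) {}

func (c *countingImpl) NewSyncInstrument(metric.Descriptor) (metric.SyncImpl, error) {
	c.created++
	return metric.NoopSync{}, nil
}

func (c *countingImpl) NewAsyncInstrument(metric.Descriptor, metric.AsyncRunner) (metric.AsyncImpl, error) {
	c.created++
	return metric.NoopAsync{}, nil
}

func main() {
	impl := &countingImpl{}
	// WrapMeterImpl turns the implementation into a user-facing Meter.
	meter := metric.WrapMeterImpl(impl, "example")
	if _, err := meter.NewFloat64ValueRecorder("latency"); err != nil {
		panic(err)
	}
	fmt.Println(impl.created) // expected: 1
}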

181
vendor/go.opentelemetry.io/otel/metric/noop.go generated vendored Normal file
View File

@ -0,0 +1,181 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
)
// NewNoopMeterProvider creates a MeterProvider that does not record any metrics.
func NewNoopMeterProvider() MeterProvider {
return noopMeterProvider{}
}
type noopMeterProvider struct{}
func (noopMeterProvider) Meter(string, ...MeterOption) Meter {
return noopMeter{}
}
// NewNoopMeter creates a Meter that does not record any metrics.
func NewNoopMeter() Meter {
return noopMeter{}
}
type noopMeter struct{}
// AsyncInt64 creates an instrument that does not record any metrics.
func (noopMeter) AsyncInt64() asyncint64.InstrumentProvider {
return nonrecordingAsyncInt64Instrument{}
}
// AsyncFloat64 creates an instrument that does not record any metrics.
func (noopMeter) AsyncFloat64() asyncfloat64.InstrumentProvider {
return nonrecordingAsyncFloat64Instrument{}
}
// SyncInt64 creates an instrument that does not record any metrics.
func (noopMeter) SyncInt64() syncint64.InstrumentProvider {
return nonrecordingSyncInt64Instrument{}
}
// SyncFloat64 creates an instrument that does not record any metrics.
func (noopMeter) SyncFloat64() syncfloat64.InstrumentProvider {
return nonrecordingSyncFloat64Instrument{}
}
// RegisterCallback creates a register callback that does not record any metrics.
func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error {
return nil
}
type nonrecordingAsyncFloat64Instrument struct {
instrument.Asynchronous
}
var (
_ asyncfloat64.InstrumentProvider = nonrecordingAsyncFloat64Instrument{}
_ asyncfloat64.Counter = nonrecordingAsyncFloat64Instrument{}
_ asyncfloat64.UpDownCounter = nonrecordingAsyncFloat64Instrument{}
_ asyncfloat64.Gauge = nonrecordingAsyncFloat64Instrument{}
)
func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Option) (asyncfloat64.Counter, error) {
return n, nil
}
func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
return n, nil
}
func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Option) (asyncfloat64.Gauge, error) {
return n, nil
}
func (nonrecordingAsyncFloat64Instrument) Observe(context.Context, float64, ...attribute.KeyValue) {
}
type nonrecordingAsyncInt64Instrument struct {
instrument.Asynchronous
}
var (
_ asyncint64.InstrumentProvider = nonrecordingAsyncInt64Instrument{}
_ asyncint64.Counter = nonrecordingAsyncInt64Instrument{}
_ asyncint64.UpDownCounter = nonrecordingAsyncInt64Instrument{}
_ asyncint64.Gauge = nonrecordingAsyncInt64Instrument{}
)
func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Option) (asyncint64.Counter, error) {
return n, nil
}
func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (asyncint64.UpDownCounter, error) {
return n, nil
}
func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Option) (asyncint64.Gauge, error) {
return n, nil
}
func (nonrecordingAsyncInt64Instrument) Observe(context.Context, int64, ...attribute.KeyValue) {
}
type nonrecordingSyncFloat64Instrument struct {
instrument.Synchronous
}
var (
_ syncfloat64.InstrumentProvider = nonrecordingSyncFloat64Instrument{}
_ syncfloat64.Counter = nonrecordingSyncFloat64Instrument{}
_ syncfloat64.UpDownCounter = nonrecordingSyncFloat64Instrument{}
_ syncfloat64.Histogram = nonrecordingSyncFloat64Instrument{}
)
func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Option) (syncfloat64.Counter, error) {
return n, nil
}
func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (syncfloat64.UpDownCounter, error) {
return n, nil
}
func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Option) (syncfloat64.Histogram, error) {
return n, nil
}
func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {
}
func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {
}
type nonrecordingSyncInt64Instrument struct {
instrument.Synchronous
}
var (
_ syncint64.InstrumentProvider = nonrecordingSyncInt64Instrument{}
_ syncint64.Counter = nonrecordingSyncInt64Instrument{}
_ syncint64.UpDownCounter = nonrecordingSyncInt64Instrument{}
_ syncint64.Histogram = nonrecordingSyncInt64Instrument{}
)
func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Option) (syncint64.Counter, error) {
return n, nil
}
func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) {
return n, nil
}
func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) {
return n, nil
}
func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {
}
func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {
}
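
For comparison with the files deleted above, a minimal sketch (not part of the diff) of the replacement no-op API added here; the instrument names and attributes are illustrative.

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	// The new no-op meter exposes the per-kind instrument providers used by
	// the 1.26-era vendored API: SyncInt64(), AsyncFloat64(), and so on.
	meter := metric.NewNoopMeter()

	counter, err := meter.SyncInt64().Counter("example.requests")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1, attribute.String("route", "/healthz"))

	gauge, err := meter.AsyncFloat64().Gauge("example.temperature")
	if err != nil {
		panic(err)
	}
	gauge.Observe(ctx, 21.5, attribute.String("room", "lab"))
}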

View File

@ -1,23 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package number provides a number abstraction for instruments that
either support int64 or float64 input values.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
*/
package number // import "go.opentelemetry.io/otel/metric/number"

View File

@ -1,24 +0,0 @@
// Code generated by "stringer -type=Kind"; DO NOT EDIT.
package number
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Int64Kind-0]
_ = x[Float64Kind-1]
}
const _Kind_name = "Int64KindFloat64Kind"
var _Kind_index = [...]uint8{0, 9, 20}
func (i Kind) String() string {
if i < 0 || i >= Kind(len(_Kind_index)-1) {
return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
}
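
The generated table above packs both constant names into a single string and slices it by index; a short worked example of the lookup (not part of the diff):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/number"
)

func main() {
	// Int64Kind (0) selects _Kind_name[0:9]    -> "Int64Kind"
	// Float64Kind (1) selects _Kind_name[9:20] -> "Float64Kind"
	fmt.Println(number.Int64Kind.String())   // Int64Kind
	fmt.Println(number.Float64Kind.String()) // Float64Kind
	// Out-of-range values fall back to the numeric form.
	fmt.Println(number.Kind(7).String()) // Kind(7)
}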

View File

@ -1,538 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package number // import "go.opentelemetry.io/otel/metric/number"
//go:generate stringer -type=Kind
import (
"fmt"
"math"
"sync/atomic"
"go.opentelemetry.io/otel/internal"
)
// Kind describes the data type of the Number.
type Kind int8
const (
// Int64Kind means that the Number stores int64.
Int64Kind Kind = iota
// Float64Kind means that the Number stores float64.
Float64Kind
)
// Zero returns a zero value for a given Kind
func (k Kind) Zero() Number {
switch k {
case Int64Kind:
return NewInt64Number(0)
case Float64Kind:
return NewFloat64Number(0.)
default:
return Number(0)
}
}
// Minimum returns the minimum representable value
// for a given Kind
func (k Kind) Minimum() Number {
switch k {
case Int64Kind:
return NewInt64Number(math.MinInt64)
case Float64Kind:
return NewFloat64Number(-1. * math.MaxFloat64)
default:
return Number(0)
}
}
// Maximum returns the maximum representable value
// for a given Kind
func (k Kind) Maximum() Number {
switch k {
case Int64Kind:
return NewInt64Number(math.MaxInt64)
case Float64Kind:
return NewFloat64Number(math.MaxFloat64)
default:
return Number(0)
}
}
// Number represents either an integral or a floating point value. It
// needs to be accompanied by a Kind that describes
// the actual type of the value stored within Number.
type Number uint64
// - constructors
// NewNumberFromRaw creates a new Number from a raw value.
func NewNumberFromRaw(r uint64) Number {
return Number(r)
}
// NewInt64Number creates an integral Number.
func NewInt64Number(i int64) Number {
return NewNumberFromRaw(internal.Int64ToRaw(i))
}
// NewFloat64Number creates a floating point Number.
func NewFloat64Number(f float64) Number {
return NewNumberFromRaw(internal.Float64ToRaw(f))
}
// NewNumberSignChange returns a number with the same magnitude and
// the opposite sign. `kind` must describe the kind of number in `nn`.
func NewNumberSignChange(kind Kind, nn Number) Number {
switch kind {
case Int64Kind:
return NewInt64Number(-nn.AsInt64())
case Float64Kind:
return NewFloat64Number(-nn.AsFloat64())
}
return nn
}
// - as x
// AsNumber gets the Number.
func (n *Number) AsNumber() Number {
return *n
}
// AsRaw gets the uninterpreted raw value. Might be useful for some
// atomic operations.
func (n *Number) AsRaw() uint64 {
return uint64(*n)
}
// AsInt64 assumes that the value contains an int64 and returns it as
// such.
func (n *Number) AsInt64() int64 {
return internal.RawToInt64(n.AsRaw())
}
// AsFloat64 assumes that the measurement value contains a float64 and
// returns it as such.
func (n *Number) AsFloat64() float64 {
return internal.RawToFloat64(n.AsRaw())
}
// - as x atomic
// AsNumberAtomic gets the Number atomically.
func (n *Number) AsNumberAtomic() Number {
return NewNumberFromRaw(n.AsRawAtomic())
}
// AsRawAtomic gets the uninterpreted raw value atomically. Might be
// useful for some atomic operations.
func (n *Number) AsRawAtomic() uint64 {
return atomic.LoadUint64(n.AsRawPtr())
}
// AsInt64Atomic assumes that the number contains an int64 and returns
// it as such atomically.
func (n *Number) AsInt64Atomic() int64 {
return atomic.LoadInt64(n.AsInt64Ptr())
}
// AsFloat64Atomic assumes that the measurement value contains a
// float64 and returns it as such atomically.
func (n *Number) AsFloat64Atomic() float64 {
return internal.RawToFloat64(n.AsRawAtomic())
}
// - as x ptr
// AsRawPtr gets the pointer to the raw, uninterpreted value. Might be
// useful for some atomic operations.
func (n *Number) AsRawPtr() *uint64 {
return (*uint64)(n)
}
// AsInt64Ptr assumes that the number contains an int64 and returns a
// pointer to it.
func (n *Number) AsInt64Ptr() *int64 {
return internal.RawPtrToInt64Ptr(n.AsRawPtr())
}
// AsFloat64Ptr assumes that the number contains a float64 and returns a
// pointer to it.
func (n *Number) AsFloat64Ptr() *float64 {
return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
}
// - coerce
// CoerceToInt64 casts the number to int64. May result in
// data/precision loss.
func (n *Number) CoerceToInt64(kind Kind) int64 {
switch kind {
case Int64Kind:
return n.AsInt64()
case Float64Kind:
return int64(n.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CoerceToFloat64 casts the number to float64. May result in
// data/precision loss.
func (n *Number) CoerceToFloat64(kind Kind) float64 {
switch kind {
case Int64Kind:
return float64(n.AsInt64())
case Float64Kind:
return n.AsFloat64()
default:
// you get what you deserve
return 0
}
}
// - set
// SetNumber sets the number to the passed number. Both should be of
// the same kind.
func (n *Number) SetNumber(nn Number) {
*n.AsRawPtr() = nn.AsRaw()
}
// SetRaw sets the number to the passed raw value. Both number and the
// raw number should represent the same kind.
func (n *Number) SetRaw(r uint64) {
*n.AsRawPtr() = r
}
// SetInt64 assumes that the number contains an int64 and sets it to
// the passed value.
func (n *Number) SetInt64(i int64) {
*n.AsInt64Ptr() = i
}
// SetFloat64 assumes that the number contains a float64 and sets it
// to the passed value.
func (n *Number) SetFloat64(f float64) {
*n.AsFloat64Ptr() = f
}
// - set atomic
// SetNumberAtomic sets the number to the passed number
// atomically. Both should be of the same kind.
func (n *Number) SetNumberAtomic(nn Number) {
atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
}
// SetRawAtomic sets the number to the passed raw value
// atomically. Both number and the raw number should represent the
// same kind.
func (n *Number) SetRawAtomic(r uint64) {
atomic.StoreUint64(n.AsRawPtr(), r)
}
// SetInt64Atomic assumes that the number contains an int64 and sets
// it to the passed value atomically.
func (n *Number) SetInt64Atomic(i int64) {
atomic.StoreInt64(n.AsInt64Ptr(), i)
}
// SetFloat64Atomic assumes that the number contains a float64 and
// sets it to the passed value atomically.
func (n *Number) SetFloat64Atomic(f float64) {
atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
}
// - swap
// SwapNumber sets the number to the passed number and returns the old
// number. Both this number and the passed number should be of the
// same kind.
func (n *Number) SwapNumber(nn Number) Number {
old := *n
n.SetNumber(nn)
return old
}
// SwapRaw sets the number to the passed raw value and returns the old
// raw value. Both number and the raw number should represent the same
// kind.
func (n *Number) SwapRaw(r uint64) uint64 {
old := n.AsRaw()
n.SetRaw(r)
return old
}
// SwapInt64 assumes that the number contains an int64, sets it to the
// passed value and returns the old int64 value.
func (n *Number) SwapInt64(i int64) int64 {
old := n.AsInt64()
n.SetInt64(i)
return old
}
// SwapFloat64 assumes that the number contains a float64, sets it to
// the passed value and returns the old float64 value.
func (n *Number) SwapFloat64(f float64) float64 {
old := n.AsFloat64()
n.SetFloat64(f)
return old
}
// - swap atomic
// SwapNumberAtomic sets the number to the passed number and returns
// the old number atomically. Both this number and the passed number
// should be of the same kind.
func (n *Number) SwapNumberAtomic(nn Number) Number {
return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
}
// SwapRawAtomic sets the number to the passed raw value and returns
// the old raw value atomically. Both number and the raw number should
// represent the same kind.
func (n *Number) SwapRawAtomic(r uint64) uint64 {
return atomic.SwapUint64(n.AsRawPtr(), r)
}
// SwapInt64Atomic assumes that the number contains an int64, sets it
// to the passed value and returns the old int64 value atomically.
func (n *Number) SwapInt64Atomic(i int64) int64 {
return atomic.SwapInt64(n.AsInt64Ptr(), i)
}
// SwapFloat64Atomic assumes that the number contains a float64, sets
// it to the passed value and returns the old float64 value
// atomically.
func (n *Number) SwapFloat64Atomic(f float64) float64 {
return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
}
// - add
// AddNumber assumes that this and the passed number are of the passed
// kind and adds the passed number to this number.
func (n *Number) AddNumber(kind Kind, nn Number) {
switch kind {
case Int64Kind:
n.AddInt64(nn.AsInt64())
case Float64Kind:
n.AddFloat64(nn.AsFloat64())
}
}
// AddRaw assumes that this number and the passed raw value are of the
// passed kind and adds the passed raw value to this number.
func (n *Number) AddRaw(kind Kind, r uint64) {
n.AddNumber(kind, NewNumberFromRaw(r))
}
// AddInt64 assumes that the number contains an int64 and adds the
// passed int64 to it.
func (n *Number) AddInt64(i int64) {
*n.AsInt64Ptr() += i
}
// AddFloat64 assumes that the number contains a float64 and adds the
// passed float64 to it.
func (n *Number) AddFloat64(f float64) {
*n.AsFloat64Ptr() += f
}
// - add atomic
// AddNumberAtomic assumes that this and the passed number are of the
// passed kind and adds the passed number to this number atomically.
func (n *Number) AddNumberAtomic(kind Kind, nn Number) {
switch kind {
case Int64Kind:
n.AddInt64Atomic(nn.AsInt64())
case Float64Kind:
n.AddFloat64Atomic(nn.AsFloat64())
}
}
// AddRawAtomic assumes that this number and the passed raw value are
// of the passed kind and adds the passed raw value to this number
// atomically.
func (n *Number) AddRawAtomic(kind Kind, r uint64) {
n.AddNumberAtomic(kind, NewNumberFromRaw(r))
}
// AddInt64Atomic assumes that the number contains an int64 and adds
// the passed int64 to it atomically.
func (n *Number) AddInt64Atomic(i int64) {
atomic.AddInt64(n.AsInt64Ptr(), i)
}
// AddFloat64Atomic assumes that the number contains a float64 and
// adds the passed float64 to it atomically.
func (n *Number) AddFloat64Atomic(f float64) {
for {
o := n.AsFloat64Atomic()
if n.CompareAndSwapFloat64(o, o+f) {
break
}
}
}
// - compare and swap (atomic only)
// CompareAndSwapNumber does the atomic CAS operation on this
// number. This number and passed old and new numbers should be of the
// same kind.
func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
}
// CompareAndSwapRaw does the atomic CAS operation on this
// number. This number and passed old and new raw values should be of
// the same kind.
func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
}
// CompareAndSwapInt64 assumes that this number contains an int64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
}
// CompareAndSwapFloat64 assumes that this number contains a float64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
}
// - compare
// CompareNumber compares two Numbers given their kind. Both numbers
// should have the same kind. This returns:
// 0 if the numbers are equal
// -1 if the subject `n` is less than the argument `nn`
// +1 if the subject `n` is greater than the argument `nn`
func (n *Number) CompareNumber(kind Kind, nn Number) int {
switch kind {
case Int64Kind:
return n.CompareInt64(nn.AsInt64())
case Float64Kind:
return n.CompareFloat64(nn.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CompareRaw compares two numbers, where one is input as a raw
// uint64, interpreting both values as a `kind` of number.
func (n *Number) CompareRaw(kind Kind, r uint64) int {
return n.CompareNumber(kind, NewNumberFromRaw(r))
}
// CompareInt64 assumes that the Number contains an int64 and performs
// a comparison between the value and the other value. It returns the
// typical result of the compare function: -1 if the value is less
// than the other, 0 if both are equal, 1 if the value is greater than
// the other.
func (n *Number) CompareInt64(i int64) int {
this := n.AsInt64()
if this < i {
return -1
} else if this > i {
return 1
}
return 0
}
// CompareFloat64 assumes that the Number contains a float64 and
// performs a comparison between the value and the other value. It
// returns the typical result of the compare function: -1 if the value
// is less than the other, 0 if both are equal, 1 if the value is
// greater than the other.
//
// Do not compare NaN values.
func (n *Number) CompareFloat64(f float64) int {
this := n.AsFloat64()
if this < f {
return -1
} else if this > f {
return 1
}
return 0
}
// - relations to zero
// IsPositive returns true if the actual value is greater than zero.
func (n *Number) IsPositive(kind Kind) bool {
return n.compareWithZero(kind) > 0
}
// IsNegative returns true if the actual value is less than zero.
func (n *Number) IsNegative(kind Kind) bool {
return n.compareWithZero(kind) < 0
}
// IsZero returns true if the actual value is equal to zero.
func (n *Number) IsZero(kind Kind) bool {
return n.compareWithZero(kind) == 0
}
// - misc
// Emit returns a string representation of the raw value of the
// Number. A %d is used for integral values, %f for floating point
// values.
func (n *Number) Emit(kind Kind) string {
switch kind {
case Int64Kind:
return fmt.Sprintf("%d", n.AsInt64())
case Float64Kind:
return fmt.Sprintf("%f", n.AsFloat64())
default:
return ""
}
}
// AsInterface returns the number as an interface{}, typically used
// for Kind-correct JSON conversion.
func (n *Number) AsInterface(kind Kind) interface{} {
switch kind {
case Int64Kind:
return n.AsInt64()
case Float64Kind:
return n.AsFloat64()
default:
return math.NaN()
}
}
// - private stuff
func (n *Number) compareWithZero(kind Kind) int {
switch kind {
case Int64Kind:
return n.CompareInt64(0)
case Float64Kind:
return n.CompareFloat64(0.)
default:
// you get what you deserve
return 0
}
}
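
A small sketch (not part of the diff) of the Number/Kind pairing defined above; the values are illustrative.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/number"
)

func main() {
	// A Number stores raw bits; the accompanying Kind says how to read them.
	n := number.NewInt64Number(10)
	n.AddInt64(5)
	fmt.Println(n.AsInt64())              // 15
	fmt.Println(n.Emit(number.Int64Kind)) // 15

	f := number.NewFloat64Number(1.25)
	f.AddFloat64Atomic(0.75)
	fmt.Println(f.CoerceToInt64(number.Float64Kind)) // 2
	fmt.Println(f.IsPositive(number.Float64Kind))    // true
}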

View File

@ -1,170 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry // import "go.opentelemetry.io/otel/metric/registry"
import (
"context"
"fmt"
"sync"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
)
// MeterProvider is a standard MeterProvider that wraps a `MeterImpl`.
type MeterProvider struct {
impl metric.MeterImpl
}
var _ metric.MeterProvider = (*MeterProvider)(nil)
// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter
// to wrap an implementation with uniqueness checking.
type uniqueInstrumentMeterImpl struct {
lock sync.Mutex
impl metric.MeterImpl
state map[key]metric.InstrumentImpl
}
var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
type key struct {
instrumentName string
instrumentationName string
instrumentationVersion string
}
// NewMeterProvider returns a new provider that implements instrument
// name-uniqueness checking.
func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
return &MeterProvider{
impl: NewUniqueInstrumentMeterImpl(impl),
}
}
// Meter implements MeterProvider.
func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
}
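// Illustrative sketch, not part of the vendored file above: wrapping an
// implementation so that instrument names are checked for uniqueness. The
// registry package is removed by this commit; `impl` is a placeholder for a
// concrete metric.MeterImpl and is assumed rather than shown.
package main

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/registry"
)

func main() {
	var impl metric.MeterImpl // assumption: a real SDK implementation goes here
	provider := registry.NewMeterProvider(impl)
	meter := provider.Meter("example.io/instrumentation")
	_ = meter // instruments created via meter share the uniqueness check
}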
// ErrMetricKindMismatch is the standard error for mismatched metric
// instrument definitions.
var ErrMetricKindMismatch = fmt.Errorf(
"a metric was already registered by this name with another kind or number type")
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
// the addition of uniqueness checking.
func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
return &uniqueInstrumentMeterImpl{
impl: impl,
state: map[key]metric.InstrumentImpl{},
}
}
// RecordBatch implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...metric.Measurement) {
u.impl.RecordBatch(ctx, labels, ms...)
}
func keyOf(descriptor metric.Descriptor) key {
return key{
descriptor.Name(),
descriptor.InstrumentationName(),
descriptor.InstrumentationVersion(),
}
}
// NewMetricKindMismatchError formats an error that describes a
// mismatched metric instrument definition.
func NewMetricKindMismatchError(desc metric.Descriptor) error {
return fmt.Errorf("metric was %s (%s %s)registered as a %s %s: %w",
desc.Name(),
desc.InstrumentationName(),
desc.InstrumentationVersion(),
desc.NumberKind(),
desc.InstrumentKind(),
ErrMetricKindMismatch)
}
// Compatible determines whether two metric.Descriptors are considered
// the same for the purpose of uniqueness checking.
func Compatible(candidate, existing metric.Descriptor) bool {
return candidate.InstrumentKind() == existing.InstrumentKind() &&
candidate.NumberKind() == existing.NumberKind()
}
// checkUniqueness returns an ErrMetricKindMismatch error if there is
// a conflict between a descriptor that was already registered and the
// `descriptor` argument. If there is an existing compatible
// registration, this returns the already-registered instrument. If
// there is no conflict and no prior registration, returns (nil, nil).
func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
impl, ok := u.state[keyOf(descriptor)]
if !ok {
return nil, nil
}
if !Compatible(descriptor, impl.Descriptor()) {
return nil, NewMetricKindMismatchError(impl.Descriptor())
}
return impl, nil
}
// NewSyncInstrument implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(metric.SyncImpl), nil
}
syncInst, err := u.impl.NewSyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[keyOf(descriptor)] = syncInst
return syncInst, nil
}
// NewAsyncInstrument implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
descriptor metric.Descriptor,
runner metric.AsyncRunner,
) (metric.AsyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(metric.AsyncImpl), nil
}
asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner)
if err != nil {
return nil, err
}
u.state[keyOf(descriptor)] = asyncInst
return asyncInst, nil
}

View File

@ -17,4 +17,4 @@
// This package is currently in a pre-GA phase. Backwards incompatible changes
// may be introduced in subsequent minor version releases as we work to track
// the evolving OpenTelemetry specification and user feedback.
package unit // import "go.opentelemetry.io/otel/unit"
package unit // import "go.opentelemetry.io/otel/metric/unit"

View File

@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package unit // import "go.opentelemetry.io/otel/unit"
package unit // import "go.opentelemetry.io/otel/metric/unit"
// Unit is a determinate standard quantity of measurement.
type Unit string
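// Illustrative sketch, not part of the vendored file above: a Unit is a plain
// string value, here using the Bytes constant defined just below and the new
// import path go.opentelemetry.io/otel/metric/unit.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/unit"
)

func main() {
	var u unit.Unit = unit.Bytes
	fmt.Println(string(u)) // "By"
}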
// Units defined by OpenTelemetry.
const (
Dimensionless Unit = "1"
Bytes Unit = "By"

View File

@ -1,95 +0,0 @@
#!/bin/bash
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
help()
{
printf "\n"
printf "Usage: $0 -t tag\n"
printf "\t-t Unreleased tag. Update all go.mod with this tag.\n"
exit 1 # Exit script after printing help
}
while getopts "t:" opt
do
case "$opt" in
t ) TAG="$OPTARG" ;;
? ) help ;; # Print help
esac
done
# Print help in case parameters are empty
if [ -z "$TAG" ]
then
printf "Tag is missing\n";
help
fi
# Validate semver
SEMVER_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
if [[ "${TAG}" =~ ${SEMVER_REGEX} ]]; then
printf "${TAG} is valid semver tag.\n"
else
printf "${TAG} is not a valid semver tag.\n"
exit 1
fi
TAG_FOUND=`git tag --list ${TAG}`
if [[ ${TAG_FOUND} = ${TAG} ]] ; then
printf "Tag ${TAG} already exists\n"
exit 1
fi
# Get version for version.go
OTEL_VERSION=$(echo "${TAG}" | grep -o '^v[0-9]\+\.[0-9]\+\.[0-9]\+')
# Strip leading v
OTEL_VERSION="${OTEL_VERSION#v}"
cd $(dirname $0)
if ! git diff --quiet; then \
printf "Working tree is not clean, can't proceed with the release process\n"
git status
git diff
exit 1
fi
# Update version.go
cp ./version.go ./version.go.bak
sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_VERSION}\"/" ./version.go.bak >./version.go
rm -f ./version.go.bak
# Update go.mod
git checkout -b pre_release_${TAG} main
PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's/^\.\///' | sort)
for dir in $PACKAGE_DIRS; do
cp "${dir}/go.mod" "${dir}/go.mod.bak"
sed "s/opentelemetry.io\/otel\([^ ]*\) v[0-9]*\.[0-9]*\.[0-9]/opentelemetry.io\/otel\1 ${TAG}/" "${dir}/go.mod.bak" >"${dir}/go.mod"
rm -f "${dir}/go.mod.bak"
done
# Run lint to update go.sum
make lint
# Add changes and commit.
git add .
make ci
git commit -m "Prepare for releasing $TAG"
printf "Now run following to verify the changes.\ngit diff main\n"
printf "\nThen push the changes to upstream\n"

View File

@ -16,11 +16,8 @@ package propagation // import "go.opentelemetry.io/otel/propagation"
import (
"context"
"net/url"
"strings"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/baggage"
"go.opentelemetry.io/otel/baggage"
)
const baggageHeader = "baggage"
@ -28,81 +25,31 @@ const baggageHeader = "baggage"
// Baggage is a propagator that supports the W3C Baggage format.
//
// This propagates user-defined baggage associated with a trace. The complete
// specification is defined at https://w3c.github.io/baggage/.
// specification is defined at https://www.w3.org/TR/baggage/.
type Baggage struct{}
var _ TextMapPropagator = Baggage{}
// Inject sets baggage key-values from ctx into the carrier.
func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
baggageMap := baggage.MapFromContext(ctx)
firstIter := true
var headerValueBuilder strings.Builder
baggageMap.Foreach(func(kv attribute.KeyValue) bool {
if !firstIter {
headerValueBuilder.WriteRune(',')
}
firstIter = false
headerValueBuilder.WriteString(url.QueryEscape(strings.TrimSpace((string)(kv.Key))))
headerValueBuilder.WriteRune('=')
headerValueBuilder.WriteString(url.QueryEscape(strings.TrimSpace(kv.Value.Emit())))
return true
})
if headerValueBuilder.Len() > 0 {
headerString := headerValueBuilder.String()
carrier.Set(baggageHeader, headerString)
bStr := baggage.FromContext(ctx).String()
if bStr != "" {
carrier.Set(baggageHeader, bStr)
}
}
// Extract returns a copy of parent with the baggage from the carrier added.
func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
bVal := carrier.Get(baggageHeader)
if bVal == "" {
bStr := carrier.Get(baggageHeader)
if bStr == "" {
return parent
}
baggageValues := strings.Split(bVal, ",")
keyValues := make([]attribute.KeyValue, 0, len(baggageValues))
for _, baggageValue := range baggageValues {
valueAndProps := strings.Split(baggageValue, ";")
if len(valueAndProps) < 1 {
continue
}
nameValue := strings.Split(valueAndProps[0], "=")
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
// TODO (skaris): properties defined at https://w3c.github.io/correlation-context/ are currently
// just put as part of the value.
var trimmedValueWithProps strings.Builder
trimmedValueWithProps.WriteString(trimmedValue)
for _, prop := range valueAndProps[1:] {
trimmedValueWithProps.WriteRune(';')
trimmedValueWithProps.WriteString(prop)
}
keyValues = append(keyValues, attribute.String(trimmedName, trimmedValueWithProps.String()))
bag, err := baggage.Parse(bStr)
if err != nil {
return parent
}
if len(keyValues) > 0 {
// Only update the context if valid values were found
return baggage.ContextWithMap(parent, baggage.NewMap(baggage.MapUpdate{
MultiKV: keyValues,
}))
}
return parent
return baggage.ContextWithBaggage(parent, bag)
}
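// Illustrative sketch, not part of the vendored file above: a round trip
// through the Baggage propagator using the MapCarrier defined elsewhere in
// this package; baggage.NewMember and baggage.New are assumed from
// go.opentelemetry.io/otel/baggage, as in upstream OpenTelemetry.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	m, _ := baggage.NewMember("userID", "42")
	bag, _ := baggage.New(m)
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	carrier := propagation.MapCarrier{}
	propagation.Baggage{}.Inject(ctx, carrier)
	fmt.Println(carrier.Get("baggage")) // "userID=42"

	out := propagation.Baggage{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(out).Member("userID").Value()) // "42"
}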
// Fields returns the keys whose values are set with Inject.

View File

@ -15,14 +15,10 @@
/*
Package propagation contains OpenTelemetry context propagators.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
OpenTelemetry propagators are used to extract and inject context data from and
into messages exchanged by applications. The propagator supported by this
package is the W3C Trace Context encoding
(https://www.w3.org/TR/trace-context/), and W3C Baggage
(https://w3c.github.io/baggage/).
(https://www.w3.org/TR/baggage/).
*/
package propagation // import "go.opentelemetry.io/otel/propagation"

View File

@ -21,12 +21,49 @@ import (
// TextMapCarrier is the storage medium used by a TextMapPropagator.
type TextMapCarrier interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Get returns the value associated with the passed key.
Get(key string) string
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Set stores the key-value pair.
Set(key string, value string)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Keys lists the keys stored in this carrier.
Keys() []string
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
// medium for propagated key-value pairs.
type MapCarrier map[string]string
// Compile time check that MapCarrier implements the TextMapCarrier interface.
var _ TextMapCarrier = MapCarrier{}
// Get returns the value associated with the passed key.
func (c MapCarrier) Get(key string) string {
return c[key]
}
// Set stores the key-value pair.
func (c MapCarrier) Set(key, value string) {
c[key] = value
}
// Keys lists the keys stored in this carrier.
func (c MapCarrier) Keys() []string {
keys := make([]string, 0, len(c))
for k := range c {
keys = append(keys, k)
}
return keys
}
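// Illustrative sketch, not part of the vendored file above: MapCarrier as an
// in-memory carrier, handy for tests or in-process hand-off.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	carrier := propagation.MapCarrier{}
	carrier.Set("traceparent", "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01")
	fmt.Println(carrier.Get("traceparent"))
	fmt.Println(carrier.Keys()) // [traceparent]
}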
// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
@ -54,12 +91,23 @@ func (hc HeaderCarrier) Keys() []string {
// TextMapPropagator propagates cross-cutting concerns as key-value text
// pairs within a carrier that travels in-band across process boundaries.
type TextMapPropagator interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Inject sets cross-cutting concerns from the Context into the carrier.
Inject(ctx context.Context, carrier TextMapCarrier)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Extract reads cross-cutting concerns from the carrier into a Context.
Extract(ctx context.Context, carrier TextMapCarrier) context.Context
// Fields returns the keys who's values are set with Inject.
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Fields returns the keys whose values are set with Inject.
Fields() []string
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
type compositeTextMapPropagator []TextMapPropagator
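// Illustrative sketch, not part of the vendored file above: composing the W3C
// Trace Context and Baggage propagators, assuming the package's TraceContext
// type and NewCompositeTextMapPropagator helper as in upstream OpenTelemetry.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	)
	fmt.Println(prop.Fields()) // typically [traceparent tracestate baggage]
}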

Some files were not shown because too many files have changed in this diff.