rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies
to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
Madhu Rajanna
2023-02-01 18:06:36 +01:00
committed by mergify[bot]
parent e9e33fb851
commit 9c8de9471e
937 changed files with 75539 additions and 33050 deletions

View File

@ -1,13 +0,0 @@
.DS_Store
Thumbs.db
.tools/
.idea/
.vscode/
*.iml
*.so
coverage.*
example
instrumentation/google.golang.org/grpc/otelgrpc/example/server/server
instrumentation/google.golang.org/grpc/otelgrpc/example/client/client

View File

@ -1,32 +0,0 @@
# See https://github.com/golangci/golangci-lint#config-file
run:
issues-exit-code: 1 #Default
tests: true #Default
linters:
enable:
- misspell
- goimports
- golint
- gofmt
issues:
exclude-rules:
# helpers in tests often (rightfully) pass a *testing.T as their first argument
- path: _test\.go
text: "context.Context should be the first parameter of a function"
linters:
- golint
# Yes, they are, but it's okay in a test
- path: _test\.go
text: "exported func.*returns unexported type.*which can be annoying to use"
linters:
- golint
linters-settings:
misspell:
locale: US
#ignore-words:
# - someword
goimports:
local-prefixes: go.opentelemetry.io

View File

@ -1,319 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.20.0] - 2021-04-23
### Changed
- The `go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo` instrumentation now accepts a `WithCommandAttributeDisabled` option,
so the caller can specify whether to opt out of tracing the mongo command. (#712)
- Upgrade to v0.20.0 of `go.opentelemetry.io/otel`. (#758)
- The B3 and Jaeger propagators now store their debug or deferred state in the context.Context instead of the SpanContext. (#758)
## [0.19.0] - 2021-03-19
### Changed
- Upgrade to v0.19.0 of `go.opentelemetry.io/otel`.
## [0.18.0] - 2021-03-04
### Fixed
- `otelmemcache` no longer sets the span status to OK; it now leaves it unset. (#477)
- Fix goroutine leak in gRPC `StreamClientInterceptor`. (#581)
### Removed
- Remove service name from `otelmemcache` configuration and span attributes. (#477)
## [0.17.0] - 2021-02-15
### Added
- Add `ot-tracer` propagator (#562)
### Changed
- Rename project default branch from `master` to `main`.
### Fixed
- Added failure message for AWS ECS resource detector for better debugging (#568)
- Goroutine leak in gRPC StreamClientInterceptor while streamer returns an error. (#581)
## [0.16.0] - 2021-01-13
### Fixed
- Fix module path for AWS ECS resource detector (#517)
## [0.15.1] - 2020-12-14
### Added
- Add registry link check to `Makefile` and pre-release script. (#446)
- A new AWS X-Ray ID Generator (#459)
- Migrate CircleCI jobs to GitHub Actions (#476)
- Add CodeQL GitHub Action (#506)
- Add gosec workflow to GitHub Actions (#507)
### Fixed
- Fixes the body replacement in otelhttp so it does not mutate a nil body. (#484)
## [0.15.0] - 2020-12-11
### Added
- A new Amazon EKS resource detector. (#465)
- A new `gcp.CloudRun` detector for detecting resource from a Cloud Run instance. (#455)
## [0.14.0] - 2020-11-20
### Added
- `otelhttp.{Get,Head,Post,PostForm}` convenience wrappers for their `http` counterparts. (#390)
- The AWS detector now adds the cloud zone, host image ID, host type, and host name to the returned `Resource`. (#410)
- Add Amazon ECS Resource Detector for AWS X-Ray. (#466)
- Add propagator for AWS X-Ray (#462)
### Changed
- Add semantic version to `Tracer` / `Meter` created by instrumentation packages `otelsarama`, `otelrestful`, `otelmongo`, `otelhttp` and `otelhttptrace`. (#412)
- Update instrumentation guidelines about tracer / meter semantic version. (#412)
- Replace internal tracer and meter helpers by helpers from `go.opentelemetry.io/otel`. (#414)
- gRPC instrumentation sets span attribute `rpc.grpc.status_code`. (#453)
### Fixed
- `/detectors/aws` no longer fails if instance metadata is not available (e.g. not running in AWS) (#401)
- The AWS detector now returns a partial resource and an appropriate error if it encounters an error part way through determining a `Resource` identity. (#410)
- The `host` instrumentation unit test has been updated to not depend on the system it runs on. (#426)
## [0.13.0] - 2020-10-09
### Added
- A Jaeger propagator. (#375)
### Changed
- The `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc` package instrumentation no longer accepts a `Tracer` as an argument to the interceptor function.
Instead, a new `WithTracerProvider` option is added to configure the `TracerProvider` used when creating the `Tracer` for the instrumentation. (#373)
- The `go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron` instrumentation now accepts a `TracerProvider` rather than a `Tracer`. (#374)
- Remove `go.opentelemetry.io/otel/sdk` dependency from instrumentation. (#381)
- Use `httpsnoop` in `go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux` to ensure `http.ResponseWriter` additional interfaces are preserved. (#388)
### Fixed
- The `go.opentelemetry.io/contrib/instrumentation/github.com/labstack/echo/otelecho.Middleware` no longer sends duplicate errors to the global `ErrorHandler`. (#377, #364)
- The import comment in `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` is now correctly quoted. (#379)
- The B3 propagator sets the sample bitmask when the sampling decision is `debug`. (#369)
## [0.12.0] - 2020-09-25
### Changed
- Replace `WithTracer` with `WithTracerProvider` in the `go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron` instrumentation. (#374)
### Added
- Benchmark tests for the gRPC instrumentation. (#296)
- Integration testing for the gRPC instrumentation. (#297)
- Allow custom labels to be added to net/http metrics. (#306)
- Added B3 propagator, moving it out of open.telemetry.io/otel repo. (#344)
### Changed
- Unify instrumentation about provider options for `go.mongodb.org/mongo-driver`, `gin-gonic/gin`, `gorilla/mux`,
`labstack/echo`, `emicklei/go-restful`, `bradfitz/gomemcache`, `Shopify/sarama`, `net/http` and `beego`. (#303)
- Update instrumentation guidelines about uniform provider options. Also, update style guide. (#303)
- Make config struct of instrumentation unexported. (#303)
- Instrumentations have been updated to adhere to the [configuration style guide's](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config)
updated recommendation to use `newConfig()` instead of `configure()`. (#336)
- A new instrumentation naming scheme is implemented to avoid package name conflicts for instrumented packages while still remaining discoverable. (#359)
- `google.golang.org/grpc` -> `google.golang.org/grpc/otelgrpc`
- `go.mongodb.org/mongo-driver` -> `go.mongodb.org/mongo-driver/mongo/otelmongo`
- `net/http` -> `net/http/otelhttp`
- `net/http/httptrace` -> `net/http/httptrace/otelhttptrace`
- `github.com/labstack/echo` -> `github.com/labstack/echo/otelecho`
- `github.com/bradfitz/gomemcache` -> `github.com/bradfitz/gomemcache/memcache/otelmemcache`
- `github.com/gin-gonic/gin` -> `github.com/gin-gonic/gin/otelgin`
- `github.com/gocql/gocql` -> `github.com/gocql/gocql/otelgocql`
- `github.com/emicklei/go-restful` -> `github.com/emicklei/go-restful/otelrestful`
- `github.com/Shopify/sarama` -> `github.com/Shopify/sarama/otelsarama`
- `github.com/gorilla/mux` -> `github.com/gorilla/mux/otelmux`
- `github.com/astaxie/beego` -> `github.com/astaxie/beego/otelbeego`
- `gopkg.in/macaron.v1` -> `gopkg.in/macaron.v1/otelmacaron`
- Rename `OTelBeegoHandler` to `Handler` in the `go.opentelemetry.io/contrib/instrumentation/github.com/astaxie/beego/otelbeego` package. (#359)
## [0.11.0] - 2020-08-25
### Added
- Top-level `Version()` and `SemVersion()` functions defining the current version of the contrib package. (#225)
- Instrumentation for the `github.com/astaxie/beego` package. (#200)
- Instrumentation for the `github.com/bradfitz/gomemcache` package. (#204)
- Host metrics instrumentation. (#231)
- Cortex histogram and distribution support. (#237)
- Cortex example project. (#238)
- Cortex HTTP authentication. (#246)
### Changed
- Remove service name as a parameter of Sarama instrumentation. (#221)
- Replace `WithTracer` with `WithTracerProvider` in Sarama instrumentation. (#221)
- Switch to use common top-level module `SemVersion()` when creating versioned tracer in `bradfitz/gomemcache`. (#226)
- Use `IntegrationShouldRun` in `gomemcache_test`. (#254)
- Use Go 1.15 for CI builds. (#236)
- Improved configuration for `runtime` instrumentation. (#224)
### Fixed
- Update dependabot configuration to include newly added `bradfitz/gomemcache` package. (#226)
- Correct `runtime` instrumentation name. (#241)
## [0.10.1] - 2020-08-13
### Added
- The `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc` module has been added to replace the instrumentation that had previously existed in the `go.opentelemetry.io/otel/instrumentation/grpctrace` package. (#189)
- Instrumentation for the stdlib `net/http` and `net/http/httptrace` packages. (#190)
- Initial Cortex exporter. (#202, #205, #210, #211, #215)
### Fixed
- Bump google.golang.org/grpc from 1.30.0 to 1.31.0. (#166)
- Bump go.mongodb.org/mongo-driver from 1.3.5 to 1.4.0 in /instrumentation/go.mongodb.org/mongo-driver. (#170)
- Bump google.golang.org/grpc in /instrumentation/github.com/gin-gonic/gin. (#173)
- Bump google.golang.org/grpc in /instrumentation/github.com/labstack/echo. (#176)
- Bump google.golang.org/grpc from 1.30.0 to 1.31.0 in /instrumentation/github.com/Shopify/sarama. (#179)
- Bump cloud.google.com/go from 0.61.0 to 0.63.0 in /detectors/gcp. (#181, #199)
- Bump github.com/aws/aws-sdk-go from 1.33.15 to 1.34.1 in /detectors/aws. (#184, #192, #193, #198, #201, #203)
- Bump github.com/golangci/golangci-lint from 1.29.0 to 1.30.0 in /tools. (#186)
- Setup CI to run tests that require external resources (Cassandra and MongoDB). (#191)
- Bump github.com/Shopify/sarama from 1.26.4 to 1.27.0 in /instrumentation/github.com/Shopify/sarama. (#206)
## [0.10.0] - 2020-07-31
This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0) dependency to v0.10.0 and includes new instrumentation for popular Kafka and Cassandra clients.
### Added
- A detector that generates resources from a GCE instance. (#132)
- A detector that generates resources from AWS instances. (#139)
- Instrumentation for the Kafka client github.com/Shopify/sarama. (#134, #153)
- Links and status message for mock span in the internal testing library. (#134)
- Instrumentation for the Cassandra client github.com/gocql/gocql. (#137)
- A detector that generates resources from GKE clusters. (#154)
### Fixed
- Bump github.com/aws/aws-sdk-go from 1.33.8 to 1.33.15 in /detectors/aws. (#155, #157, #159, #162)
- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#146)
## [0.9.0] - 2020-07-20
This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0) dependency to v0.9.0.
### Fixed
- Bump github.com/emicklei/go-restful/v3 from 3.0.0 to 3.2.0 in /instrumentation/github.com/emicklei/go-restful. (#133)
- Update dependabot configuration to correctly check all included packages. (#131)
- Update `RELEASING.md` with correct `tag.sh` command. (#130)
## [0.8.0] - 2020-07-10
This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0) dependency to v0.8.0, includes minor fixes, and new instrumentation.
### Added
- Create this `CHANGELOG.md`. (#114)
- Add `emicklei/go-restful/v3` trace instrumentation. (#115)
### Changed
- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#114)
- Move all `github.com` package instrumentation under a `github.com` directory. (#118)
### Fixed
- Update README to include information about external instrumentation.
To start, this includes native instrumentation found in the `go-redis/redis` package. (#117)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.2 in /tools. (#122, #123, #125)
- Bump go.mongodb.org/mongo-driver from 1.3.4 to 1.3.5 in /instrumentation/go.mongodb.org/mongo-driver. (#124)
## [0.7.0] - 2020-06-29
This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0) dependency to v0.7.0.
### Added
- Create `RELEASING.md` instructions. (#101)
- Apply transitive dependabot go.mod updates as part of a new automatic Github workflow. (#94)
- New dependabot integration to automate package upgrades. (#61)
- Add automatic tag generation script for release. (#60)
### Changed
- Upgrade Datadog metrics exporter to include Resource tags. (#46)
- Added output validation to Datadog example. (#96)
- Move Macaron package to match layout guidelines. (#92)
- Update top-level README and instrumentation README. (#92)
- Bump google.golang.org/grpc from 1.29.1 to 1.30.0. (#99)
- Bump github.com/golangci/golangci-lint from 1.21.0 to 1.27.0 in /tools. (#77)
- Bump go.mongodb.org/mongo-driver from 1.3.2 to 1.3.4 in /instrumentation/go.mongodb.org/mongo-driver. (#76)
- Bump github.com/stretchr/testify from 1.5.1 to 1.6.1. (#74)
- Bump gopkg.in/macaron.v1 from 1.3.5 to 1.3.9 in /instrumentation/macaron. (#68)
- Bump github.com/gin-gonic/gin from 1.6.2 to 1.6.3 in /instrumentation/gin-gonic/gin. (#73)
- Bump github.com/DataDog/datadog-go from 3.5.0+incompatible to 3.7.2+incompatible in /exporters/metric/datadog. (#78)
- Replaced `internal/trace/http.go` helpers with `api/standard` helpers from otel-go repo. (#112)
## [0.6.1] - 2020-06-08
First official tagged release of `contrib` repository.
### Added
- `labstack/echo` trace instrumentation (#42)
- `mongodb` trace instrumentation (#26)
- Go Runtime metrics (#9)
- `gorilla/mux` trace instrumentation (#19)
- `gin-gonic` trace instrumentation (#15)
- `macaron` trace instrumentation (#20)
- `dogstatsd` metrics exporter (#10)
- `datadog` metrics exporter (#22)
- Tags to all modules in repository
- Repository folder structure and automated build (#3)
### Changed
- Prefix support for dogstatsd (#34)
- Update Go Runtime package to use batch observer (#44)
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go-contrib/compare/v0.20.0...HEAD
[0.20.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.20.0
[0.19.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.19.0
[0.18.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.18.0
[0.17.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.17.0
[0.16.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.16.0
[0.15.1]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.15.1
[0.15.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.15.0
[0.14.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.14.0
[0.13.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.13.0
[0.12.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.12.0
[0.11.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.11.0
[0.10.1]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.10.1
[0.10.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.10.0
[0.9.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.9.0
[0.8.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.8.0
[0.7.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.7.0
[0.6.1]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.6.1

View File

@ -1,17 +0,0 @@
#####################################################
#
# List of approvers for this repository
#
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/main/community-membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo
CODEOWNERS @MrAlias @Aneurysm9

View File

@ -1,135 +0,0 @@
# Contributing to opentelemetry-go-contrib
The Go special interest group (SIG) meets regularly. See the
OpenTelemetry
[community](https://github.com/open-telemetry/community#golang-sdk)
repo for information on this and other language SIGs.
See the [public meeting
notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
for a summary description of past meetings. To request edit access,
join the meeting or get in touch on
[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
## Development
There are some generated files checked into the repo. To make sure
that the generated files are up-to-date, run `make` (or `make
precommit` - the `precommit` target is the default).
The `precommit` target also fixes the formatting of the code and
checks the status of the go module files.
If, after running `make precommit`, the output of `git status` contains
`nothing to commit, working tree clean`, then everything is up-to-date
and properly formatted.
## Pull Requests
### How to Send Pull Requests
Everyone is welcome to contribute code to `opentelemetry-go-contrib` via
GitHub pull requests (PRs).
To create a new PR, fork the project in GitHub and clone the upstream
repo:
```sh
$ git clone https://github.com/open-telemetry/opentelemetry-go-contrib
```
This puts the project in the `opentelemetry-go-contrib` directory in
your current working directory.
Enter the newly created directory and add your fork as a new remote:
```sh
$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go-contrib
```
Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md` and push the branch to your fork:
```sh
$ git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
$ make precommit
$ git add -p
$ git commit
$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```
Open a pull request against the main `opentelemetry-go-contrib` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.
### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title,
tag it as `work-in-progress`, or mark it as
[`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
* Make sure CLA is signed and CI is clear.
### How to Get PRs Merged
A PR is considered to be **ready to merge** when:
* It has received two approvals from Approvers/Maintainers (at
different companies).
* Feedback has been addressed.
* Any substantive changes to your PR will require that you clear any prior
approval reviews; this includes changes resulting from other feedback. Unless
the approver explicitly stated that their approval will persist across
changes, it should be assumed that the PR needs their review again. Other
project members (e.g. approvers, maintainers) can help with this if there are
any questions or if you forget to clear reviews.
* It has been open for review for at least one working day. This gives
people reasonable time to review.
* A trivial change (typo, cosmetic, doc, etc.) doesn't have to wait for
one day.
* `CHANGELOG.md` has been updated to reflect what has been
added, changed, removed, or fixed.
* An urgent fix can be an exception as long as it has been actively
communicated.
Any Maintainer can merge the PR once it is **ready to merge**.
## Style Guide
* Make sure to run `make precommit` - this will find and fix the code
formatting.
* Check [opentelemetry-go Style Guide](https://github.com/open-telemetry/opentelemetry-go/blob/main/CONTRIBUTING.md#style-guide)
## Adding a new Contrib package
To add a new contrib package, follow the structure of an existing one. An empty sample instrumentation
provides the base structure with an example and a test. Each contrib package
should be its own module. A contrib package may contain more than one Go package.
### Folder Structure
- instrumentation/\<instrumentation-package> (**Common**)
- instrumentation/\<instrumentation-package>/trace (**specific to trace**)
- instrumentation/\<instrumentation-package>/metrics (**specific to metrics**)
#### Example
- instrumentation/gorm/trace
- instrumentation/kafka/metrics
## Approvers and Maintainers
Approvers:
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM)
- [David Ashpole](https://github.com/dashpole), Google
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
Maintainers:
- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
- [Tyler Yahn](https://github.com/MrAlias), New Relic
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).

View File

@ -1,203 +0,0 @@
TOOLS_MOD_DIR := ./tools
# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort))
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
# URLs to check if all contrib entries exist in the registry.
REGISTRY_BASE_URL = https://raw.githubusercontent.com/open-telemetry/opentelemetry.io/main/content/en/registry
CONTRIB_REPO_URL = https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main
# macOS Catalina (10.15.x) doesn't support 386, so skip the 386 test there.
SKIP_386_TEST = false
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Darwin)
SW_VERS := $(shell sw_vers -productVersion)
ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
SKIP_386_TEST = true
endif
endif
GOTEST_MIN = go test -v -timeout 30s
GOTEST = $(GOTEST_MIN) -race
GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
.DEFAULT_GOAL := precommit
.PHONY: precommit
TOOLS_DIR := $(abspath ./.tools)
$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
precommit: dependabot-check license-check generate build lint test
.PHONY: test-with-coverage
test-with-coverage:
set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
echo "go test ./... + coverage in $${dir}"; \
(cd "$${dir}" && \
$(GOTEST_WITH_COVERAGE) ./... && \
go tool cover -html=coverage.out -o coverage.html); \
[ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
done; \
sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt && rm coverage.txt.bak
.PHONY: ci
ci: precommit check-clean-work-tree test-with-coverage test-386
.PHONY: test-gocql
test-gocql:
@if ./tools/should_build.sh gocql; then \
set -e; \
docker run --name cass-integ --rm -p 9042:9042 -d cassandra:3; \
CMD=cassandra IMG_NAME=cass-integ ./tools/wait.sh; \
(cd instrumentation/github.com/gocql/gocql/otelgocql && \
$(GOTEST_WITH_COVERAGE) . && \
go tool cover -html=coverage.out -o coverage.html); \
docker stop cass-integ; \
fi
.PHONY: test-mongo-driver
test-mongo-driver:
@if ./tools/should_build.sh mongo-driver; then \
set -e; \
docker run --name mongo-integ --rm -p 27017:27017 -d mongo; \
CMD=mongo IMG_NAME=mongo-integ ./tools/wait.sh; \
(cd instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo && \
$(GOTEST_WITH_COVERAGE) . && \
go tool cover -html=coverage.out -o coverage.html); \
docker stop mongo-integ; \
fi
.PHONY: test-gomemcache
test-gomemcache:
@if ./tools/should_build.sh gomemcache; then \
set -e; \
docker run --name gomemcache-integ --rm -p 11211:11211 -d memcached; \
CMD=gomemcache IMG_NAME=gomemcache-integ ./tools/wait.sh; \
(cd instrumentation/github.com/bradfitz/gomemcache/memcache/otelmemcache && \
$(GOTEST_WITH_COVERAGE) . && \
go tool cover -html=coverage.out -o coverage.html); \
docker stop gomemcache-integ ; \
fi
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
echo; \
echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
echo; \
git status; \
exit 1; \
fi
.PHONY: build
build:
# TODO: Fix this on windows.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "compiling all packages in $${dir}"; \
(cd "$${dir}" && \
go build ./... && \
go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
done
.PHONY: test
test:
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test ./... + race in $${dir}"; \
(cd "$${dir}" && \
$(GOTEST) ./...); \
done
.PHONY: test-386
test-386:
if [ $(SKIP_386_TEST) = true ] ; then \
echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
else \
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test ./... GOARCH 386 in $${dir}"; \
(cd "$${dir}" && \
GOARCH=386 $(GOTEST_MIN) ./...); \
done; \
fi
.PHONY: lint
lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(TOOLS_DIR)/golangci-lint run --fix && \
$(TOOLS_DIR)/golangci-lint run); \
done
$(TOOLS_DIR)/misspell -w $(ALL_DOCS)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go mod tidy); \
done
.PHONY: generate
generate: $(TOOLS_DIR)/stringer
PATH="$(TOOLS_DIR):$${PATH}" go generate ./...
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
.PHONY: registry-links-check
registry-links-check:
@checkRes=$$( \
for f in $$( find ./instrumentation ./exporters ./detectors ! -path './instrumentation/net/*' -type f -name 'go.mod' -exec dirname {} \; | egrep -v '/example|/utils' | sort ) \
./instrumentation/net/http; do \
TYPE="instrumentation"; \
if $$(echo "$$f" | grep -q "exporters"); then \
TYPE="exporter"; \
fi; \
if $$(echo "$$f" | grep -q "detectors"); then \
TYPE="detector"; \
fi; \
NAME=$$(echo "$$f" | sed -e 's/.*\///' -e 's/.*otel//'); \
LINK=$(CONTRIB_REPO_URL)/$$(echo "$$f" | sed -e 's/..//' -e 's/\/otel.*$$//'); \
if ! $$(curl -s $(REGISTRY_BASE_URL)/$${TYPE}-go-$${NAME}.md | grep -q "$${LINK}"); then \
echo "$$f"; \
fi \
done; \
); \
if [ -n "$$checkRes" ]; then \
echo "WARNING: registry link check failed for the following packages:"; echo "$${checkRes}"; \
fi
.PHONY: dependabot-check
dependabot-check:
@result=$$( \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
do grep -q "$$f" .github/dependabot.yml \
|| echo "$$f"; \
done; \
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
exit 1; \
fi

View File

@ -1,25 +0,0 @@
# OpenTelemetry-Go Contrib
[![build_and_test](https://github.com/open-telemetry/opentelemetry-go-contrib/workflows/build_and_test/badge.svg)](https://github.com/open-telemetry/opentelemetry-go-contrib/actions?query=workflow%3Abuild_and_test+branch%3Amain)
[![Docs](https://godoc.org/go.opentelemetry.io/contrib?status.svg)](https://pkg.go.dev/go.opentelemetry.io/contrib)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/contrib)](https://goreportcard.com/report/go.opentelemetry.io/contrib)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
Collection of 3rd-party instrumentation and exporters for [OpenTelemetry-Go](https://github.com/open-telemetry/opentelemetry-go).
## Contents
- [Instrumentation](./instrumentation/): Packages providing OpenTelemetry instrumentation for 3rd-party libraries.
- [Exporters](./exporters/): Packages providing OpenTelemetry exporters for 3rd-party telemetry systems.
- [Propagators](./propagators/): Packages providing OpenTelemetry context propagators for 3rd-party propagation formats.
- [Detectors](./detectors/): Packages providing OpenTelemetry resource detectors for 3rd-party cloud computing environments.
## Project Status
This project is currently in a pre-GA phase. Our progress towards a GA release
candidate is tracked in [this project
board](https://github.com/orgs/open-telemetry/projects/5).
## Contributing
For information on how to contribute, consult [the contributing guidelines](./CONTRIBUTING.md).

View File

@ -1,96 +0,0 @@
# Release Process
There are two types of release for the `go.opentelemetry.io/contrib` repo
and submodules.
1. **Case 1** A release due to changes independent of the
`go.opentelemetry.io/otel` module, e.g. perhaps a critical bug fix in
one of the contrib modules.
2. **Case 2** A release due to a breaking API change in
`go.opentelemetry.io/otel` which all modules in this repo
depend on.
## Pre-Release
Update go.mod for submodules to depend on the upcoming new release of
the module in this repo, `go.opentelemetry.io/contrib`. Decide on the
next version of the semantic tag to apply to the contrib
module based on whether you fall into Case 1 or Case 2.
### Case 1
If the changes are all internal to this repo, then the new tag will
most often be a patch or minor version upgrade to the existing tag on
this module. Let's call this `<new_contrib_tag>`.
### Case 2
If a new release is required due to breaking changes in
`go.opentelemetry.io/otel`, then the new semantic tag for this repo
should be bumped to match the `go.opentelemetry.io/otel` new tag.
Let's call this `<new_otel_tag>`. The script checks that
`go.opentelemetry.io/otel@v<new_otel_tag>` is a valid tag, so you need
to wait until that tag has been pushed in the main repo.
In nearly all cases, `<new_contrib_tag>` should be the same as
`<new_otel_tag>`.
1. Run `pre_release.sh` script to create a branch `pre_release_<new_contrib_tag>`.
The script will also run `go mod tidy` and `make ci`.
* **Case 1** `./pre_release.sh -t <new_contrib_tag>`
* **Case 2** `./pre_release.sh -o <new_otel_tag> [-t <new_contrib_tag>]`
2. If you used `-o <new_otel_tag>` to rewrite the modules to depend on
a new version of `go.opentelemetry.io/otel`, there will likely be
breaking changes that require fixes to the files in this
`contrib` repo. Make the appropriate fixes to address any API
breaks and run through the
```
git commit -m "fixes due to API changes"
make precommit
```
cycle until everything passes.
3. Push the changes to upstream.
```
git diff main
git push
```
4. Create a PR on GitHub and merge the PR once approved.
### Tag
Now create a `<new_contrib_tag>` on the commit hash of the changes made in the pre-release step:
1. Run the tag.sh script.
```
./tag.sh <new_contrib_tag> <commit-hash>
```
2. Push tags upstream. Make sure you push upstream for all the sub-module tags as well.
```
git push upstream <new_contrib_tag>
git push upstream <submodules-path/new_contrib_tag>
...
```
## Release
Now create a release for the new `<new_contrib_tag>` on github.
The release body should include all the release notes from the Changelog for this release.
Additionally, the `tag.sh` script generates commit logs since the last release, which can be used to supplement the release notes.
<!-- ## Verify Examples -->
<!-- After releasing run following script to verify that examples build outside of the otel repo. -->
<!-- The script copies examples into a different directory and builds them. -->
<!-- ``` -->
<!-- ./verify_examples.sh -->
<!-- ``` -->

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
@ -24,12 +24,12 @@ import (
// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
// Please be careful of initialization order - for example, if you change
// the global propagator, the DefaultClient might still be using the old one
// the global propagator, the DefaultClient might still be using the old one.
var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
// Get is a convenient replacement for http.Get that adds a span around the request.
func Get(ctx context.Context, url string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
if err != nil {
return nil, err
}
@ -37,8 +37,8 @@ func Get(ctx context.Context, url string) (resp *http.Response, err error) {
}
// Head is a convenient replacement for http.Head that adds a span around the request.
func Head(ctx context.Context, url string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
if err != nil {
return nil, err
}
@ -46,8 +46,8 @@ func Head(ctx context.Context, url string) (resp *http.Response, err error) {
}
// Post is a convenient replacement for http.Post that adds a span around the request.
func Post(ctx context.Context, url, contentType string, body io.Reader) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "POST", url, body)
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
if err != nil {
return nil, err
}
@ -56,6 +56,6 @@ func Post(ctx context.Context, url, contentType string, body io.Reader) (resp *h
}
// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
func PostForm(ctx context.Context, url string, data url.Values) (resp *http.Response, err error) {
return Post(ctx, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
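
For context on the wrappers touched above, here is a minimal usage sketch; it relies only on `otelhttp.Get` as shown in this hunk, the global tracer provider, and a placeholder URL:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// otelhttp.Get uses otelhttp.DefaultClient, so the request is wrapped in a
	// client span and the span context is injected into the request headers.
	resp, err := otelhttp.Get(context.Background(), "https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(body), "bytes")
}
```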

View File

@ -12,12 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"net/http"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// Attribute keys that can be added to a span.
@ -28,7 +29,7 @@ const (
WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)
// Server HTTP metrics
// Server HTTP metrics.
const (
RequestCount = "http.server.request_count" // Incoming request count total
RequestContentLength = "http.server.request_content_length" // Incoming request bytes total
@ -39,3 +40,7 @@ const (
// Filter is a predicate used to determine whether a given http.request should
// be traced. A Filter must return true if the request should be traced.
type Filter func(*http.Request) bool
func newTracer(tp trace.TracerProvider) trace.Tracer {
return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(SemVersion()))
}
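
Because `Filter` is only a predicate over `*http.Request`, dropping traces for selected requests is a one-liner. A small sketch, assuming the package's `NewHandler` constructor; the `/healthz` path and port are arbitrary examples:

```go
package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// skipHealthChecks is an otelhttp.Filter: it returns true when a request
// should be traced, and false for health-check probes.
func skipHealthChecks(r *http.Request) bool {
	return r.URL.Path != "/healthz" // example path, adjust to your server
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "hello") })

	// Requests matching the filter are traced; /healthz is left alone.
	handler := otelhttp.NewHandler(mux, "server", otelhttp.WithFilter(skipHealthChecks))
	_ = http.ListenAndServe(":8080", handler)
}
```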

View File

@ -12,12 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
"net/http"
"net/http/httptrace"
"go.opentelemetry.io/contrib"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
@ -35,47 +36,48 @@ type config struct {
Tracer trace.Tracer
Meter metric.Meter
Propagators propagation.TextMapPropagator
SpanStartOptions []trace.SpanOption
SpanStartOptions []trace.SpanStartOption
PublicEndpoint bool
PublicEndpointFn func(*http.Request) bool
ReadEvent bool
WriteEvent bool
Filters []Filter
SpanNameFormatter func(string, *http.Request) string
ClientTrace func(context.Context) *httptrace.ClientTrace
TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
}
// Option Interface used for setting *optional* config properties
// Option interface used for setting optional config properties.
type Option interface {
Apply(*config)
apply(*config)
}
// OptionFunc provides a convenience wrapper for simple Options
// that can be represented as functions.
type OptionFunc func(*config)
type optionFunc func(*config)
func (o OptionFunc) Apply(c *config) {
func (o optionFunc) apply(c *config) {
o(c)
}
// newConfig creates a new config struct and applies opts to it.
func newConfig(opts ...Option) *config {
c := &config{
Propagators: otel.GetTextMapPropagator(),
TracerProvider: otel.GetTracerProvider(),
MeterProvider: global.GetMeterProvider(),
Propagators: otel.GetTextMapPropagator(),
MeterProvider: global.MeterProvider(),
}
for _, opt := range opts {
opt.Apply(c)
opt.apply(c)
}
// Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context.
if c.TracerProvider != nil {
c.Tracer = newTracer(c.TracerProvider)
}
c.Tracer = c.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(contrib.SemVersion()),
)
c.Meter = c.MeterProvider.Meter(
instrumentationName,
metric.WithInstrumentationVersion(contrib.SemVersion()),
metric.WithInstrumentationVersion(SemVersion()),
)
return c
@ -84,16 +86,20 @@ func newConfig(opts ...Option) *config {
// WithTracerProvider specifies a tracer provider to use for creating a tracer.
// If none is specified, the global provider is used.
func WithTracerProvider(provider trace.TracerProvider) Option {
return OptionFunc(func(cfg *config) {
cfg.TracerProvider = provider
return optionFunc(func(cfg *config) {
if provider != nil {
cfg.TracerProvider = provider
}
})
}
// WithMeterProvider specifies a meter provider to use for creating a meter.
// If none is specified, the global provider is used.
func WithMeterProvider(provider metric.MeterProvider) Option {
return OptionFunc(func(cfg *config) {
cfg.MeterProvider = provider
return optionFunc(func(cfg *config) {
if provider != nil {
cfg.MeterProvider = provider
}
})
}
@ -101,23 +107,36 @@ func WithMeterProvider(provider metric.MeterProvider) Option {
// span context. If this option is not provided, then the association is a child
// association instead of a link.
func WithPublicEndpoint() Option {
return OptionFunc(func(c *config) {
c.SpanStartOptions = append(c.SpanStartOptions, trace.WithNewRoot())
return optionFunc(func(c *config) {
c.PublicEndpoint = true
})
}
// WithPublicEndpointFn runs with every request, and allows conditionally
// configuring the Handler to link the span with an incoming span context. If
// this option is not provided or returns false, then the association is a
// child association instead of a link.
// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
return optionFunc(func(c *config) {
c.PublicEndpointFn = fn
})
}
// WithPropagators configures specific propagators. If this
// option isn't specified then
// option isn't specified, then the global TextMapPropagator is used.
func WithPropagators(ps propagation.TextMapPropagator) Option {
return OptionFunc(func(c *config) {
c.Propagators = ps
return optionFunc(func(c *config) {
if ps != nil {
c.Propagators = ps
}
})
}
// WithSpanOptions configures an additional set of
// trace.SpanOptions, which are applied to each new span.
func WithSpanOptions(opts ...trace.SpanOption) Option {
return OptionFunc(func(c *config) {
func WithSpanOptions(opts ...trace.SpanStartOption) Option {
return optionFunc(func(c *config) {
c.SpanStartOptions = append(c.SpanStartOptions, opts...)
})
}
@ -129,14 +148,14 @@ func WithSpanOptions(opts ...trace.SpanOption) Option {
// Filters will be invoked for each processed request, it is advised to make them
// simple and fast.
func WithFilter(f Filter) Option {
return OptionFunc(func(c *config) {
return optionFunc(func(c *config) {
c.Filters = append(c.Filters, f)
})
}
type event int
// Different types of events that can be recorded, see WithMessageEvents
// Different types of events that can be recorded, see WithMessageEvents.
const (
ReadEvents event = iota
WriteEvents
@ -147,12 +166,12 @@ const (
// end of the request.
//
// Valid events are:
// * ReadEvents: Record the number of bytes read after every http.Request.Body.Read
// using the ReadBytesKey
// * WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
// using the WriteBytesKey
// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
// using the ReadBytesKey
// - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
// using the WriteBytesKey
func WithMessageEvents(events ...event) Option {
return OptionFunc(func(c *config) {
return optionFunc(func(c *config) {
for _, e := range events {
switch e {
case ReadEvents:
@ -165,9 +184,17 @@ func WithMessageEvents(events ...event) Option {
}
// WithSpanNameFormatter takes a function that will be called on every
// request and the returned string will become the Span Name
// request and the returned string will become the Span Name.
func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
return OptionFunc(func(c *config) {
return optionFunc(func(c *config) {
c.SpanNameFormatter = f
})
}
// WithClientTrace takes a function that returns client trace instance that will be
// applied to the requests sent through the otelhttp Transport.
func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
return optionFunc(func(c *config) {
c.ClientTrace = f
})
}
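
A hedged sketch of how the retained and newly added options compose on the server side; the span-name scheme and the `X-External` header check are illustrative choices, not part of this change:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	handler := otelhttp.NewHandler(api, "api",
		// Name server spans after the method and path instead of the operation.
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			return r.Method + " " + r.URL.Path
		}),
		// Only link (rather than parent) incoming trace context for requests
		// arriving from outside, detected here via a hypothetical header.
		otelhttp.WithPublicEndpointFn(func(r *http.Request) bool {
			return r.Header.Get("X-External") == "true"
		}),
	)

	_ = http.ListenAndServe(":8080", handler)
}
```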

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"io"
@ -24,8 +24,10 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/semconv"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
)
@ -42,13 +44,15 @@ type Handler struct {
tracer trace.Tracer
meter metric.Meter
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanOption
spanStartOptions []trace.SpanStartOption
readEvent bool
writeEvent bool
filters []Filter
spanNameFormatter func(string, *http.Request) string
counters map[string]metric.Int64Counter
valueRecorders map[string]metric.Int64ValueRecorder
counters map[string]syncint64.Counter
valueRecorders map[string]syncfloat64.Histogram
publicEndpoint bool
publicEndpointFn func(*http.Request) bool
}
func defaultHandlerFormatter(operation string, _ *http.Request) string {
@ -84,6 +88,8 @@ func (h *Handler) configure(c *config) {
h.writeEvent = c.WriteEvent
h.filters = c.Filters
h.spanNameFormatter = c.SpanNameFormatter
h.publicEndpoint = c.PublicEndpoint
h.publicEndpointFn = c.PublicEndpointFn
}
func handleErr(err error) {
@ -93,16 +99,16 @@ func handleErr(err error) {
}
func (h *Handler) createMeasures() {
h.counters = make(map[string]metric.Int64Counter)
h.valueRecorders = make(map[string]metric.Int64ValueRecorder)
h.counters = make(map[string]syncint64.Counter)
h.valueRecorders = make(map[string]syncfloat64.Histogram)
requestBytesCounter, err := h.meter.NewInt64Counter(RequestContentLength)
requestBytesCounter, err := h.meter.SyncInt64().Counter(RequestContentLength)
handleErr(err)
responseBytesCounter, err := h.meter.NewInt64Counter(ResponseContentLength)
responseBytesCounter, err := h.meter.SyncInt64().Counter(ResponseContentLength)
handleErr(err)
serverLatencyMeasure, err := h.meter.NewInt64ValueRecorder(ServerLatency)
serverLatencyMeasure, err := h.meter.SyncFloat64().Histogram(ServerLatency)
handleErr(err)
h.counters[RequestContentLength] = requestBytesCounter
@ -110,7 +116,7 @@ func (h *Handler) createMeasures() {
h.valueRecorders[ServerLatency] = serverLatencyMeasure
}
// ServeHTTP serves HTTP requests (http.Handler)
// ServeHTTP serves HTTP requests (http.Handler).
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
requestStartTime := time.Now()
for _, f := range h.filters {
@ -121,14 +127,33 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
opts := append([]trace.SpanOption{
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
opts := h.spanStartOptions
if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
opts = append(opts, trace.WithNewRoot())
// Linking incoming span context if any for public endpoint.
if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
}
}
opts = append([]trace.SpanStartOption{
trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...),
trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...),
trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...),
}, h.spanStartOptions...) // start with the configured options
}, opts...) // start with the configured options
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
ctx, span := h.tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
tracer := h.tracer
if tracer == nil {
if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
tracer = newTracer(span.TracerProvider())
} else {
tracer = newTracer(otel.GetTracerProvider())
}
}
ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
defer span.End()
readRecordFunc := func(int64) {}
@ -140,8 +165,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var bw bodyWrapper
// if request body is nil we don't want to mutate the body as it will affect
// the identity of it in a unforeseeable way because we assert ReadCloser
// fullfills a certain interface and it is indeed nil.
// the identity of it in an unforeseeable way because we assert ReadCloser
// fulfills a certain interface and it is indeed nil.
if r.Body != nil {
bw.ReadCloser = r.Body
bw.record = readRecordFunc
@ -185,7 +210,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.counters[RequestContentLength].Add(ctx, bw.read, attributes...)
h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...)
elapsedTime := time.Since(requestStartTime).Microseconds()
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, attributes...)
}
@ -206,7 +232,7 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int,
}
if statusCode > 0 {
attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...)
span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(statusCode))
span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer))
}
if werr != nil && werr != io.EOF {
attributes = append(attributes, WriteErrorKey.String(werr.Error()))
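
The `ServerLatency` recording in this file switches from integer microseconds to fractional milliseconds, matching the new float64 histogram instrument. A tiny stdlib-only sketch of the two computations:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	elapsed := 1500 * time.Microsecond // pretend request duration

	// Old behaviour: integer microseconds fed to an Int64ValueRecorder.
	oldValue := elapsed.Microseconds() // 1500

	// New behaviour: fractional milliseconds fed to a float64 histogram,
	// keeping sub-millisecond precision.
	newValue := float64(elapsed) / float64(time.Millisecond) // 1.5

	fmt.Println(oldValue, newValue)
}
```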

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
@ -35,7 +35,7 @@ func (l *Labeler) Add(ls ...attribute.KeyValue) {
l.attributes = append(l.attributes, ls...)
}
// Labels returns a copy of the attributes added to the Labeler.
// Get returns a copy of the attributes added to the Labeler.
func (l *Labeler) Get() []attribute.KeyValue {
l.mu.Lock()
defer l.mu.Unlock()
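
The `Labeler` is how handler code attaches extra attributes to the otelhttp server metrics. A usage sketch, assuming the package's `LabelerFromContext` helper (not shown in this hunk); the attribute key and header name are made up:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// LabelerFromContext returns the Labeler stored in the request context
		// by the instrumentation (the bool reports whether one was found).
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant"))) // illustrative attribute

		w.Write([]byte("ok"))
	})

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(api, "api"))
}
```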

View File

@ -12,15 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
"io"
"net/http"
"net/http/httptrace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/semconv"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
)
@ -31,9 +34,10 @@ type Transport struct {
tracer trace.Tracer
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanOption
spanStartOptions []trace.SpanStartOption
filters []Filter
spanNameFormatter func(string, *http.Request) string
clientTrace func(context.Context) *httptrace.ClientTrace
}
var _ http.RoundTripper = &Transport{}
@ -42,7 +46,7 @@ var _ http.RoundTripper = &Transport{}
// starts a span and injects the span context into the outbound request headers.
//
// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
// as the base http.RoundTripper
// as the base http.RoundTripper.
func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
if base == nil {
base = http.DefaultTransport
@ -69,10 +73,11 @@ func (t *Transport) applyConfig(c *config) {
t.spanStartOptions = c.SpanStartOptions
t.filters = c.Filters
t.spanNameFormatter = c.SpanNameFormatter
t.clientTrace = c.ClientTrace
}
func defaultTransportFormatter(_ string, r *http.Request) string {
return r.Method
return "HTTP " + r.Method
}
// RoundTrip creates a Span and propagates its context via the provided request's headers
@ -86,9 +91,23 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
}
}
opts := append([]trace.SpanOption{}, t.spanStartOptions...) // start with the configured options
tracer := t.tracer
ctx, span := t.tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
if tracer == nil {
if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
tracer = newTracer(span.TracerProvider())
} else {
tracer = newTracer(otel.GetTracerProvider())
}
}
opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
if t.clientTrace != nil {
ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
}
r = r.WithContext(ctx)
span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...)
@ -97,24 +116,58 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
res, err := t.rt.RoundTrip(r)
if err != nil {
span.RecordError(err)
span.SetStatus(codes.Error, err.Error())
span.End()
return res, err
}
span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...)
span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode))
res.Body = &wrappedBody{ctx: ctx, span: span, body: res.Body}
res.Body = newWrappedBody(span, res.Body)
return res, err
}
// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
// io.ReadCloser. If the passed body implements io.Writer, the returned value
// will implement io.ReadWriteCloser.
func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
// The successful protocol switch responses will have a body that
// implement an io.ReadWriteCloser. Ensure this interface type continues
// to be satisfied if that is the case.
if _, ok := body.(io.ReadWriteCloser); ok {
return &wrappedBody{span: span, body: body}
}
// Remove the implementation of the io.ReadWriteCloser and only implement
// the io.ReadCloser.
return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
}
// wrappedBody is the response body type returned by the transport
// instrumentation to complete a span. Errors encountered when using the
// response body are recorded in the span tracking the response.
//
// The span tracking the response is ended when this body is closed.
//
// If the response body implements the io.Writer interface (i.e. for
// successful protocol switches), the wrapped body also will.
type wrappedBody struct {
ctx context.Context
span trace.Span
body io.ReadCloser
}
var _ io.ReadCloser = &wrappedBody{}
var _ io.ReadWriteCloser = &wrappedBody{}
func (wb *wrappedBody) Write(p []byte) (int, error) {
// This will not panic given the guard in newWrappedBody.
n, err := wb.body.(io.Writer).Write(p)
if err != nil {
wb.span.RecordError(err)
wb.span.SetStatus(codes.Error, err.Error())
}
return n, err
}
func (wb *wrappedBody) Read(b []byte) (int, error) {
n, err := wb.body.Read(b)
@ -126,11 +179,15 @@ func (wb *wrappedBody) Read(b []byte) (int, error) {
wb.span.End()
default:
wb.span.RecordError(err)
wb.span.SetStatus(codes.Error, err.Error())
}
return n, err
}
func (wb *wrappedBody) Close() error {
wb.span.End()
return wb.body.Close()
if wb.body != nil {
return wb.body.Close()
}
return nil
}
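
A hedged sketch of wiring the transport together with the new `WithClientTrace` option, assuming the sibling `otelhttptrace` package's `NewClientTrace` helper and a placeholder URL:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"net/http/httptrace"

	"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap the default transport; each request gets a client span and, via
	// WithClientTrace, low-level httptrace events (DNS, connect, TLS, ...).
	client := &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport,
			otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace {
				return otelhttptrace.NewClientTrace(ctx)
			}),
		),
	}

	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com/", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```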

View File

@ -12,13 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package contrib contains common values used across all
// instrumentation, exporter, and detector contributions.
package contrib // import "go.opentelemetry.io/contrib"
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
// Version is the current release version of OpenTelemetry Contrib in use.
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
return "0.20.0"
return "0.35.0"
// This string is updated by the pre_release.sh script during release
}
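
Now that the shared `go.opentelemetry.io/contrib` version constant is gone, each instrumentation reports its own version. A trivial sketch; the printed values are examples, not guaranteed output:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// The package-level version is what newTracer and newConfig attach to the
	// Tracer and Meter created by this instrumentation.
	fmt.Println(otelhttp.Version())    // e.g. "0.35.0"
	fmt.Println(otelhttp.SemVersion()) // e.g. "semver:0.35.0"
}
```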

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otelhttp
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
@ -25,7 +25,7 @@ import (
var _ io.ReadCloser = &bodyWrapper{}
// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error
// of bytes read and the last error.
type bodyWrapper struct {
io.ReadCloser
record func(n int64) // must not be nil
@ -91,6 +91,5 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) {
}
w.wroteHeader = true
w.statusCode = statusCode
w.props.Inject(w.ctx, propagation.HeaderCarrier(w.Header()))
w.ResponseWriter.WriteHeader(statusCode)
}

View File

@ -1,158 +0,0 @@
#!/usr/bin/env bash
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is used for
# a) creating a new tagged release of go.opentelemetry.io/contrib
# b) bumping the referenced version of go.opentelemetry.io/otel
#
# The options can be used together or individually.
#
set -e
declare CONTRIB_TAG
declare OTEL_TAG
help() {
printf "\n"
printf "Usage: %s [-o otel_tag] [-t tag]\n" "$0"
printf "\t-o Otel release tag. Update all go.mod to reference go.opentelemetry.io/otel <otel_tag>.\n"
printf "\t-t New Contrib unreleased tag. Update all go.mod files with this tag.\n"
exit 1 # Exit script after printing help
}
while getopts "t:o:" opt
do
case "$opt" in
t ) CONTRIB_TAG="$OPTARG" ;;
o ) OTEL_TAG="$OPTARG" ;;
? ) help ;; # Print help
esac
done
declare -r SEMVER_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
validate_tag() {
local tag_=$1
if [[ "${tag_}" =~ ${SEMVER_REGEX} ]]; then
printf "%s is valid semver tag.\n" "${tag_}"
else
printf "%s is not a valid semver tag.\n" "${tag_}"
return 1
fi
}
# Print help in case parameters are empty
if [[ -z "$CONTRIB_TAG" && -z "$OTEL_TAG" ]]
then
printf "At least one of '-o' or '-t' must be specified.\n"
help
fi
## Validate tags first
if [ -n "${OTEL_TAG}" ]; then
validate_tag "${OTEL_TAG}" || exit $?
# check that OTEL_TAG is a currently released tag for go.opentelemetry.io/otel
TMPDIR=$(mktemp -d "/tmp/otel-contrib.XXXXXX") || exit 1
trap "rm -fr ${TMPDIR}" EXIT
(cd "${TMPDIR}" && go mod init tagtest)
# requires go 1.14 for support of '-modfile'
if ! go get -modfile="${TMPDIR}/go.mod" -d -v "go.opentelemetry.io/otel@${OTEL_TAG}"; then
printf "go.opentelemetry.io/otel %s does not exist. Please supply a valid tag\n" "${OTEL_TAG}"
exit 1
fi
fi
if [ -n "${CONTRIB_TAG}" ]; then
validate_tag "${CONTRIB_TAG}" || exit $?
TAG_FOUND=$(git tag --list "${CONTRIB_TAG}")
if [[ ${TAG_FOUND} = "${CONTRIB_TAG}" ]] ; then
printf "Tag %s already exists in this repo\n" "${CONTRIB_TAG}"
exit 1
fi
else
CONTRIB_TAG=${OTEL_TAG} # if contrib_tag not specified, but OTEL_TAG is, then set it to OTEL_TAG
fi
# Get version for contrib.go
OTEL_CONTRIB_VERSION=$(echo "${CONTRIB_TAG}" | egrep -o "${SEMVER_REGEX}")
# Strip leading v
OTEL_CONTRIB_VERSION="${OTEL_CONTRIB_VERSION#v}"
cd "$(dirname "$0")"
if ! git diff --quiet; then \
printf "Working tree is not clean, can't proceed\n"
git status
git diff
exit 1
fi
# Update contrib.go version definition
cp contrib.go contrib.go.bak
sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_CONTRIB_VERSION}\"/" ./contrib.go.bak > ./contrib.go
rm -f ./contrib.go.bak
declare -r BRANCH_NAME=pre_release_${CONTRIB_TAG}
patch_gomods() {
local pkg_=$1
local tag_=$2
# now do the same for all the directories underneath
PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's|^\.\/||' | sort)
# quote any '.' characters in the pkg name
local quoted_pkg_=${pkg_//./\\.}
for dir in $PACKAGE_DIRS; do
cp "${dir}/go.mod" "${dir}/go.mod.bak"
sed "s|${quoted_pkg_}\([^ ]*\) v[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*[^0-9]*.*$|${pkg_}\1 ${tag_}|" "${dir}/go.mod.bak" >"${dir}/go.mod"
rm -f "${dir}/go.mod.bak"
done
}
# branch off from existing main
git checkout -b "${BRANCH_NAME}" main
# Update go.mods
if [ -n "${OTEL_TAG}" ]; then
# first update the top most module
go get "go.opentelemetry.io/otel@${OTEL_TAG}"
patch_gomods go.opentelemetry.io/otel "${OTEL_TAG}"
fi
if [ -n "${CONTRIB_TAG}" ]; then
patch_gomods go.opentelemetry.io/contrib "${CONTRIB_TAG}"
fi
git diff
# Run lint to update go.sum
make lint
# Add changes and commit.
git add .
make ci
# Check whether registry links are up to date
make registry-links-check
declare COMMIT_MSG=""
if [ -n "${OTEL_TAG}" ]; then
COMMIT_MSG+="Bumping otel version to ${OTEL_TAG}"
fi
COMMIT_MSG+=". Prepare for releasing ${CONTRIB_TAG}"
git commit -m "${COMMIT_MSG}"
printf "Now run following to verify the changes.\ngit diff main\n"
printf "\nThen push the changes to upstream\n"

View File

@ -1,178 +0,0 @@
#!/usr/bin/env bash
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
readonly PROGNAME=$(basename "$0")
readonly PROGDIR=$(readlink -m "$(dirname "$0")")
readonly EXCLUDE_PACKAGES="tools"
readonly SEMVER_REGEX="v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?"
usage() {
cat <<- EOF
Usage: $PROGNAME [OPTIONS] SEMVER_TAG COMMIT_HASH
Creates git tag for all Go packages in project.
OPTIONS:
-h --help Show this help.
ARGUMENTS:
SEMVER_TAG Semantic version to tag with.
COMMIT_HASH Git commit hash to tag.
EOF
}
cmdline() {
local arg commit
for arg
do
local delim=""
case "$arg" in
# Translate long form options to short form.
--help) args="${args}-h ";;
# Pass through for everything else.
*) [[ "${arg:0:1}" == "-" ]] || delim="\""
args="${args}${delim}${arg}${delim} ";;
esac
done
# Reset and process short form options.
eval set -- "$args"
while getopts "h" OPTION
do
case $OPTION in
h)
usage
exit 0
;;
*)
echo "unknown option: $OPTION"
usage
exit 1
;;
esac
done
# Positional arguments.
shift $((OPTIND-1))
readonly TAG="$1"
if [ -z "$TAG" ]
then
echo "missing SEMVER_TAG"
usage
exit 1
fi
if [[ ! "$TAG" =~ $SEMVER_REGEX ]]
then
printf "invalid semantic version: %s\n" "$TAG"
exit 2
fi
if [[ "$( git tag --list "$TAG" )" ]]
then
printf "tag already exists: %s\n" "$TAG"
exit 2
fi
shift
commit="$1"
if [ -z "$commit" ]
then
echo "missing COMMIT_HASH"
usage
exit 1
fi
# Verify rev is for a commit and unify hashes into a complete SHA1.
readonly SHA="$( git rev-parse --quiet --verify "${commit}^{commit}" )"
if [ -z "$SHA" ]
then
printf "invalid commit hash: %s\n" "$commit"
exit 2
fi
if [ "$( git merge-base "$SHA" HEAD )" != "$SHA" ]
then
printf "commit '%s' not found on this branch\n" "$commit"
exit 2
fi
}
package_dirs() {
# Return a list of package directories in the form:
#
# package/directory/a
# package/directory/b
# deeper/package/directory/a
# ...
#
# Making sure to exclude any packages in the EXCLUDE_PACKAGES regexp.
find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
| grep -E -v "$EXCLUDE_PACKAGES" \
| sed 's/^\.\///' \
| sort
}
git_tag() {
local tag="$1"
local commit="$2"
git tag -a "$tag" -s -m "Version $tag" "$commit"
}
previous_version() {
local current="$1"
# Requires git > 2.0
git tag -l --sort=v:refname \
| grep -E "^${SEMVER_REGEX}$" \
| grep -v "$current" \
| tail -1
}
print_changes() {
local tag="$1"
local previous
previous="$( previous_version "$tag" )"
if [ -n "$previous" ]
then
printf "\nRaw changes made between %s and %s\n" "$previous" "$tag"
printf "======================================\n"
git --no-pager log --pretty=oneline "${previous}..$tag"
fi
}
main() {
local dir
cmdline "$@"
cd "$PROGDIR" || exit 3
# Create tag for root package.
git_tag "$TAG" "$SHA"
printf "created tag: %s\n" "$TAG"
# Create tag for all sub-packages.
for dir in $( package_dirs )
do
git_tag "${dir}/$TAG" "$SHA"
printf "created tag: %s\n" "${dir}/$TAG"
done
print_changes "$TAG"
}
main "$@"

3
vendor/go.opentelemetry.io/otel/.gitattributes generated vendored Normal file
View File

@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf

View File

@ -10,10 +10,12 @@ coverage.*
gen/
/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/prom-collector/prom-collector
/example/zipkin/zipkin
/example/otel-collector/otel-collector

View File

@ -4,29 +4,243 @@ run:
tests: true #Default
linters:
# Disable everything by default so upgrades do not include new "default
# enabled" linters.
disable-all: true
# Specifically enable linters we want to use.
enable:
- deadcode
- depguard
- errcheck
- godot
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
issues:
# Maximum issues count per one linter.
# Set to 0 to disable.
# Default: 50
# Setting to unlimited so the linter only is run once to debug all issues.
max-issues-per-linter: 0
# Maximum count of issues with the same text.
# Set to 0 to disable.
# Default: 3
# Setting to unlimited so the linter only is run once to debug all issues.
max-same-issues: 0
# Excluding configuration per-path, per-linter, per-text and per-source.
exclude-rules:
# helpers in tests often (rightfully) pass a *testing.T as their first argument
- path: _test\.go
text: "context.Context should be the first parameter of a function"
# TODO: Having appropriate comments for exported objects helps development,
# even for objects in internal packages. Appropriate comments for all
# exported objects should be added and this exclusion removed.
- path: '.*internal/.*'
text: "exported (method|function|type|const) (.+) should have comment or be unexported"
linters:
- revive
# Yes, they are, but it's okay in a test.
- path: _test\.go
text: "exported func.*returns unexported type.*which can be annoying to use"
linters:
- revive
# Example test functions should be treated like main.
- path: example.*_test\.go
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
include:
# revive exported should have comment or be unexported.
- EXC0012
# revive package comment should be of the form ...
- EXC0013
linters-settings:
depguard:
# Check the list against standard lib.
# Default: false
include-go-root: true
# A list of packages for the list type specified.
# Default: []
packages:
- "crypto/md5"
- "crypto/sha1"
- "crypto/**/pkix"
ignore-file-rules:
- "**/*_test.go"
additional-guards:
# Do not allow testing packages in non-test files.
- list-type: denylist
include-go-root: true
packages:
- testing
- github.com/stretchr/testify
ignore-file-rules:
- "**/*_test.go"
- "**/*test/*.go"
- "**/internal/matchers/*.go"
godot:
exclude:
# Exclude sentence fragments for lists.
- '^[ ]*[-•]'
# Exclude sentences prefixing a list.
- ':$'
goimports:
local-prefixes: go.opentelemetry.io
misspell:
locale: US
ignore-words:
- cancelled
revive:
# Sets the default failure confidence.
# This means that linting errors with less than 0.8 confidence will be ignored.
# Default: 0.8
confidence: 0.01
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
- name: blank-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
- name: bool-literal-in-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
- name: context-as-argument
disabled: false
arguments:
allowTypesBefore: "*testing.T"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
- name: context-keys-type
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
- name: deep-exit
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
- name: defer
disabled: false
arguments:
- ["call-chain", "loop"]
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
- name: dot-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
- name: duplicated-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
- name: early-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
- name: empty-block
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
- name: empty-lines
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
- name: error-naming
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
- name: error-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
- name: error-strings
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
- name: errorf
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
- name: exported
disabled: false
arguments:
- "sayRepetitiveInsteadOfStutters"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
- name: flag-parameter
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
- name: identical-branches
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- name: if-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
- name: increment-decrement
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
- name: indent-error-flow
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
- name: import-shadowing
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
- name: package-comments
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
- name: range
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
- name: range-val-in-closure
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
- name: range-val-address
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
- name: redefines-builtin-id
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
- name: string-format
disabled: false
arguments:
- - panic
- '/^[^\n]*$/'
- must not contain line breaks
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
- name: struct-tag
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
- name: superfluous-else
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
- name: time-equal
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
- name: var-naming
disabled: false
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
- name: var-declaration
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
- name: unconditional-recursion
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
- name: unexported-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
- name: unhandled-error
disabled: false
arguments:
- "fmt.Fprint"
- "fmt.Fprintf"
- "fmt.Fprintln"
- "fmt.Print"
- "fmt.Printf"
- "fmt.Println"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
- name: unnecessary-stmt
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
- name: useless-break
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- name: waitgroup-by-value
disabled: false

3
vendor/go.opentelemetry.io/otel/.lycheeignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/

29
vendor/go.opentelemetry.io/otel/.markdownlint.yaml generated vendored Normal file
View File

@ -0,0 +1,29 @@
# Default state for all rules
default: true
# ul-style
MD004: false
# hard-tabs
MD010: false
# line-length
MD013: false
# no-duplicate-header
MD024:
siblings_only: true
#single-title
MD025: false
# ol-prefix
MD029:
style: ordered
# no-inline-html
MD033: false
# fenced-code-language
MD040: false

View File

@ -8,16 +8,637 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.10.0] - 2022-09-09
### Added
- Support Go 1.19. (#3077)
Include compatibility testing and document support. (#3077)
- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107)
### Fixed
- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
- All exporters will be shutdown even if one reports an error (#3091)
- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
## [1.9.0/0.0.3] - 2022-08-01
### Added
- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
### Fixed
- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
## [1.8.0/0.31.0] - 2022-07-08
### Added
- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
### Changed
- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976)
- Move metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
### Removed
- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917)
### Deprecated
- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
Use the equivalent `Scope` struct instead. (#2977)
- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
## [1.7.0/0.30.0] - 2022-04-28
### Added
- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
### Fixed
- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
### Changed
- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
### Deprecated
- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `Iterator.Attribute` method instead. (#2790)
- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
### Removed
- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
## [0.29.0] - 2022-04-11
### Added
- The metrics global package was added back into several test files. (#2764)
- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
### Removed
- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`.
Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
### Changed
- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
## [1.6.3] - 2022-04-07
### Fixed
- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
## [1.6.2] - 2022-04-06
### Changed
- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
## [1.6.1] - 2022-03-28
### Fixed
- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
### Security
- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
## [1.6.0/0.28.0] - 2022-03-23
### ⚠️ Notice ⚠️
This update is a breaking change of the unstable Metrics API.
Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified.
### Added
- Add metrics exponential histogram support.
New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
- Add Go 1.18 to our compatibility tests. (#2679)
- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
### Changed
- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
High-level changes include:
- Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
These `InstrumentProvider`s are managed with a `Meter`.
- Synchronous and asynchronous instruments are grouped into their own packages based on value types.
- Asynchronous callbacks can now be registered with a `Meter`.
Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
### Fixed
- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
## [1.5.0] - 2022-03-16
### Added
- Log the Exporters configuration in the TracerProviders message. (#2578)
- Added support to configure the span limits with environment variables.
The following environment variables are supported. (#2606, #2637)
- `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
- `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_EVENT_COUNT_LIMIT`
- `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_LINK_COUNT_LIMIT`
- `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
If the provided environment variables are invalid (negative), the default values will be used.
- Rename the `gc` runtime name to `go` (#2560)
- Add resource container ID detection. (#2418)
- Add span attribute value length limit.
The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
The default limit for this resource is "unlimited". (#2637)
- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
This option replaces the `WithSpanLimits` option.
Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
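As an illustration of the `WithRawSpanLimits` option described above, a minimal sketch (the limit values are arbitrary examples, not recommendations):

```go
package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Start from NewSpanLimits (which also honors the OTEL_SPAN_* environment
	// variables listed above), then adjust individual limits.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeValueLengthLimit = 256 // truncate long attribute values
	limits.EventCountLimit = -1            // negative means unlimited
	limits.LinkCountLimit = 0              // zero disables span links

	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	_ = tp // register with otel.SetTracerProvider(tp) in real code
}
```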
### Changed
- Drop oldest tracestate `Member` when capacity is reached. (#2592)
- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
### Fixed
- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
- Unlimited span limits are now supported (negative values). (#2636, #2637)
### Deprecated
- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
Use `WithRawSpanLimits` instead.
That option allows setting unlimited and zero limits, this option does not.
This option will be kept until the next major version incremented release. (#2637)
## [1.4.1] - 2022-02-16
### Fixed
- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
## [1.4.0] - 2022-02-11
### Added
- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
To enable use a logger with Verbosity (V level) `>=1`. (#2500)
- Added support to configure the batch span-processor with environment variables.
The following environment variables are used. (#2515)
- `OTEL_BSP_SCHEDULE_DELAY`
- `OTEL_BSP_EXPORT_TIMEOUT`
- `OTEL_BSP_MAX_QUEUE_SIZE`.
- `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
### Changed
- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
### Deprecated
- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`.
Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
### Fixed
- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
- W3C baggage will now decode urlescaped values. (#2529)
- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
This drop order still only applies to attributes with unique keys not already contained in the span.
If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
### Removed
- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
- [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
- [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
- [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
## [1.3.0] - 2021-12-10
### ⚠️ Notice ⚠️
We have updated the project minimum supported Go version to 1.16
### Added
- Added an internal Logger.
This can be used by the SDK and API to provide users with feedback of the internal state.
To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343)
- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
### Changed
- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
### Fixed
- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381)
- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
### Deprecated
- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
### Removed
- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
- Remove the metric Bound Instruments interface and implementations. (#2399)
- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
## [1.2.0] - 2021-11-12
### Changed
- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
- The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
- The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
- The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
### Added
- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
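A small sketch of the new `MapCarrier` used with a propagator (the `TraceContext` propagator here is just an example):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.TraceContext{}
	carrier := propagation.MapCarrier{} // an in-memory map[string]string carrier

	// Inject whatever span context is in the context (none here) into the
	// carrier, then extract it back out on the receiving side.
	prop.Inject(context.Background(), carrier)
	ctx := prop.Extract(context.Background(), carrier)

	fmt.Println("propagated keys:", carrier.Keys())
	_ = ctx
}
```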
## [1.1.0] - 2021-10-27
### Added
- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
- When upgrading from the `semconv/v1.4.0` package note the following name changes:
- `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
- `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
- `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
- `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
- `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
- `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
### Changed
- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275).
### Fixed
- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
## [1.0.1] - 2021-10-01
### Fixed
- json stdout exporter no longer crashes due to concurrency bug. (#2265)
## [Metrics 0.24.0] - 2021-10-01
### Changed
- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
- The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
- The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
## [1.0.0] - 2021-09-20
This is the first stable release for the project.
This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md).
### Added
- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
### Fixed
- Slice-valued attributes can correctly be used as map keys. (#2223)
### Removed
- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
Use the typed functions and methods added to the package instead. (#2235)
- The `Key.Array` method is removed.
- The `Array` function is removed.
- The `Any` function is removed.
- The `ArrayValue` function is removed.
- The `AsArray` function is removed.
## [1.0.0-RC3] - 2021-09-02
### Added
- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
- `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
- Added the `go.opentelemetry.io/otel/example/fib` example package.
Included is an example application that computes Fibonacci numbers. (#2203)
### Changed
- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
- ValueRecorder becomes Histogram
- ValueObserver becomes Gauge
- SumObserver becomes CounterObserver
- UpDownSumObserver becomes UpDownCounterObserver
The API exported from this project is still considered experimental. (#2202)
- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
### Deprecated
- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
The functions from that package should be used instead. (#2166)
- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
Use the typed `*Slice` functions and types added to the package instead. (#2162)
- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
Use the typed functions instead. (#2181)
- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
### Removed
- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
### Fixed
- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
- Fixed typos in resources.go. (#2201)
## [1.0.0-RC2] - 2021-07-26
### Added
- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
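For example, a test could register the `SpanRecorder` roughly like this (test and tracer names are illustrative):

```go
package example_test

import (
	"context"
	"testing"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func TestSpansAreRecorded(t *testing.T) {
	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))

	_, span := tp.Tracer("example").Start(context.Background(), "operation")
	span.End()

	if got := len(sr.Ended()); got != 1 {
		t.Fatalf("expected 1 ended span, got %d", got)
	}
}
```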
### Changed
- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
### Deprecated
- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
Use the `trace.ParseTraceState` function instead. (#2122)
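A quick sketch of the `trace.ParseTraceState` replacement mentioned above (the `tracestate` value is made up):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Parse a W3C tracestate header value into a TraceState.
	ts, err := trace.ParseTraceState("vendor1=opaque1,vendor2=opaque2")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Len())          // 2 list-members
	fmt.Println(ts.Get("vendor1")) // "opaque1"
}
```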
### Removed
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
### Fixed
- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
## [Experimental Metrics v0.22.0] - 2021-07-19
### Added
- Adds HTTP support for OTLP metrics exporter. (#2022)
### Removed
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
## [1.0.0-RC1] / 0.21.0 - 2021-06-18
With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
with major version 0.
### Added
- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
- The following status codes are defined as transient errors:
| gRPC Status Code | Description |
| ---------------- | ----------- |
| 1 | Cancelled |
| 4 | Deadline Exceeded |
| 8 | Resource Exhausted |
| 10 | Aborted |
| 11 | Out of Range |
| 14 | Unavailable |
| 15 | Data Loss |
- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
This method returns the number of list-members the `TraceState` holds. (#1937)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
- Several builtin resource detectors now correctly populate the schema URL. (#1938)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
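The `TracerProvider()` accessor noted above lets helper code start follow-up spans on the same pipeline without plumbing a provider through explicitly; a rough sketch:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// doMore is a hypothetical helper: it only receives a span, yet can start a
// child span that flows through the same trace processing pipeline.
func doMore(ctx context.Context, parent trace.Span) {
	tracer := parent.TracerProvider().Tracer("helper")
	_, span := tracer.Start(ctx, "do-more")
	defer span.End()
}

func main() {
	// A no-op provider is enough to show the API shape.
	ctx, span := trace.NewNoopTracerProvider().Tracer("example").Start(context.Background(), "parent")
	defer span.End()
	doMore(ctx, span)
}
```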
### Changed
- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
`NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
This method returns the status of a span using the new `Status` type. (#1874)
- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
- Unembed `SpanContext` in `Link`. (#1877)
- Generate Semantic conventions from the specification YAML. (#1891)
- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Refactored option types according to the contribution style guide. (#1882)
- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
### Deprecated
- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
### Removed
- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options individually instead. (#1810)
- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
The `IsRecording` method returns if the span is recording or not.
A read-only span value does not need to know if updates to it will be recorded or not.
By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
Using the same tracer that created a span introduces the risk that an instrumentation library's `Tracer` is used by other code instead of its own.
The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
- The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline; see the sketch after this list. (#2009)
- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
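A minimal sketch of acquiring a `Tracer` without the removed `Span.Tracer` method, assuming a globally registered `TracerProvider` (the instrumentation name is illustrative):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	// Acquire a library-specific Tracer from the global TracerProvider.
	tracer := otel.Tracer("example.com/mylib")

	ctx, span := tracer.Start(ctx, "doWork")
	defer span.End()

	// Alternatively, derive a Tracer that shares the trace processing
	// pipeline of an existing span via its TracerProvider method.
	sameTracer := trace.SpanFromContext(ctx).TracerProvider().Tracer("example.com/mylib")
	_ = sameTracer
}
```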
### Fixed
- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
- Add logic to determine whether a channel is closed, fixing a Jaeger exporter test panic caused by closing an already closed channel. (#1870, #1973)
- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
### Security
## [0.20.0] - 2021-04-23
@ -62,6 +683,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
- `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
### Fixed
@ -73,6 +695,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
- Fixed typo for default service name in Jaeger Exporter. (#1797)
- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
### Changed
@ -89,19 +713,18 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD`
in compliance with OTel spec (#1752)
to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
- Modify Zipkin Exporter default service name, use default resouce's serviceName instead of empty. (#1777)
- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796)
- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
- Updated the Jaeger exporter to have a default enpoint, `http://localhost:14250`, for the collector. (#1824)
- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
@ -137,8 +760,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
- The Jaeger exporter `Option` type is removed.
The type is no longer used by the exporter to configure anything.
All of the previous configuration these options provided were duplicates of SDK configuration.
They have all been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830)
All the previous configurations these options provided were duplicates of SDK configuration.
They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830)
## [0.19.0] - 2021-03-18
@ -220,7 +843,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Stagger timestamps in exact aggregator tests. (#1569)
- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
- Prevent end-users from implementing some interfaces (#1575)
```
```
"otel/exporters/otlp/otlphttp".Option
"otel/exporters/stdout".Option
"otel/oteltest".Option
@ -233,7 +857,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
"otel/sdk/trace".ParentBasedSamplerOption
"otel/sdk/trace".ReadOnlySpan
"otel/sdk/trace".ReadWriteSpan
```
```
### Removed
- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
@ -815,7 +1440,7 @@ This release implements the v0.5.0 version of the OpenTelemetry specification.
- Rename `Observer` instrument to `ValueObserver`. (#734)
- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 727)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
### Fixed
@ -918,7 +1543,6 @@ This release implements the v0.5.0 version of the OpenTelemetry specification.
- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610
- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
## [0.4.2] - 2020-03-31
### Fixed
@ -978,7 +1602,7 @@ There is still a possibility of breaking changes.
- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
- The zipkin trace exporter. (#495)
- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
- The `StatusMessage` field was add to the trace `Span`. (#524)
- Add `StatusMessage` field to the trace `Span`. (#524)
- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
- The `Resource` type was added to the SDK. (#528)
- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
@ -1071,14 +1695,12 @@ There is still a possibility of breaking changes.
- `AlwaysParentSample` sampler to the trace API. (#455)
- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
### Changed
- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
- Move correlation context propagation to correlation package. (#479)
- Do not default to putting remote span context into links. (#480)
- Propagators extrac
- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
- Renamed the `export` package to `metric` to match directory structure. (#432)
@ -1226,7 +1848,6 @@ There is still a possibility of breaking changes.
This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
This was corrected. (#333)
## [0.1.2] - 2019-11-18
### Fixed
@ -1286,8 +1907,29 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.20.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD
[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0

View File

@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @Aneurysm9
CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod

View File

@ -15,11 +15,11 @@ join the meeting or get in touch on
You can view and edit the source code by cloning this repository:
```bash
```sh
git clone https://github.com/open-telemetry/opentelemetry-go.git
```
Run `make test` to run the tests instead of `go test`.
Run `make test` to run the tests instead of `go test`.
There are some generated files checked into the repo. To make sure
that the generated files are up-to-date, run `make` (or `make
@ -43,7 +43,7 @@ To create a new PR, fork the project in GitHub and clone the upstream
repo:
```sh
$ go get -d go.opentelemetry.io/otel
go get -d go.opentelemetry.io/otel
```
(This may print some warning about "build constraints exclude all Go
@ -53,7 +53,7 @@ This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
can alternatively use `git` directly with:
```sh
$ git clone https://github.com/open-telemetry/opentelemetry-go
git clone https://github.com/open-telemetry/opentelemetry-go
```
(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
@ -66,20 +66,20 @@ current working directory.
Enter the newly created directory and add your fork as a new remote:
```sh
$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
```
Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md`, and push the branch to your fork:
```sh
$ git checkout -b <YOUR_BRANCH_NAME>
git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
$ make precommit
$ git add -p
$ git commit
$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
make precommit
git add -p
git commit
git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
@ -113,6 +113,7 @@ A PR is considered to be **ready to merge** when:
one day and may be merged with a single Maintainer's approval.
* `CHANGELOG.md` has been updated to reflect what has been
added, changed, removed, or fixed.
* `README.md` has been updated if necessary.
* Urgent fix can take exception as long as it has been actively
communicated.
@ -140,8 +141,28 @@ It is preferable to have contributions follow the idioms of the
language rather than conform to specific API names or argument
patterns in the spec.
For a deeper discussion, see:
https://github.com/open-telemetry/opentelemetry-specification/issues/165
For a deeper discussion, see
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
## Documentation
Each non-example Go Module should have its own `README.md` containing:
- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
- Brief description.
- Installation instructions (and requirements if applicable).
- Hyperlink to an example. Depending on the component the example can be:
- An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
- A sample Go application with its own `README.md`, like [here](example/zipkin).
- Additional documentation sections such as:
- Configuration,
- Contributing,
- References.
[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
Moreover, it should be possible to navigate to any `README.md` from the
root `README.md`.
## Style Guide
@ -166,32 +187,40 @@ to the reasons why.
### Configuration
When creating an instantiation function for a complex `struct` it is useful
to allow variable number of options to be applied. However, the strong type
system of Go restricts the function design options. There are a few ways to
solve this problem, but we have landed on the following design.
When creating an instantiation function for a complex `type T struct`, it is
useful to allow a variable number of options to be applied. However, the strong
type system of Go restricts the function design options. There are a few ways
to solve this problem, but we have landed on the following design.
#### `config`
Configuration should be held in a `struct` named `config`, or prefixed with the
specific type name this configuration applies to if there are multiple
`config` in the package. This `struct` must contain configuration options.
`config` in the package. This type must contain configuration options.
```go
// config contains configuration options for a thing.
type config struct {
// options ...
// options ...
}
```
In general the `config` `struct` will not need to be used externally to the
In general the `config` type will not need to be used externally to the
package and should be unexported. If, however, it is expected that the user
will likely want to build custom options for the configuration, the `config`
should be exported. Please include in the documentation for the `config`
how the user can extend the configuration.
It is important that `config` are not shared across package boundaries.
Meaning a `config` from one package should not be directly used by another.
It is important that internal `config` values are not shared across package boundaries,
meaning a `config` from one package should not be directly used by another. The
one exception is the API packages. The configs from the base API, e.g.
`go.opentelemetry.io/otel/trace.TracerConfig` and
`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
by the SDK, therefore it is expected that these are exported.
When a config is exported we want to maintain forward and backward
compatibility. To achieve this, no fields should be exported; they should
instead be accessed by methods.
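As a hypothetical sketch (the `Config` name and `endpoint` field are illustrative), an exported config following this rule could look like:

```go
// Config is exported so users can build custom Options, but its fields stay
// unexported to preserve forward and backward compatibility.
type Config struct {
	endpoint string
}

// Endpoint returns the configured endpoint. Accessor methods like this, not
// exported fields, form the public surface of an exported config.
func (c Config) Endpoint() string { return c.endpoint }
```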
Optionally, it is common to include a `newConfig` function (with the same
naming scheme). This function wraps any defaults setting and looping over
@ -199,14 +228,14 @@ all options to create a configured `config`.
```go
// newConfig returns an appropriately configured config.
func newConfig([]Option) config {
// Set default values for config.
config := config{/* […] */}
for _, option := range options {
option.Apply(&config)
}
// Preform any validation here.
return config
func newConfig(options ...Option) config {
// Set default values for config.
config := config{/* […] */}
for _, option := range options {
config = option.apply(config)
}
// Perform any validation here.
return config
}
```
@ -224,10 +253,17 @@ To set the value of the options a `config` contains, a corresponding
```go
type Option interface {
Apply(*config)
apply(config) config
}
```
Having `apply` unexported makes sure that it will not be used externally.
Moreover, the interface becomes sealed so users cannot easily implement
the interface on their own.
The `apply` method should return a modified version of the passed config.
This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
The name of the interface should be prefixed in the same way the
corresponding `config` is (if at all).
@ -250,53 +286,74 @@ func With*(…) Option { … }
```go
type defaultFalseOption bool
func (o defaultFalseOption) Apply(c *config) {
c.Bool = bool(o)
func (o defaultFalseOption) apply(c config) config {
c.Bool = bool(o)
return c
}
// WithOption sets a T* to have an option included.
// WithOption sets a T to have an option included.
func WithOption() Option {
return defaultFalseOption(true)
return defaultFalseOption(true)
}
```
```go
type defaultTrueOption bool
func (o defaultTrueOption) Apply(c *config) {
c.Bool = bool(o)
func (o defaultTrueOption) apply(c config) config {
c.Bool = bool(o)
return c
}
// WithoutOption sets a T* to have Bool option excluded.
// WithoutOption sets a T to have Bool option excluded.
func WithoutOption() Option {
return defaultTrueOption(false)
return defaultTrueOption(false)
}
````
```
##### Declared Type Options
```go
type myTypeOption struct {
MyType MyType
MyType MyType
}
func (o myTypeOption) Apply(c *config) {
c.MyType = o.MyType
func (o myTypeOption) apply(c config) config {
c.MyType = o.MyType
return c
}
// WithMyType sets T* to have include MyType.
// WithMyType sets T to have include MyType.
func WithMyType(t MyType) Option {
return myTypeOption{t}
return myTypeOption{t}
}
```
##### Functional Options
```go
type optionFunc func(config) config
func (fn optionFunc) apply(c config) config {
return fn(c)
}
// WithMyType sets t as MyType.
func WithMyType(t MyType) Option {
return optionFunc(func(c config) config {
c.MyType = t
return c
})
}
```
#### Instantiation
Using this configuration pattern to configure instantiation with a `New*`
Using this configuration pattern to configure instantiation with a `NewT`
function.
```go
func NewT*(options ...Option) T* {}
func NewT(options ...Option) T {}
```
Any required parameters can be declared before the variadic `options`.
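For example, a hypothetical `T` with a required `name` parameter could be wired up as follows, reusing the `newConfig` helper from above:

```go
// NewT returns a configured T. Required parameters are declared before the
// variadic options, which are applied through newConfig.
func NewT(name string, options ...Option) T {
	cfg := newConfig(options...)
	return T{name: name, cfg: cfg} // assumes T carries its name and config
}
```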
@ -320,12 +377,12 @@ type config struct {
// DogOption apply Dog specific options.
type DogOption interface {
ApplyDog(*config)
applyDog(config) config
}
// BirdOption apply Bird specific options.
type BirdOption interface {
ApplyBird(*config)
applyBird(config) config
}
// Option apply options for all animals.
@ -335,23 +392,42 @@ type Option interface {
}
type weightOption float64
func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) }
func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
func WithWeight(w float64) Option { return weightOption(w) }
func (o weightOption) applyDog(c config) config {
c.Weight = float64(o)
return c
}
func (o weightOption) applyBird(c config) config {
c.Weight = float64(o)
return c
}
func WithWeight(w float64) Option { return weightOption(w) }
type furColorOption string
func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
func WithFurColor(c string) DogOption { return furColorOption(c) }
func (o furColorOption) applyDog(c config) config {
c.Color = string(o)
return c
}
func WithFurColor(c string) DogOption { return furColorOption(c) }
type maxAltitudeOption float64
func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func (o maxAltitudeOption) applyBird(c config) config {
c.MaxAltitude = float64(o)
return c
}
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func NewDog(name string, o ...DogOption) Dog {}
func NewBird(name string, o ...BirdOption) Bird {}
```
### Interface Type
### Interfaces
To allow other developers to better comprehend the code, it is important
to ensure it is sufficiently documented. One simple measure that contributes
@ -359,21 +435,91 @@ to this aim is self-documenting by naming method parameters. Therefore,
where appropriate, methods of every exported interface type should have
their parameters appropriately named.
#### Interface Stability
All exported stable interfaces that include the following warning in their
documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
Otherwise, stable interfaces MUST NOT be modified.
If new functionality is needed for an interface that cannot be changed it MUST
be added by including an additional interface. That added interface can be a
simple interface for the specific functionality that you want to add or it can
be a super-set of the original interface. For example, if you wanted to add a
`Close` method to the `Exporter` interface:
```go
type Exporter interface {
Export()
}
```
A new interface, `Closer`, can be added:
```go
type Closer interface {
Close()
}
```
Code that is passed the `Exporter` interface can now check to see if the passed
value also satisfies the new interface. E.g.
```go
func caller(e Exporter) {
/* ... */
if c, ok := e.(Closer); ok {
c.Close()
}
/* ... */
}
```
Alternatively, a new type that is the super-set of an `Exporter` can be created.
```go
type ClosingExporter struct {
	Exporter
}

// Close adds the new behavior to the embedded Exporter.
func (e ClosingExporter) Close() { /* ... */ }
```
This new type can be used similarly to the simple interface above, in that a
passed `Exporter` value can be asserted to the `ClosingExporter` type
and its `Close` method called.
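A minimal sketch of that assertion, mirroring the earlier `caller` example:

```go
func caller(e Exporter) {
	/* ... */
	if ce, ok := e.(ClosingExporter); ok {
		ce.Close()
	}
	/* ... */
}
```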
This super-set approach can be useful if there is explicit behavior that needs
to be coupled with the original type and passed as a unified type to a new
function, but, because of this coupling, it also limits the applicability of
the added functionality. If there exist other interfaces where this
functionality should be added, each one will need its own super-set
interface and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
## Approvers and Maintainers
Approvers:
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM)
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta
Maintainers:
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Tyler Yahn](https://github.com/MrAlias), Splunk
Emeritus:
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community

View File

@ -12,13 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
TOOLS_MOD_DIR := ./internal/tools
# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example' | sort)) $(shell find ./example -type f -name 'go.mod' -exec dirname {} \; | sort)
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
GO = go
@ -27,8 +25,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: dependabot-check license-check lint build examples test-default
ci: precommit check-clean-work-tree test-coverage
precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@ -40,114 +38,147 @@ $(TOOLS)/%: | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \
$(GO) build -o $@ $(PACKAGE)
MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
SEMCONVGEN = $(TOOLS)/semconvgen
$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
DBOTCONF = $(TOOLS)/dbotconf
$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
MISSPELL = $(TOOLS)/misspell
$(TOOLS)/misspell: PACKAGE= github.com/client9/misspell/cmd/misspell
$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
GOCOVMERGE = $(TOOLS)/gocovmerge
$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
STRINGER = $(TOOLS)/stringer
$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
PORTO = $(TOOLS)/porto
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
.PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(STRINGER) $(TOOLS)/gojq
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
# Build
.PHONY: examples generate build
examples:
@set -e; for dir in $(EXAMPLES); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build .); \
done
.PHONY: generate build
generate: $(STRINGER)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) generate $${dir}/..."; \
(cd "$${dir}" && \
PATH="$(TOOLS):$${PATH}" $(GO) generate ./...); \
done
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
generate/%: DIR=$*
generate/%: | $(STRINGER) $(PORTO)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
build: generate
# Build all package code including testing code.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build ./... && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null); \
done
build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
build/%: DIR=$*
build/%:
@echo "$(GO) build $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) build ./...
build-tests/%: DIR=$*
build-tests/%:
@echo "$(GO) build tests $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
# Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race
.PHONY: $(TEST_TARGETS) test
test-default: ARGS=-v -race
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v
test-race: ARGS=-race
test-verbose: ARGS=-v -race
$(TEST_TARGETS): test
test:
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $${dir}/..."; \
(cd "$${dir}" && \
$(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)); \
done
test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$*
test/%:
@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
.PHONY: test-coverage
test-coverage:
test-coverage: | $(GOCOVMERGE)
@set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
echo "$(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
(cd "$${dir}" && \
$(GO) list ./... \
| grep -v third_party \
| grep -v 'semconv/v.*' \
| xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
$(GO) tool cover -html=coverage.out -o coverage.html); \
[ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
done; \
sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$*
golangci-lint/%: | $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink
crosslink: | $(CROSSLINK)
@echo "Updating intra-repository dependencies in all go modules" \
&& $(CROSSLINK) --root=$(shell pwd) --prune
.PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy -compat=1.17
.PHONY: lint-modules
lint-modules: go-mod-tidy
.PHONY: lint
lint: misspell lint-modules | $(GOLANGCI_LINT)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(GOLANGCI_LINT) run --fix && \
$(GOLANGCI_LINT) run); \
done
lint: misspell lint-modules golangci-lint
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
@$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
.PHONY: misspell
misspell: | $(MISSPELL)
$(MISSPELL) -w $(ALL_DOCS)
.PHONY: lint-modules
lint-modules: | $(CROSSLINK)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "$(GO) mod tidy in $${dir}"; \
(cd "$${dir}" && \
$(GO) mod tidy); \
done
echo "cross-linking all go modules"
$(CROSSLINK)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
@ -155,18 +186,14 @@ license-check:
exit 1; \
fi
DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check
dependabot-check:
@result=$$( \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
do grep -q "$$f" .github/dependabot.yml \
|| echo "$$f"; \
done; \
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
exit 1; \
fi
dependabot-check: | $(DBOTCONF)
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
.PHONY: dependabot-generate
dependabot-generate: | $(DBOTCONF)
@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
.PHONY: check-clean-work-tree
check-clean-work-tree:
@ -177,3 +204,23 @@ check-clean-work-tree:
git status; \
exit 1; \
fi
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
@[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
@[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
@$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
@$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
@$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: prerelease
prerelease: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
COMMIT ?= "HEAD"
.PHONY: add-tags
add-tags: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}

View File

@ -1,23 +1,24 @@
# OpenTelemetry-Go
[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
## Project Status
**Warning**: this project is currently in a pre-GA phase. Backwards
incompatible changes may be introduced in subsequent minor version releases as
we work to track the evolving OpenTelemetry specification and user feedback.
| Signal | Status | Project |
| ------- | ---------- | ------- |
| Traces | Stable | N/A |
| Metrics | Alpha | N/A |
| Logs | Frozen [1] | N/A |
Our progress towards a GA release candidate is tracked in [this project
board](https://github.com/orgs/open-telemetry/projects/5). This release
candidate will follow semantic versioning and will be released with a major
version greater than zero.
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our local
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
@ -29,20 +30,41 @@ Project versioning information and stability guarantees can be found in the
### Compatibility
This project is tested on the following systems.
OpenTelemetry-Go ensures compatibility with the current supported versions of
the [Go language](https://golang.org/doc/devel/release#policy):
> Each major Go release is supported until there are two newer major releases.
> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
For versions of Go that are no longer supported upstream, opentelemetry-go will
stop ensuring compatibility with these versions in the following manner:
- A minor release of opentelemetry-go will be made to add support for the new
supported release of Go.
- The following minor release of opentelemetry-go will remove compatibility
testing for the oldest (now archived upstream) version of Go. This, and
future, releases of opentelemetry-go may include features only supported by
the currently supported versions of Go.
Currently, this project supports the following environments.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
| Ubuntu | 1.15 | amd64 |
| Ubuntu | 1.14 | amd64 |
| Ubuntu | 1.15 | 386 |
| Ubuntu | 1.14 | 386 |
| MacOS | 1.15 | amd64 |
| MacOS | 1.14 | amd64 |
| Windows | 1.15 | amd64 |
| Windows | 1.14 | amd64 |
| Windows | 1.15 | 386 |
| Windows | 1.14 | 386 |
| Ubuntu | 1.19 | amd64 |
| Ubuntu | 1.18 | amd64 |
| Ubuntu | 1.17 | amd64 |
| Ubuntu | 1.19 | 386 |
| Ubuntu | 1.18 | 386 |
| Ubuntu | 1.17 | 386 |
| MacOS | 1.19 | amd64 |
| MacOS | 1.18 | amd64 |
| MacOS | 1.17 | amd64 |
| Windows | 1.19 | amd64 |
| Windows | 1.18 | amd64 |
| Windows | 1.17 | amd64 |
| Windows | 1.19 | 386 |
| Windows | 1.18 | 386 |
| Windows | 1.17 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
@ -68,7 +90,7 @@ libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/
If you need to extend the telemetry an instrumentation library provides, or want
to build your own instrumentation for your application directly, you will need
to use the
[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api)
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
package. The included [examples](./example/) are a good way to see some
practical uses of this process.
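As a brief illustration of what direct instrumentation can look like (package, tracer, and attribute names are made up; configuring a `TracerProvider` and exporter is omitted):

```go
package app

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// handleRequest wraps a unit of work in a span from the globally registered
// TracerProvider.
func handleRequest(ctx context.Context) {
	tracer := otel.Tracer("example.com/app")

	ctx, span := tracer.Start(ctx, "handleRequest")
	defer span.End()

	span.SetAttributes(attribute.String("app.user", "example"))

	doWork(ctx) // propagate ctx so child spans are parented correctly
}

func doWork(context.Context) { /* ... */ }
```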
@ -77,15 +99,15 @@ practical uses of this process.
Now that your application is instrumented to collect telemetry, it needs an
export pipeline to send that telemetry to an observability platform.
You can find officially supported exporters [here](./exporters/) and in the
companion [contrib
repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters/metric).
Additionally, there are many vendor specific or 3rd party exporters for
OpenTelemetry. These exporters are broken down by
[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
and
[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
support.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
| Exporter | Metrics | Traces |
| :-----------------------------------: | :-----: | :----: |
| [Jaeger](./exporters/jaeger/) | | ✓ |
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
| [Prometheus](./exporters/prometheus/) | ✓ | |
| [stdout](./exporters/stdout/) | ✓ | ✓ |
| [Zipkin](./exporters/zipkin/) | | ✓ |
## Contributing

View File

@ -1,22 +1,51 @@
# Release Process
## Semantic Convention Generation
New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
2. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
export TAG="v1.7.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG"
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
## Pre-Release
First, decide which module sets will be released and update their versions
in `versions.yaml`. Commit this change to a new branch.
Update go.mod for submodules to depend on the new release which will happen in the next step.
1. Run the pre-release script. It creates a branch `pre_release_<new tag>` that will contain all release changes.
1. Run the `prerelease` make target. It creates a branch
`prerelease_<module set>_<new tag>` that will contain all release changes.
```
./pre_release.sh -t <new tag>
make prerelease MODSET=<module set>
```
2. Verify the changes.
```
git diff main
git diff ...prerelease_<module set>_<new tag>
```
This should have changed the version for all modules to be `<new tag>`.
If these changes look correct, merge them into your pre-release branch:
```sh
git merge prerelease_<module set>_<new tag>
```
3. Update the [Changelog](./CHANGELOG.md).
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
@ -32,24 +61,28 @@ Update go.mod for submodules to depend on the new release which will happen in t
4. Push the changes to upstream and create a Pull Request on GitHub.
Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
## Tag
Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
Failure to do so will leave things in a broken state.
Failure to do so will leave things in a broken state. As long as you do not
change `versions.yaml` between pre-release and this step, things should be fine.
***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
It is critical you make sure the version you push upstream is correct.
[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
1. Run the tag.sh script using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
1. For each module set that will be released, run the `add-tags` make target
using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
```
./tag.sh <new tag> <commit-hash>
make add-tags MODSET=<module set> COMMIT=<commit hash>
```
It should only be necessary to provide an explicit `COMMIT` value if the
current `HEAD` of your working directory is not the correct commit.
2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
Make sure you push all sub-modules as well.
@ -63,7 +96,6 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
Additionally, the `tag.sh` script generates commit logs since last release which can be used to supplement the release notes.
## Verify Examples
@ -76,6 +108,15 @@ After releasing verify that examples build outside of the repository.
The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
This ensures they build with the published release, not the local copy.
## Contrib Repository
## Post-Release
### Contrib Repository
Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
### Website Documentation
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification

View File

@ -12,7 +12,14 @@ is designed so the following goals can be achieved.
* [Semantic import
versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
will be used.
* Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
* Versions will comply with [semver
2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
* New methods may be added to exported API interfaces. All exported
interfaces that fall within this exception will include the following
paragraph in their public documentation.
> Warning: methods may be added to this interface in minor releases.
* If a module is version `v2` or higher, the major version of the module
must be included as a `/vN` at the end of the module paths used in
`go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
@ -143,10 +150,10 @@ The `otel` package is refactored to remove its dependencies on `otel/metric` so
it can be released as stable as well. With that done the following release
candidates are made:
* `otel`: `v1.0.0-rc.1`
* `otel/trace`: `v1.0.0-rc.1`
* `otel/baggage`: `v1.0.0-rc.1`
* `otel/sdk/trace`: `v1.0.0-rc.1`
* `otel`: `v1.0.0-RC1`
* `otel/trace`: `v1.0.0-RC1`
* `otel/baggage`: `v1.0.0-RC1`
* `otel/sdk/trace`: `v1.0.0-RC1`
The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
@ -154,10 +161,10 @@ A few minor issues are discovered in the `otel/trace` package. These issues are
resolved with some minor, but backwards incompatible, changes and are released
as a second release candidate:
* `otel`: `v1.0.0-rc.2`
* `otel/trace`: `v1.0.0-rc.2`
* `otel/baggage`: `v1.0.0-rc.2`
* `otel/sdk/trace`: `v1.0.0-rc.2`
* `otel`: `v1.0.0-RC2`
* `otel/trace`: `v1.0.0-RC2`
* `otel/baggage`: `v1.0.0-RC2`
* `otel/sdk/trace`: `v1.0.0-RC2`
Notice that all module version numbers are incremented to adhere to our
versioning policy.
@ -198,12 +205,12 @@ As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
point where they should be evaluated for stability. The `otel` module is
reintegrated with the `otel/metric` package and the following release is made:
* `otel`: `v1.1.0-rc.1`
* `otel/trace`: `v1.1.0-rc.1`
* `otel/metric`: `v1.1.0-rc.1`
* `otel/baggage`: `v1.1.0-rc.1`
* `otel/sdk/trace`: `v1.1.0-rc.1`
* `otel/sdk/metric`: `v1.1.0-rc.1`
* `otel`: `v1.1.0-RC1`
* `otel/trace`: `v1.1.0-RC1`
* `otel/metric`: `v1.1.0-RC1`
* `otel/baggage`: `v1.1.0-RC1`
* `otel/sdk/trace`: `v1.1.0-RC1`
* `otel/sdk/metric`: `v1.1.0-RC1`
All the modules are evaluated and determined to a viable stable release. They
are then released as version `v1.1.0` (the minor version is incremented to

View File

@ -12,9 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// package attribute provides key and value attributes.
//
// This package is currently in a pre-GA phase. Backwards incompatible changes
// may be introduced in subsequent minor version releases as we work to track
// the evolving OpenTelemetry specification and user feedback.
// Package attribute provides key and value attributes.
package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -21,19 +21,17 @@ import (
)
type (
// Encoder is a mechanism for serializing a label set into a
// specific string representation that supports caching, to
// avoid repeated serialization. An example could be an
// exporter encoding the label set into a wire representation.
// Encoder is a mechanism for serializing an attribute set into a specific
// string representation that supports caching, to avoid repeated
// serialization. An example could be an exporter encoding the attribute
// set into a wire representation.
Encoder interface {
// Encode returns the serialized encoding of the label
// set using its Iterator. This result may be cached
// by a attribute.Set.
// Encode returns the serialized encoding of the attribute set using
// its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of
// label encoder. Label encoders allocate these using
// `NewEncoderID`.
// ID returns a value that is unique for each class of attribute
// encoder. Attribute encoders allocate these using `NewEncoderID`.
ID() EncoderID
}
@ -43,54 +41,53 @@ type (
value uint64
}
// defaultLabelEncoder uses a sync.Pool of buffers to reduce
// the number of allocations used in encoding labels. This
// implementation encodes a comma-separated list of key=value,
// with '/'-escaping of '=', ',', and '\'.
defaultLabelEncoder struct {
// pool is a pool of labelset builders. The buffers in this
// pool grow to a size that most label encodings will not
// allocate new memory.
// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
// allocations used in encoding attributes. This implementation encodes a
// comma-separated list of key=value, with '/'-escaping of '=', ',', and
// '\'.
defaultAttrEncoder struct {
// pool is a pool of attribute set builders. The buffers in this pool
// grow to a size that most attribute encodings will not allocate new
// memory.
pool sync.Pool // *bytes.Buffer
}
)
// escapeChar is used to ensure uniqueness of the label encoding where
// keys or values contain either '=' or ','. Since there is no parser
// needed for this encoding and its only requirement is to be unique,
// this choice is arbitrary. Users will see these in some exporters
// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
// escapeChar is used to ensure uniqueness of the attribute encoding where
// keys or values contain either '=' or ','. Since there is no parser needed
// for this encoding and its only requirement is to be unique, this choice is
// arbitrary. Users will see these in some exporters (e.g., stdout), so the
// backslash ('\') is used as a conventional choice.
const escapeChar = '\\'
var (
_ Encoder = &defaultLabelEncoder{}
_ Encoder = &defaultAttrEncoder{}
// encoderIDCounter is for generating IDs for other label
// encoders.
// encoderIDCounter is for generating IDs for other attribute encoders.
encoderIDCounter uint64
defaultEncoderOnce sync.Once
defaultEncoderID = NewEncoderID()
defaultEncoderInstance *defaultLabelEncoder
defaultEncoderInstance *defaultAttrEncoder
)
// NewEncoderID returns a unique label encoder ID. It should be
// called once per each type of label encoder. Preferably in init() or
// in var definition.
// NewEncoderID returns a unique attribute encoder ID. It should be called
// once per each type of attribute encoder. Preferably in init() or in var
// definition.
func NewEncoderID() EncoderID {
return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
}
// DefaultEncoder returns a label encoder that encodes labels
// in such a way that each escaped label's key is followed by an equal
// sign and then by an escaped label's value. All key-value pairs are
// separated by a comma.
// DefaultEncoder returns an attribute encoder that encodes attributes in such
// a way that each escaped attribute's key is followed by an equal sign and
// then by an escaped attribute's value. All key-value pairs are separated by
// a comma.
//
// Escaping is done by prepending a backslash before either a
// backslash, equal sign or a comma.
// Escaping is done by prepending a backslash before either a backslash, equal
// sign or a comma.
func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultLabelEncoder{
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
New: func() interface{} {
return &bytes.Buffer{}
@ -101,15 +98,14 @@ func DefaultEncoder() Encoder {
return defaultEncoderInstance
}
// Encode is a part of an implementation of the LabelEncoder
// interface.
func (d *defaultLabelEncoder) Encode(iter Iterator) string {
// Encode is a part of an implementation of the AttributeEncoder interface.
func (d *defaultAttrEncoder) Encode(iter Iterator) string {
buf := d.pool.Get().(*bytes.Buffer)
defer d.pool.Put(buf)
buf.Reset()
for iter.Next() {
i, keyValue := iter.IndexedLabel()
i, keyValue := iter.IndexedAttribute()
if i > 0 {
_, _ = buf.WriteRune(',')
}
@ -126,8 +122,8 @@ func (d *defaultLabelEncoder) Encode(iter Iterator) string {
return buf.String()
}
// ID is a part of an implementation of the LabelEncoder interface.
func (*defaultLabelEncoder) ID() EncoderID {
// ID is a part of an implementation of the AttributeEncoder interface.
func (*defaultAttrEncoder) ID() EncoderID {
return defaultEncoderID
}
@ -137,9 +133,9 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val {
switch ch {
case '=', ',', escapeChar:
buf.WriteRune(escapeChar)
_, _ = buf.WriteRune(escapeChar)
}
buf.WriteRune(ch)
_, _ = buf.WriteRune(ch)
}
}

View File

@ -14,16 +14,16 @@
package attribute // import "go.opentelemetry.io/otel/attribute"
// Iterator allows iterating over the set of labels in order,
// sorted by key.
// Iterator allows iterating over the set of attributes in order, sorted by
// key.
type Iterator struct {
storage *Set
idx int
}
// MergeIterator supports iterating over two sets of labels while
// eliminating duplicate values from the combined set. The first
// iterator value takes precedence.
// MergeIterator supports iterating over two sets of attributes while
// eliminating duplicate values from the combined set. The first iterator
// value takes precedence.
type MergeIterator struct {
one oneIterator
two oneIterator
@ -31,13 +31,13 @@ type MergeIterator struct {
}
type oneIterator struct {
iter Iterator
done bool
label KeyValue
iter Iterator
done bool
attr KeyValue
}
// Next moves the iterator to the next position. Returns false if there
// are no more labels.
// Next moves the iterator to the next position. Returns false if there are no
// more attributes.
func (i *Iterator) Next() bool {
i.idx++
return i.idx < i.Len()
@ -45,30 +45,41 @@ func (i *Iterator) Next() bool {
// Label returns current KeyValue. Must be called only after Next returns
// true.
//
// Deprecated: Use Attribute instead.
func (i *Iterator) Label() KeyValue {
return i.Attribute()
}
// Attribute returns the current KeyValue of the Iterator. It must be called
// only after Next returns true.
func (i *Iterator) Attribute() KeyValue {
kv, _ := i.storage.Get(i.idx)
return kv
}
// Attribute is a synonym for Label().
func (i *Iterator) Attribute() KeyValue {
return i.Label()
}
// IndexedLabel returns current index and attribute. Must be called only
// after Next returns true.
//
// Deprecated: Use IndexedAttribute instead.
func (i *Iterator) IndexedLabel() (int, KeyValue) {
return i.idx, i.Label()
return i.idx, i.Attribute()
}
// Len returns a number of labels in the iterator's `*Set`.
// IndexedAttribute returns current index and attribute. Must be called only
// after Next returns true.
func (i *Iterator) IndexedAttribute() (int, KeyValue) {
return i.idx, i.Attribute()
}
// Len returns a number of attributes in the iterated set.
func (i *Iterator) Len() int {
return i.storage.Len()
}
// ToSlice is a convenience function that creates a slice of labels
// from the passed iterator. The iterator is set up to start from the
// beginning before creating the slice.
// ToSlice is a convenience function that creates a slice of attributes from
// the passed iterator. The iterator is set up to start from the beginning
// before creating the slice.
func (i *Iterator) ToSlice() []KeyValue {
l := i.Len()
if l == 0 {
@ -77,12 +88,12 @@ func (i *Iterator) ToSlice() []KeyValue {
i.idx = -1
slice := make([]KeyValue, 0, l)
for i.Next() {
slice = append(slice, i.Label())
slice = append(slice, i.Attribute())
}
return slice
}
// NewMergeIterator returns a MergeIterator for merging two label sets
// NewMergeIterator returns a MergeIterator for merging two attribute sets.
// Duplicates are resolved by taking the value from the first set.
func NewMergeIterator(s1, s2 *Set) MergeIterator {
mi := MergeIterator{
@ -102,42 +113,49 @@ func makeOne(iter Iterator) oneIterator {
func (oi *oneIterator) advance() {
if oi.done = !oi.iter.Next(); !oi.done {
oi.label = oi.iter.Label()
oi.attr = oi.iter.Attribute()
}
}
// Next returns true if there is another label available.
// Next returns true if there is another attribute available.
func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done {
return false
}
if m.one.done {
m.current = m.two.label
m.current = m.two.attr
m.two.advance()
return true
}
if m.two.done {
m.current = m.one.label
m.current = m.one.attr
m.one.advance()
return true
}
if m.one.label.Key == m.two.label.Key {
m.current = m.one.label // first iterator label value wins
if m.one.attr.Key == m.two.attr.Key {
m.current = m.one.attr // first iterator attribute value wins
m.one.advance()
m.two.advance()
return true
}
if m.one.label.Key < m.two.label.Key {
m.current = m.one.label
if m.one.attr.Key < m.two.attr.Key {
m.current = m.one.attr
m.one.advance()
return true
}
m.current = m.two.label
m.current = m.two.attr
m.two.advance()
return true
}
// Label returns the current value after Next() returns true.
//
// Deprecated: Use Attribute instead.
func (m *MergeIterator) Label() KeyValue {
return m.current
}
// Attribute returns the current value after Next() returns true.
func (m *MergeIterator) Attribute() KeyValue {
return m.current
}
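For context, a small hedged sketch of the iterator API documented above (illustrative only, same vendored package):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(attribute.Int("b", 2), attribute.Bool("a", true))
	iter := set.Iter()
	for iter.Next() {
		// IndexedAttribute yields the position and KeyValue, sorted by key.
		i, kv := iter.IndexedAttribute()
		fmt.Printf("%d: %s=%s\n", i, kv.Key, kv.Value.Emit())
	}
	// 0: a=true
	// 1: b=2
}
```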

View File

@ -20,10 +20,8 @@ type Key string
// Bool creates a KeyValue instance with a BOOL Value.
//
// If creating both key and a bool value at the same time, then
// instead of calling Key(name).Bool(value) consider using a
// convenience function provided by the api/key package -
// key.Bool(name, value).
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Bool(name, value).
func (k Key) Bool(v bool) KeyValue {
return KeyValue{
Key: k,
@ -31,51 +29,21 @@ func (k Key) Bool(v bool) KeyValue {
}
}
// Int64 creates a KeyValue instance with an INT64 Value.
// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
//
// If creating both key and an int64 value at the same time, then
// instead of calling Key(name).Int64(value) consider using a
// convenience function provided by the api/key package -
// key.Int64(name, value).
func (k Key) Int64(v int64) KeyValue {
// If creating both a key and value at the same time, use the provided
// convenience function instead -- BoolSlice(name, value).
func (k Key) BoolSlice(v []bool) KeyValue {
return KeyValue{
Key: k,
Value: Int64Value(v),
}
}
// Float64 creates a KeyValue instance with a FLOAT64 Value.
//
// If creating both key and a float64 value at the same time, then
// instead of calling Key(name).Float64(value) consider using a
// convenience function provided by the api/key package -
// key.Float64(name, value).
func (k Key) Float64(v float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64Value(v),
}
}
// String creates a KeyValue instance with a STRING Value.
//
// If creating both key and a string value at the same time, then
// instead of calling Key(name).String(value) consider using a
// convenience function provided by the api/key package -
// key.String(name, value).
func (k Key) String(v string) KeyValue {
return KeyValue{
Key: k,
Value: StringValue(v),
Value: BoolSliceValue(v),
}
}
// Int creates a KeyValue instance with an INT64 Value.
//
// If creating both key and an int value at the same time, then
// instead of calling Key(name).Int(value) consider using a
// convenience function provided by the api/key package -
// key.Int(name, value).
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int(name, value).
func (k Key) Int(v int) KeyValue {
return KeyValue{
Key: k,
@ -83,20 +51,84 @@ func (k Key) Int(v int) KeyValue {
}
}
// IntSlice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- IntSlice(name, value).
func (k Key) IntSlice(v []int) KeyValue {
return KeyValue{
Key: k,
Value: IntSliceValue(v),
}
}
// Int64 creates a KeyValue instance with an INT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64(name, value).
func (k Key) Int64(v int64) KeyValue {
return KeyValue{
Key: k,
Value: Int64Value(v),
}
}
// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64Slice(name, value).
func (k Key) Int64Slice(v []int64) KeyValue {
return KeyValue{
Key: k,
Value: Int64SliceValue(v),
}
}
// Float64 creates a KeyValue instance with a FLOAT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64(name, value).
func (k Key) Float64(v float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64Value(v),
}
}
// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64Slice(name, value).
func (k Key) Float64Slice(v []float64) KeyValue {
return KeyValue{
Key: k,
Value: Float64SliceValue(v),
}
}
// String creates a KeyValue instance with a STRING Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- String(name, value).
func (k Key) String(v string) KeyValue {
return KeyValue{
Key: k,
Value: StringValue(v),
}
}
// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- StringSlice(name, value).
func (k Key) StringSlice(v []string) KeyValue {
return KeyValue{
Key: k,
Value: StringSliceValue(v),
}
}
// Defined returns true for non-empty keys.
func (k Key) Defined() bool {
return len(k) != 0
}
// Array creates a KeyValue instance with an ARRAY Value.
//
// If creating both key and an array value at the same time, then
// instead of calling Key(name).Array(value) consider using a
// convenience function provided by the api/key package -
// key.Array(name, value).
func (k Key) Array(v interface{}) KeyValue {
return KeyValue{
Key: k,
Value: ArrayValue(v),
}
}
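As the updated comments note, each `Key` method has a matching package-level convenience constructor. A brief illustrative sketch of that equivalence (not part of this change):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kv1 := attribute.Key("http.method").String("GET")
	kv2 := attribute.String("http.method", "GET") // convenience form
	fmt.Println(kv1 == kv2)                       // true: same key, same STRING value
}
```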

View File

@ -15,9 +15,7 @@
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
"reflect"
)
// KeyValue holds a key and value pair.
@ -28,81 +26,61 @@ type KeyValue struct {
// Valid returns if kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key != "" && kv.Value.Type() != INVALID
return kv.Key.Defined() && kv.Value.Type() != INVALID
}
// Bool creates a new key-value pair with a passed name and a bool
// value.
// Bool creates a KeyValue with a BOOL Value type.
func Bool(k string, v bool) KeyValue {
return Key(k).Bool(v)
}
// Int64 creates a new key-value pair with a passed name and an int64
// value.
// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
func BoolSlice(k string, v []bool) KeyValue {
return Key(k).BoolSlice(v)
}
// Int creates a KeyValue with an INT64 Value type.
func Int(k string, v int) KeyValue {
return Key(k).Int(v)
}
// IntSlice creates a KeyValue with an INT64SLICE Value type.
func IntSlice(k string, v []int) KeyValue {
return Key(k).IntSlice(v)
}
// Int64 creates a KeyValue with an INT64 Value type.
func Int64(k string, v int64) KeyValue {
return Key(k).Int64(v)
}
// Float64 creates a new key-value pair with a passed name and a float64
// value.
// Int64Slice creates a KeyValue with an INT64SLICE Value type.
func Int64Slice(k string, v []int64) KeyValue {
return Key(k).Int64Slice(v)
}
// Float64 creates a KeyValue with a FLOAT64 Value type.
func Float64(k string, v float64) KeyValue {
return Key(k).Float64(v)
}
// String creates a new key-value pair with a passed name and a string
// value.
// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
func Float64Slice(k string, v []float64) KeyValue {
return Key(k).Float64Slice(v)
}
// String creates a KeyValue with a STRING Value type.
func String(k, v string) KeyValue {
return Key(k).String(v)
}
// StringSlice creates a KeyValue with a STRINGSLICE Value type.
func StringSlice(k string, v []string) KeyValue {
return Key(k).StringSlice(v)
}
// Stringer creates a new key-value pair with a passed name and a string
// value generated by the passed Stringer interface.
func Stringer(k string, v fmt.Stringer) KeyValue {
return Key(k).String(v.String())
}
// Int creates a new key-value pair instance with a passed name and
// either an int32 or an int64 value, depending on whether the int
// type is 32 or 64 bits wide.
func Int(k string, v int) KeyValue {
return Key(k).Int(v)
}
// Array creates a new key-value pair with a passed name and an array.
// Only arrays of primitive type are supported.
func Array(k string, v interface{}) KeyValue {
return Key(k).Array(v)
}
// Any creates a new key-value pair instance with a passed name and
// automatic type inference. This is slower, and not type-safe.
func Any(k string, value interface{}) KeyValue {
if value == nil {
return String(k, "<nil>")
}
if stringer, ok := value.(fmt.Stringer); ok {
return String(k, stringer.String())
}
rv := reflect.ValueOf(value)
switch rv.Kind() {
case reflect.Array, reflect.Slice:
return Array(k, value)
case reflect.Bool:
return Bool(k, rv.Bool())
case reflect.Int, reflect.Int8, reflect.Int16:
return Int(k, int(rv.Int()))
case reflect.Int64:
return Int64(k, rv.Int())
case reflect.Float64:
return Float64(k, rv.Float())
case reflect.String:
return String(k, rv.String())
}
if b, err := json.Marshal(value); b != nil && err == nil {
return String(k, string(b))
}
return String(k, fmt.Sprint(value))
}

View File

@ -18,57 +18,45 @@ import (
"encoding/json"
"reflect"
"sort"
"sync"
)
type (
// Set is the representation for a distinct label set. It
// manages an immutable set of labels, with an internal cache
// for storing label encodings.
// Set is the representation for a distinct attribute set. It manages an
// immutable set of attributes, with an internal cache for storing
// attribute encodings.
//
// This type supports the `Equivalent` method of comparison
// using values of type `Distinct`.
//
// This type is used to implement:
// 1. Metric labels
// 2. Resource sets
// 3. Correlation map (TODO)
// This type supports the Equivalent method of comparison using values of
// type Distinct.
Set struct {
equivalent Distinct
lock sync.Mutex
encoders [maxConcurrentEncoders]EncoderID
encoded [maxConcurrentEncoders]string
}
// Distinct wraps a variable-size array of `KeyValue`,
// constructed with keys in sorted order. This can be used as
// a map key or for equality checking between Sets.
// Distinct wraps a variable-size array of KeyValue, constructed with keys
// in sorted order. This can be used as a map key or for equality checking
// between Sets.
Distinct struct {
iface interface{}
}
// Filter supports removing certain labels from label sets.
// When the filter returns true, the label will be kept in
// the filtered label set. When the filter returns false, the
// label is excluded from the filtered label set, and the
// label instead appears in the `removed` list of excluded labels.
// Filter supports removing certain attributes from attribute sets. When
// the filter returns true, the attribute will be kept in the filtered
// attribute set. When the filter returns false, the attribute is excluded
// from the filtered attribute set, and the attribute instead appears in
// the removed list of excluded attributes.
Filter func(KeyValue) bool
// Sortable implements `sort.Interface`, used for sorting
// `KeyValue`. This is an exported type to support a
// memory optimization. A pointer to one of these is needed
// for the call to `sort.Stable()`, which the caller may
// provide in order to avoid an allocation. See
// `NewSetWithSortable()`.
// Sortable implements sort.Interface, used for sorting KeyValue. This is
// an exported type to support a memory optimization. A pointer to one of
// these is needed for the call to sort.Stable(), which the caller may
// provide in order to avoid an allocation. See NewSetWithSortable().
Sortable []KeyValue
)
var (
// keyValueType is used in `computeDistinctReflect`.
// keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{})
// emptySet is returned for empty label sets.
// emptySet is returned for empty attribute sets.
emptySet = &Set{
equivalent: Distinct{
iface: [0]KeyValue{},
@ -76,8 +64,6 @@ var (
}
)
const maxConcurrentEncoders = 3
// EmptySet returns a reference to a Set with no elements.
//
// This is a convenience provided for optimized calling utility.
@ -85,30 +71,30 @@ func EmptySet() *Set {
return emptySet
}
// reflect abbreviates `reflect.ValueOf`.
func (d Distinct) reflect() reflect.Value {
// reflectValue abbreviates reflect.ValueOf(d).
func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
}
// Valid returns true if this value refers to a valid `*Set`.
// Valid returns true if this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
}
// Len returns the number of labels in this set.
// Len returns the number of attributes in this set.
func (l *Set) Len() int {
if l == nil || !l.equivalent.Valid() {
return 0
}
return l.equivalent.reflect().Len()
return l.equivalent.reflectValue().Len()
}
// Get returns the KeyValue at ordered position `idx` in this set.
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
if l == nil {
return KeyValue{}, false
}
value := l.equivalent.reflect()
value := l.equivalent.reflectValue()
if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for
@ -124,7 +110,7 @@ func (l *Set) Value(k Key) (Value, bool) {
if l == nil {
return Value{}, false
}
rValue := l.equivalent.reflect()
rValue := l.equivalent.reflectValue()
vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool {
@ -149,7 +135,7 @@ func (l *Set) HasValue(k Key) bool {
return ok
}
// Iter returns an iterator for visiting the labels in this set.
// Iter returns an iterator for visiting the attributes in this set.
func (l *Set) Iter() Iterator {
return Iterator{
storage: l,
@ -157,18 +143,17 @@ func (l *Set) Iter() Iterator {
}
}
// ToSlice returns the set of labels belonging to this set, sorted,
// where keys appear no more than once.
// ToSlice returns the set of attributes belonging to this set, sorted, where
// keys appear no more than once.
func (l *Set) ToSlice() []KeyValue {
iter := l.Iter()
return iter.ToSlice()
}
// Equivalent returns a value that may be used as a map key. The
// Distinct type guarantees that the result will equal the equivalent
// Distinct value of any label set with the same elements as this,
// where sets are made unique by choosing the last value in the input
// for any given key.
// Equivalent returns a value that may be used as a map key. The Distinct type
// guarantees that the result will equal the equivalent Distinct value of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
if l == nil || !l.equivalent.Valid() {
return emptySet.equivalent
@ -181,52 +166,13 @@ func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
}
// Encoded returns the encoded form of this set, according to
// `encoder`. The result will be cached in this `*Set`.
// Encoded returns the encoded form of this set, according to encoder.
func (l *Set) Encoded(encoder Encoder) string {
if l == nil || encoder == nil {
return ""
}
id := encoder.ID()
if !id.Valid() {
// Invalid IDs are not cached.
return encoder.Encode(l.Iter())
}
var lookup *string
l.lock.Lock()
for idx := 0; idx < maxConcurrentEncoders; idx++ {
if l.encoders[idx] == id {
lookup = &l.encoded[idx]
break
}
}
l.lock.Unlock()
if lookup != nil {
return *lookup
}
r := encoder.Encode(l.Iter())
l.lock.Lock()
defer l.lock.Unlock()
for idx := 0; idx < maxConcurrentEncoders; idx++ {
if l.encoders[idx] == id {
return l.encoded[idx]
}
if !l.encoders[idx].Valid() {
l.encoders[idx] = id
l.encoded[idx] = r
return r
}
}
// TODO: This is a performance cliff. Find a way for this to
// generate a warning.
return r
return encoder.Encode(l.Iter())
}
func empty() Set {
@ -235,39 +181,38 @@ func empty() Set {
}
}
// NewSet returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// Except for empty sets, this method adds an additional allocation
// compared with calls that include a `*Sortable`.
// Except for empty sets, this method adds an additional allocation compared
// with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set {
// Check for empty set.
if len(kvs) == 0 {
return empty()
}
s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
return s //nolint
return s
}
// NewSetWithSortable returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
// NewSetWithSortable returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// This call includes a `*Sortable` option as a memory optimization.
// This call includes a Sortable option as a memory optimization.
func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
// Check for empty set.
if len(kvs) == 0 {
return empty()
}
s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
return s //nolint
return s
}
// NewSetWithFiltered returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
// NewSetWithFiltered returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
// This call includes a `Filter` to include/exclude label keys from
// the return value. Excluded keys are returned as a slice of label
// values.
// This call includes a Filter to include/exclude attribute keys from the
// return value. Excluded keys are returned as a slice of attribute values.
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set.
if len(kvs) == 0 {
@ -276,7 +221,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
}
// NewSetWithSortableFiltered returns a new `Set`.
// NewSetWithSortableFiltered returns a new Set.
//
// Duplicate keys are eliminated by taking the last value. This
// re-orders the input slice so that unique last-values are contiguous
@ -288,17 +233,16 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// - Caller sees the reordering, but doesn't lose values
// - Repeated call preserve last-value wins.
//
// Note that methods are defined on `*Set`, although this returns `Set`.
// Callers can avoid memory allocations by:
// Note that methods are defined on Set, although this returns Set. Callers
// can avoid memory allocations by:
//
// - allocating a `Sortable` for use as a temporary in this method
// - allocating a `Set` for storing the return value of this
// constructor.
// - allocating a Sortable for use as a temporary in this method
// - allocating a Set for storing the return value of this constructor.
//
// The result maintains a cache of encoded labels, by attribute.EncoderID.
// The result maintains a cache of encoded attributes, by attribute.EncoderID.
// This value should not be copied after its first use.
//
// The second `[]KeyValue` return value is a list of labels that were
// The second []KeyValue return value is a list of attributes that were
// excluded by the Filter (if non-nil).
func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
// Check for empty set.
@ -338,13 +282,13 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
}, nil
}
// filterSet reorders `kvs` so that included keys are contiguous at
// the end of the slice, while excluded keys precede the included keys.
// filterSet reorders kvs so that included keys are contiguous at the end of
// the slice, while excluded keys precede the included keys.
func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
var excluded []KeyValue
// Move labels that do not match the filter so
// they're adjacent before calling computeDistinct().
// Move attributes that do not match the filter so they're adjacent before
// calling computeDistinct().
distinctPosition := len(kvs)
// Swap indistinct keys forward and distinct keys toward the
@ -364,8 +308,8 @@ func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
}, excluded
}
// Filter returns a filtered copy of this `Set`. See the
// documentation for `NewSetWithSortableFiltered` for more details.
// Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil {
return Set{
@ -378,9 +322,9 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
return filterSet(l.ToSlice(), re)
}
// computeDistinct returns a `Distinct` using either the fixed- or
// reflect-oriented code path, depending on the size of the input.
// The input slice is assumed to already be sorted and de-duplicated.
// computeDistinct returns a Distinct using either the fixed- or
// reflect-oriented code path, depending on the size of the input. The input
// slice is assumed to already be sorted and de-duplicated.
func computeDistinct(kvs []KeyValue) Distinct {
iface := computeDistinctFixed(kvs)
if iface == nil {
@ -391,8 +335,8 @@ func computeDistinct(kvs []KeyValue) Distinct {
}
}
// computeDistinctFixed computes a `Distinct` for small slices. It
// returns nil if the input is too large for this code path.
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} {
switch len(kvs) {
case 1:
@ -440,8 +384,8 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
}
}
// computeDistinctReflect computes a `Distinct` using reflection,
// works for any size input.
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
func computeDistinctReflect(kvs []KeyValue) interface{} {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
@ -450,22 +394,31 @@ func computeDistinctReflect(kvs []KeyValue) interface{} {
return at.Interface()
}
// MarshalJSON returns the JSON encoding of the `*Set`.
// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface)
}
// Len implements `sort.Interface`.
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()
}
return kvs
}
// Len implements sort.Interface.
func (l *Sortable) Len() int {
return len(*l)
}
// Swap implements `sort.Interface`.
// Swap implements sort.Interface.
func (l *Sortable) Swap(i, j int) {
(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
}
// Less implements `sort.Interface`.
// Less implements sort.Interface.
func (l *Sortable) Less(i, j int) bool {
return (*l)[i].Key < (*l)[j].Key
}
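To make the Set semantics above concrete (Distinct as a map key, Filter splitting kept and excluded attributes), here is a hedged usage sketch against the same vendored package:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	a := attribute.NewSet(attribute.String("k", "v"), attribute.Int("n", 1))
	b := attribute.NewSet(attribute.Int("n", 1), attribute.String("k", "v"))

	// Equivalent returns a comparable Distinct value, usable as a map key.
	seen := map[attribute.Distinct]int{}
	seen[a.Equivalent()]++
	seen[b.Equivalent()]++
	fmt.Println(len(seen)) // 1: same elements produce the same Distinct value

	// Filter keeps attributes for which the predicate returns true and
	// returns the excluded ones separately.
	kept, removed := a.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key != "n"
	})
	fmt.Println(kept.Len(), len(removed)) // 1 1
}
```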

View File

@ -13,12 +13,15 @@ func _() {
_ = x[INT64-2]
_ = x[FLOAT64-3]
_ = x[STRING-4]
_ = x[ARRAY-5]
_ = x[BOOLSLICE-5]
_ = x[INT64SLICE-6]
_ = x[FLOAT64SLICE-7]
_ = x[STRINGSLICE-8]
}
const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGARRAY"
const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 34}
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {

View File

@ -17,7 +17,6 @@ package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"go.opentelemetry.io/otel/internal"
@ -26,16 +25,14 @@ import (
//go:generate stringer -type=Type
// Type describes the type of the data Value holds.
type Type int
type Type int // nolint: revive // redefines builtin Type.
// Value represents the value part in key-value pairs.
type Value struct {
vtype Type
numeric uint64
stringly string
// TODO Lazy value type?
array interface{}
slice interface{}
}
const (
@ -49,10 +46,14 @@ const (
FLOAT64
// STRING is a string Type Value.
STRING
// ARRAY is an array Type Value used to store 1-dimensional slices or
// arrays of bool, int, int32, int64, uint, uint32, uint64, float,
// float32, float64, or string types.
ARRAY
// BOOLSLICE is a slice of booleans Type Value.
BOOLSLICE
// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
INT64SLICE
// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
FLOAT64SLICE
// STRINGSLICE is a slice of strings Type Value.
STRINGSLICE
)
// BoolValue creates a BOOL Value.
@ -63,6 +64,33 @@ func BoolValue(v bool) Value {
}
}
// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
cp := make([]bool, len(v))
copy(cp, v)
return Value{
vtype: BOOLSLICE,
slice: &cp,
}
}
// IntValue creates an INT64 Value.
func IntValue(v int) Value {
return Int64Value(int64(v))
}
// IntSliceValue creates an INT64SLICE Value from a []int.
func IntSliceValue(v []int) Value {
cp := make([]int64, 0, len(v))
for _, i := range v {
cp = append(cp, int64(i))
}
return Value{
vtype: INT64SLICE,
slice: &cp,
}
}
// Int64Value creates an INT64 Value.
func Int64Value(v int64) Value {
return Value{
@ -71,6 +99,16 @@ func Int64Value(v int64) Value {
}
}
// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
cp := make([]int64, len(v))
copy(cp, v)
return Value{
vtype: INT64SLICE,
slice: &cp,
}
}
// Float64Value creates a FLOAT64 Value.
func Float64Value(v float64) Value {
return Value{
@ -79,6 +117,16 @@ func Float64Value(v float64) Value {
}
}
// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
cp := make([]float64, len(v))
copy(cp, v)
return Value{
vtype: FLOAT64SLICE,
slice: &cp,
}
}
// StringValue creates a STRING Value.
func StringValue(v string) Value {
return Value{
@ -87,38 +135,14 @@ func StringValue(v string) Value {
}
}
// IntValue creates an INT64 Value.
func IntValue(v int) Value {
return Int64Value(int64(v))
}
// ArrayValue creates an ARRAY value from an array or slice.
// Only arrays or slices of bool, int, int64, float, float64, or string types are allowed.
// Specifically, arrays and slices can not contain other arrays, slices, structs, or non-standard
// types. If the passed value is not an array or slice of these types an
// INVALID value is returned.
func ArrayValue(v interface{}) Value {
switch reflect.TypeOf(v).Kind() {
case reflect.Array, reflect.Slice:
// get array type regardless of dimensions
typ := reflect.TypeOf(v).Elem()
kind := typ.Kind()
switch kind {
case reflect.Bool, reflect.Int, reflect.Int64,
reflect.Float64, reflect.String:
val := reflect.ValueOf(v)
length := val.Len()
frozen := reflect.Indirect(reflect.New(reflect.ArrayOf(length, typ)))
reflect.Copy(frozen, val)
return Value{
vtype: ARRAY,
array: frozen.Interface(),
}
default:
return Value{vtype: INVALID}
}
// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
cp := make([]string, len(v))
copy(cp, v)
return Value{
vtype: STRINGSLICE,
slice: &cp,
}
return Value{vtype: INVALID}
}
// Type returns a type of the Value.
@ -132,27 +156,58 @@ func (v Value) AsBool() bool {
return internal.RawToBool(v.numeric)
}
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
if s, ok := v.slice.(*[]bool); ok {
return *s
}
return nil
}
// AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64.
func (v Value) AsInt64() int64 {
return internal.RawToInt64(v.numeric)
}
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
if s, ok := v.slice.(*[]int64); ok {
return *s
}
return nil
}
// AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64.
func (v Value) AsFloat64() float64 {
return internal.RawToFloat64(v.numeric)
}
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
if s, ok := v.slice.(*[]float64); ok {
return *s
}
return nil
}
// AsString returns the string value. Make sure that the Value's type
// is STRING.
func (v Value) AsString() string {
return v.stringly
}
// AsArray returns the array Value as an interface{}.
func (v Value) AsArray() interface{} {
return v.array
// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
if s, ok := v.slice.(*[]string); ok {
return *s
}
return nil
}
type unknownValueType struct{}
@ -160,16 +215,22 @@ type unknownValueType struct{}
// AsInterface returns Value's data as interface{}.
func (v Value) AsInterface() interface{} {
switch v.Type() {
case ARRAY:
return v.AsArray()
case BOOL:
return v.AsBool()
case BOOLSLICE:
return v.AsBoolSlice()
case INT64:
return v.AsInt64()
case INT64SLICE:
return v.AsInt64Slice()
case FLOAT64:
return v.AsFloat64()
case FLOAT64SLICE:
return v.AsFloat64Slice()
case STRING:
return v.stringly
case STRINGSLICE:
return v.AsStringSlice()
}
return unknownValueType{}
}
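A short sketch of the new slice-valued types that replace ARRAY (illustrative only, same vendored package):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.StringSliceValue([]string{"a", "b"})
	fmt.Println(v.Type())          // STRINGSLICE
	fmt.Println(v.AsStringSlice()) // [a b]

	// AsInterface picks the accessor matching the stored type.
	kv := attribute.Int64Slice("latencies.ms", []int64{3, 5, 8})
	fmt.Println(kv.Value.AsInterface().([]int64)) // [3 5 8]
}
```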
@ -177,14 +238,20 @@ func (v Value) AsInterface() interface{} {
// Emit returns a string representation of Value's data.
func (v Value) Emit() string {
switch v.Type() {
case ARRAY:
return fmt.Sprint(v.array)
case BOOLSLICE:
return fmt.Sprint(*(v.slice.(*[]bool)))
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
return fmt.Sprint(*(v.slice.(*[]int64)))
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
return fmt.Sprint(*(v.slice.(*[]float64)))
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
return fmt.Sprint(*(v.slice.(*[]string)))
case STRING:
return v.stringly
default:

562
vendor/go.opentelemetry.io/otel/baggage/baggage.go generated vendored Normal file
View File

@ -0,0 +1,562 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage"
import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"go.opentelemetry.io/otel/internal/baggage"
)
const (
maxMembers = 180
maxBytesPerMembers = 4096
maxBytesPerBaggageString = 8192
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)
var (
keyRe = regexp.MustCompile(`^` + keyDef + `$`)
valueRe = regexp.MustCompile(`^` + valueDef + `$`)
propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
errInvalidKey = errors.New("invalid key")
errInvalidValue = errors.New("invalid value")
errInvalidProperty = errors.New("invalid baggage list-member property")
errInvalidMember = errors.New("invalid baggage list-member")
errMemberNumber = errors.New("too many list-members in baggage-string")
errMemberBytes = errors.New("list-member too large")
errBaggageBytes = errors.New("baggage-string too large")
)
// Property is an additional metadata entry for a baggage list-member.
type Property struct {
key, value string
// hasValue distinguishes a property whose value really is the zero-value
// ("") from one that has no value at all.
hasValue bool
// hasData indicates whether the created property contains data or not.
// Properties that do not contain data are invalid with no other check
// required.
hasData bool
}
// NewKeyProperty returns a new Property for key.
//
// If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) {
if !keyRe.MatchString(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
p := Property{key: key, hasData: true}
return p, nil
}
// NewKeyValueProperty returns a new Property for key with value.
//
// If key or value are invalid, an error will be returned.
func NewKeyValueProperty(key, value string) (Property, error) {
if !keyRe.MatchString(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
p := Property{
key: key,
value: value,
hasValue: true,
hasData: true,
}
return p, nil
}
func newInvalidProperty() Property {
return Property{}
}
// parseProperty attempts to decode a Property from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
func parseProperty(property string) (Property, error) {
if property == "" {
return newInvalidProperty(), nil
}
match := propertyRe.FindStringSubmatch(property)
if len(match) != 4 {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
p := Property{hasData: true}
if match[1] != "" {
p.key = match[1]
} else {
p.key = match[2]
p.value = match[3]
p.hasValue = true
}
return p, nil
}
// validate ensures p conforms to the W3C Baggage specification, returning an
// error otherwise.
func (p Property) validate() error {
errFunc := func(err error) error {
return fmt.Errorf("invalid property: %w", err)
}
if !p.hasData {
return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
}
if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
if p.hasValue && !valueRe.MatchString(p.value) {
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
}
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
return nil
}
// Key returns the Property key.
func (p Property) Key() string {
return p.key
}
// Value returns the Property value. Additionally, a boolean value is returned
// indicating whether the Property has a value at all, distinguishing an
// empty value from an unset one.
func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
// String encodes Property into a string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
}
return p.key
}
type properties []Property
func fromInternalProperties(iProps []baggage.Property) properties {
if len(iProps) == 0 {
return nil
}
props := make(properties, len(iProps))
for i, p := range iProps {
props[i] = Property{
key: p.Key,
value: p.Value,
hasValue: p.HasValue,
}
}
return props
}
func (p properties) asInternal() []baggage.Property {
if len(p) == 0 {
return nil
}
iProps := make([]baggage.Property, len(p))
for i, prop := range p {
iProps[i] = baggage.Property{
Key: prop.key,
Value: prop.value,
HasValue: prop.hasValue,
}
}
return iProps
}
func (p properties) Copy() properties {
if len(p) == 0 {
return nil
}
props := make(properties, len(p))
copy(props, p)
return props
}
// validate ensures each Property in p conforms to the W3C Baggage
// specification, returning an error otherwise.
func (p properties) validate() error {
for _, prop := range p {
if err := prop.validate(); err != nil {
return err
}
}
return nil
}
// String encodes properties into a string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
for i, prop := range p {
props[i] = prop.String()
}
return strings.Join(props, propertyDelimiter)
}
// Member is a list-member of a baggage-string as defined by the W3C Baggage
// specification.
type Member struct {
key, value string
properties properties
// hasData indicates whether the created member contains data or not.
// Members that do not contain data are invalid with no other check
// required.
hasData bool
}
// NewMember returns a new Member from the passed arguments. An error is
// returned if the created Member would be invalid according to the W3C
// Baggage specification.
func NewMember(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
value: value,
properties: properties(props).Copy(),
hasData: true,
}
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
return m, nil
}
func newInvalidMember() Member {
return Member{}
}
// parseMember attempts to decode a Member from the passed string. It returns
// an error if the input is invalid according to the W3C Baggage
// specification.
func parseMember(member string) (Member, error) {
if n := len(member); n > maxBytesPerMembers {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
var (
key, value string
props properties
)
parts := strings.SplitN(member, propertyDelimiter, 2)
switch len(parts) {
case 2:
// Parse the member properties.
for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
}
props = append(props, p)
}
fallthrough
case 1:
// Parse the member key/value pair.
// Take into account a value can contain equal signs (=).
kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
if len(kv) != 2 {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(kv[0])
var err error
value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
default:
// This should never happen unless a developer has changed the string
// splitting somehow. Panic instead of failing silently and allowing
// the bug to slip past the CI checks.
panic("failed to parse baggage member")
}
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
// validate ensures m conforms to the W3C Baggage specification, returning an
// error otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
if !keyRe.MatchString(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
if !valueRe.MatchString(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
return m.properties.validate()
}
// Key returns the Member key.
func (m Member) Key() string { return m.key }
// Value returns the Member value.
func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
// A key is just an ASCII string, but a value is URL encoded UTF-8.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
return s
}
// Baggage is a list of baggage members representing the baggage-string as
// defined by the W3C Baggage specification.
type Baggage struct { //nolint:golint
list baggage.List
}
// New returns a new valid Baggage. It returns an error if the provided members
// would result in a Baggage exceeding the limits set by the W3C Baggage
// specification.
//
// It expects all the provided members to have already been validated.
func New(members ...Member) (Baggage, error) {
if len(members) == 0 {
return Baggage{}, nil
}
b := make(baggage.List)
for _, m := range members {
if !m.hasData {
return Baggage{}, errInvalidMember
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
}
// Check member numbers after deduplication.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
}
bag := Baggage{b}
if n := len(bag.String()); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
return bag, nil
}
// Parse attempts to decode a baggage-string from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
//
// If there are duplicate list-members contained in baggage, the last one
// defined (reading left-to-right) will be the only one kept. This diverges
// from the W3C Baggage specification which allows duplicate list-members, but
// conforms to the OpenTelemetry Baggage specification.
func Parse(bStr string) (Baggage, error) {
if bStr == "" {
return Baggage{}, nil
}
if n := len(bStr); n > maxBytesPerBaggageString {
return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
}
b := make(baggage.List)
for _, memberStr := range strings.Split(bStr, listDelimiter) {
m, err := parseMember(memberStr)
if err != nil {
return Baggage{}, err
}
// OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{
Value: m.value,
Properties: m.properties.asInternal(),
}
}
// OpenTelemetry does not allow for duplicate list-members, but the W3C
// specification does. Now that we have deduplicated, ensure the baggage
// does not exceed list-member limits.
if len(b) > maxMembers {
return Baggage{}, errMemberNumber
}
return Baggage{b}, nil
}
// Member returns the baggage list-member identified by key.
//
// If there is no list-member matching the passed key the returned Member will
// be a zero-value Member.
// The returned member is not validated, as we assume the validation happened
// when it was added to the Baggage.
func (b Baggage) Member(key string) Member {
v, ok := b.list[key]
if !ok {
// We do not need to worry about distinguishing between the situation
// where a zero-valued Member is included in the Baggage because a
// zero-valued Member is invalid according to the W3C Baggage
// specification (it has an empty key).
return newInvalidMember()
}
return Member{
key: key,
value: v.Value,
properties: fromInternalProperties(v.Properties),
}
}
// Members returns all the baggage list-members.
// The order of the returned list-members does not have significance.
//
// The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage.
func (b Baggage) Members() []Member {
if len(b.list) == 0 {
return nil
}
members := make([]Member, 0, len(b.list))
for k, v := range b.list {
members = append(members, Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
})
}
return members
}
// SetMember returns a copy of the Baggage with the member included. If the
// baggage contains a Member with the same key the existing Member is
// replaced.
//
// If member is invalid according to the W3C Baggage specification, an error
// is returned with the original Baggage.
func (b Baggage) SetMember(member Member) (Baggage, error) {
if !member.hasData {
return b, errInvalidMember
}
n := len(b.list)
if _, ok := b.list[member.key]; !ok {
n++
}
list := make(baggage.List, n)
for k, v := range b.list {
// Do not copy if we are just going to overwrite.
if k == member.key {
continue
}
list[k] = v
}
list[member.key] = baggage.Item{
Value: member.value,
Properties: member.properties.asInternal(),
}
return Baggage{list: list}, nil
}
// DeleteMember returns a copy of the Baggage with the list-member identified
// by key removed.
func (b Baggage) DeleteMember(key string) Baggage {
n := len(b.list)
if _, ok := b.list[key]; ok {
n--
}
list := make(baggage.List, n)
for k, v := range b.list {
if k == key {
continue
}
list[k] = v
}
return Baggage{list: list}
}
// Len returns the number of list-members in the Baggage.
func (b Baggage) Len() int {
return len(b.list)
}
// String encodes Baggage into a string compliant with the W3C Baggage
// specification. The returned string will be invalid if the Baggage contains
// any invalid list-members.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
members = append(members, Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
}.String())
}
return strings.Join(members, listDelimiter)
}
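For reviewers, a hedged sketch of the public API added by this file: building members, rendering the baggage-string, and the last-one-wins behaviour of Parse (illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m1, _ := baggage.NewMember("userId", "alice")
	m2, _ := baggage.NewMember("serverNode", "node-42")
	bag, _ := baggage.New(m1, m2)
	// Member order in the encoded string is not guaranteed.
	fmt.Println(bag.String()) // e.g. userId=alice,serverNode=node-42

	// Duplicate list-members resolve last-one-wins, per the OpenTelemetry spec.
	parsed, _ := baggage.Parse("userId=alice,userId=bob")
	fmt.Println(parsed.Member("userId").Value()) // bob
}
```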

39
vendor/go.opentelemetry.io/otel/baggage/context.go generated vendored Normal file
View File

@ -0,0 +1,39 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage"
import (
"context"
"go.opentelemetry.io/otel/internal/baggage"
)
// ContextWithBaggage returns a copy of parent with baggage.
func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
// Delegate so any hooks for the OpenTracing bridge are handled.
return baggage.ContextWithList(parent, b.list)
}
// ContextWithoutBaggage returns a copy of parent with no baggage.
func ContextWithoutBaggage(parent context.Context) context.Context {
// Delegate so any hooks for the OpenTracing bridge are handled.
return baggage.ContextWithList(parent, nil)
}
// FromContext returns the baggage contained in ctx.
func FromContext(ctx context.Context) Baggage {
// Delegate so any hooks for the OpenTracing bridge are handled.
return Baggage{list: baggage.ListFromContext(ctx)}
}
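A minimal round-trip sketch for the context helpers above (illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m, _ := baggage.NewMember("tenant", "acme")
	bag, _ := baggage.New(m)

	// Attach the baggage to a context and read it back further down the call chain.
	ctx := baggage.ContextWithBaggage(context.Background(), bag)
	fmt.Println(baggage.FromContext(ctx).Member("tenant").Value()) // acme
}
```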

20
vendor/go.opentelemetry.io/otel/baggage/doc.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package baggage provides functionality for storing and retrieving
baggage items in Go context. For propagating the baggage, see the
go.opentelemetry.io/otel/propagation package.
*/
package baggage // import "go.opentelemetry.io/otel/baggage"

View File

@ -15,10 +15,6 @@
/*
Package codes defines the canonical error codes used by OpenTelemetry.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track
the evolving OpenTelemetry specification and user feedback.
It conforms to [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
*/

View File

@ -16,10 +16,6 @@
Package otel provides global access to the OpenTelemetry API. The subpackages of
the otel package provide an implementation of the OpenTelemetry API.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
The provided API is used to instrument code and measure data about that code's
performance and operation. The measured data, by default, is not processed or
transmitted anywhere. An implementation of the OpenTelemetry SDK, like the

View File

@ -16,7 +16,23 @@ package otel // import "go.opentelemetry.io/otel"
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Handle handles any error deemed irremediable by an OpenTelemetry
// component.
Handle(error)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
// ErrorHandlerFunc is a convenience adapter to allow the use of a function
// as an ErrorHandler.
type ErrorHandlerFunc func(error)
var _ ErrorHandler = ErrorHandlerFunc(nil)
// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
func (f ErrorHandlerFunc) Handle(err error) {
f(err)
}
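An illustrative sketch of ErrorHandlerFunc in use; it assumes otel.SetErrorHandler and otel.Handle, which are defined elsewhere in this package and not shown in this diff:

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Adapt a plain function into an ErrorHandler and register it globally.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))

	// Components report irremediable errors through otel.Handle.
	otel.Handle(errors.New("example"))
}
```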

View File

@ -1,31 +0,0 @@
# OpenTelemetry Collector Go Exporter
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp)
This exporter exports OpenTelemetry spans and metrics to the OpenTelemetry Collector.
## Installation and Setup
The exporter can be installed using standard `go` functionality.
```bash
$ go get -u go.opentelemetry.io/otel/exporters/otlp
```
A new exporter can be created using the `NewExporter` function.
## Retries
The exporter will not, by default, retry failed requests to the collector.
However, it is configured in a way that it can be easily enabled.
To enable retries, the `GRPC_GO_RETRY` environment variable needs to be set to `on`. For example,
```
GRPC_GO_RETRY=on go run .
```
The [default service config](https://github.com/grpc/proposal/blob/master/A6-client-retries.md) is defined to retry failed requests with exponential backoff (`0.3 seconds * (2)^retry`) with [a max of `5` retries](https://github.com/open-telemetry/oteps/blob/be2a3fcbaa417ebbf5845cd485d34fdf0ab4a2a4/text/0035-opentelemetry-protocol.md#export-response).
These retries are only attempted for responses that are [deemed "retry-able" by the collector](https://github.com/grpc/proposal/blob/master/A6-client-retries.md#validation-of-retrypolicy).

View File

@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal contains common functionality for all OTLP exporters.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
import (
"fmt"
"path"
"strings"
)
// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
func CleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
tmp = fmt.Sprintf("/%s", tmp)
}
return tmp
}
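CleanPath lives in an internal package, so it cannot be imported directly; the hypothetical cleanPath below mirrors the same steps with the standard library purely to illustrate the intended behaviour:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// cleanPath mirrors the internal CleanPath above: trim spaces, clean the
// path, fall back to defaultPath, and force a leading slash.
func cleanPath(urlPath, defaultPath string) string {
	tmp := path.Clean(strings.TrimSpace(urlPath))
	if tmp == "." {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		tmp = "/" + tmp
	}
	return tmp
}

func main() {
	fmt.Println(cleanPath(" /v1//traces/ ", "/v1/traces")) // /v1/traces
	fmt.Println(cleanPath("", "/v1/traces"))               // /v1/traces
}
```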

View File

@ -0,0 +1,148 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)
// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(string) ([]byte, error)
Namespace string
}
// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
for _, o := range opts {
o(e)
}
}
// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
return v, v != ""
}
// WithString retrieves the specified config and passes it to ConfigFn as a string.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(v)
}
}
}
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if d, err := strconv.Atoi(v); err == nil {
fn(time.Duration(d) * time.Millisecond)
}
}
}
}
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(stringToHeader(v))
}
}
}
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if u, err := url.Parse(v); err == nil {
fn(u)
}
}
}
}
// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config.
func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if b, err := e.ReadFile(v); err == nil {
if c, err := createTLSConfig(b); err == nil {
fn(c)
}
}
}
}
}
func keyWithNamespace(ns, key string) string {
if ns == "" {
return key
}
return fmt.Sprintf("%s_%s", ns, key)
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
func createTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
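
A brief usage sketch of the reader and its helpers (illustrative only, not part of the vendored file; the stubbed environment map and the example-test form are assumptions):

```go
package envconfig

import (
	"fmt"
	"time"
)

// ExampleEnvOptionsReader shows how the helpers above compose: the reader
// resolves namespaced keys through GetEnv and hands typed values to callbacks.
func ExampleEnvOptionsReader() {
	env := map[string]string{
		"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
		"OTEL_EXPORTER_OTLP_HEADERS": "api-key=secret,team=otel",
	}
	e := EnvOptionsReader{
		GetEnv:    func(k string) string { return env[k] },
		ReadFile:  func(string) ([]byte, error) { return nil, nil },
		Namespace: "OTEL_EXPORTER_OTLP",
	}
	e.Apply(
		WithDuration("TIMEOUT", func(d time.Duration) { fmt.Println("timeout:", d) }),
		WithHeaders("HEADERS", func(h map[string]string) { fmt.Println("headers:", len(h)) }),
	)
	// Output:
	// timeout: 15s
	// headers: 2
}
```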

View File

@ -1,196 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/url"
"os"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel"
)
func ApplyGRPCEnvConfigs(cfg *Config) {
e := EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile,
}
e.ApplyGRPCEnvConfigs(cfg)
}
func ApplyHTTPEnvConfigs(cfg *Config) {
e := EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile,
}
e.ApplyHTTPEnvConfigs(cfg)
}
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(filename string) ([]byte, error)
}
func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts {
opt.ApplyHTTPOption(cfg)
}
}
func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts {
opt.ApplyGRPCOption(cfg)
}
}
func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption {
var opts []GenericOption
// Endpoint
if v, ok := e.getEnvValue("ENDPOINT"); ok {
opts = append(opts, WithEndpoint(v))
}
if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok {
opts = append(opts, WithTracesEndpoint(v))
}
if v, ok := e.getEnvValue("METRICS_ENDPOINT"); ok {
opts = append(opts, WithMetricsEndpoint(v))
}
// Certificate File
if path, ok := e.getEnvValue("CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTracesTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("METRICS_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithMetricsTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp metrics exporter certificate '%s': %w", path, err))
}
}
// Headers
if h, ok := e.getEnvValue("HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("TRACES_HEADERS"); ok {
opts = append(opts, WithTracesHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("METRICS_HEADERS"); ok {
opts = append(opts, WithMetricsHeaders(stringToHeader(h)))
}
// Compression
if c, ok := e.getEnvValue("COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok {
opts = append(opts, WithTracesCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("METRICS_COMPRESSION"); ok {
opts = append(opts, WithMetricsCompression(stringToCompression(c)))
}
// Timeout
if t, ok := e.getEnvValue("TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTracesTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("METRICS_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithMetricsTimeout(time.Duration(d)*time.Millisecond))
}
}
return opts
}
// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function.
// This function already prepends the OTLP prefix to all key lookups.
func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key)))
return v, v != ""
}
func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) {
b, err := e.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
func stringToCompression(value string) otlp.Compression {
switch value {
case "gzip":
return otlp.GzipCompression
}
return otlp.NoCompression
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}

View File

@ -1,376 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel/exporters/otlp"
)
const (
// DefaultMaxAttempts describes how many times the driver
// should retry the sending of the payload in case of a
// retryable error.
DefaultMaxAttempts int = 5
// DefaultTracesPath is a default URL path for endpoint that
// receives spans.
DefaultTracesPath string = "/v1/traces"
// DefaultMetricsPath is a default URL path for endpoint that
// receives metrics.
DefaultMetricsPath string = "/v1/metrics"
// DefaultBackoff is a default base backoff time used in the
// exponential backoff strategy.
DefaultBackoff time.Duration = 300 * time.Millisecond
// DefaultTimeout is a default max waiting time for the backend to process
// each span or metrics batch.
DefaultTimeout time.Duration = 10 * time.Second
// DefaultServiceConfig is the gRPC service config used if none is
// provided by the user.
DefaultServiceConfig = `{
"methodConfig":[{
"name":[
{ "service":"opentelemetry.proto.collector.metrics.v1.MetricsService" },
{ "service":"opentelemetry.proto.collector.trace.v1.TraceService" }
],
"retryPolicy":{
"MaxAttempts":5,
"InitialBackoff":"0.3s",
"MaxBackoff":"5s",
"BackoffMultiplier":2,
"RetryableStatusCodes":[
"CANCELLED",
"DEADLINE_EXCEEDED",
"RESOURCE_EXHAUSTED",
"ABORTED",
"OUT_OF_RANGE",
"UNAVAILABLE",
"DATA_LOSS"
]
}
}]
}`
)
type (
SignalConfig struct {
Endpoint string
Insecure bool
TLSCfg *tls.Config
Headers map[string]string
Compression otlp.Compression
Timeout time.Duration
URLPath string
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
}
Config struct {
// Signal specific configurations
Metrics SignalConfig
Traces SignalConfig
// General configurations
MaxAttempts int
Backoff time.Duration
// HTTP configuration
Marshaler otlp.Marshaler
// gRPC configurations
ReconnectionPeriod time.Duration
ServiceConfig string
DialOptions []grpc.DialOption
}
)
func NewDefaultConfig() Config {
c := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", otlp.DefaultCollectorHost, otlp.DefaultCollectorPort),
URLPath: DefaultTracesPath,
Compression: otlp.NoCompression,
Timeout: DefaultTimeout,
},
Metrics: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", otlp.DefaultCollectorHost, otlp.DefaultCollectorPort),
URLPath: DefaultMetricsPath,
Compression: otlp.NoCompression,
Timeout: DefaultTimeout,
},
MaxAttempts: DefaultMaxAttempts,
Backoff: DefaultBackoff,
ServiceConfig: DefaultServiceConfig,
}
return c
}
type (
// GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface {
ApplyHTTPOption(*Config)
ApplyGRPCOption(*Config)
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
// HTTPOption applies an option to the HTTP driver.
HTTPOption interface {
ApplyHTTPOption(*Config)
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
// GRPCOption applies an option to the gRPC driver.
GRPCOption interface {
ApplyGRPCOption(*Config)
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
)
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
fn func(*Config)
}
func (g *genericOption) ApplyGRPCOption(cfg *Config) {
g.fn(cfg)
}
func (g *genericOption) ApplyHTTPOption(cfg *Config) {
g.fn(cfg)
}
func (genericOption) private() {}
func newGenericOption(fn func(cfg *Config)) GenericOption {
return &genericOption{fn: fn}
}
// splitOption is an option that applies different logic
// for gRPC and HTTP.
type splitOption struct {
httpFn func(*Config)
grpcFn func(*Config)
}
func (g *splitOption) ApplyGRPCOption(cfg *Config) {
g.grpcFn(cfg)
}
func (g *splitOption) ApplyHTTPOption(cfg *Config) {
g.httpFn(cfg)
}
func (splitOption) private() {}
func newSplitOption(httpFn func(cfg *Config), grpcFn func(cfg *Config)) GenericOption {
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
fn func(*Config)
}
func (h *httpOption) ApplyHTTPOption(cfg *Config) {
h.fn(cfg)
}
func (httpOption) private() {}
func NewHTTPOption(fn func(cfg *Config)) HTTPOption {
return &httpOption{fn: fn}
}
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
fn func(*Config)
}
func (h *grpcOption) ApplyGRPCOption(cfg *Config) {
h.fn(cfg)
}
func (grpcOption) private() {}
func NewGRPCOption(fn func(cfg *Config)) GRPCOption {
return &grpcOption{fn: fn}
}
// Generic Options
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Endpoint = endpoint
cfg.Metrics.Endpoint = endpoint
})
}
func WithTracesEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Endpoint = endpoint
})
}
func WithMetricsEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Endpoint = endpoint
})
}
func WithCompression(compression otlp.Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Compression = compression
cfg.Metrics.Compression = compression
})
}
func WithTracesCompression(compression otlp.Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Compression = compression
})
}
func WithMetricsCompression(compression otlp.Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Compression = compression
})
}
func WithTracesURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.URLPath = urlPath
})
}
func WithMetricsURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.URLPath = urlPath
})
}
func WithMaxAttempts(maxAttempts int) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.MaxAttempts = maxAttempts
})
}
func WithBackoff(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Backoff = duration
})
}
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Traces.TLSCfg = tlsCfg.Clone()
cfg.Metrics.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithTracesTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Traces.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithMetricsTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Metrics.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithInsecure() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Insecure = true
cfg.Metrics.Insecure = true
})
}
func WithInsecureTraces() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Insecure = true
})
}
func WithInsecureMetrics() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Insecure = true
})
}
func WithHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Headers = headers
cfg.Metrics.Headers = headers
})
}
func WithTracesHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Headers = headers
})
}
func WithMetricsHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Headers = headers
})
}
func WithTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Timeout = duration
cfg.Metrics.Timeout = duration
})
}
func WithTracesTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Traces.Timeout = duration
})
}
func WithMetricsTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Timeout = duration
})
}

View File

@ -1,69 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"crypto/tls"
"crypto/x509"
"errors"
"io/ioutil"
)
const (
WeakCertificate = `
-----BEGIN CERTIFICATE-----
MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
Lhnm4N/QDk5rek0=
-----END CERTIFICATE-----
`
WeakPrivateKey = `
-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
-----END PRIVATE KEY-----
`
)
// ReadTLSConfigFromFile reads a PEM certificate file and creates
// a tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}

View File

@ -0,0 +1,68 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
import "fmt"
// PartialSuccessDropKind indicates the kind of partial success error
// received by an OTLP exporter, which corresponds with the signal
// being exported.
type PartialSuccessDropKind string
const (
// TracingPartialSuccess indicates that some spans were rejected.
TracingPartialSuccess PartialSuccessDropKind = "spans"
// MetricsPartialSuccess indicates that some metric data points were rejected.
MetricsPartialSuccess PartialSuccessDropKind = "metric data points"
)
// PartialSuccess represents the underlying error for all handling
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
ErrorMessage string
RejectedItems int64
RejectedKind PartialSuccessDropKind
}
var _ error = PartialSuccess{}
// Error implements the error interface.
func (ps PartialSuccess) Error() string {
msg := ps.ErrorMessage
if msg == "" {
msg = "empty message"
}
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}
// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
_, ok := err.(PartialSuccess)
return ok
}
// PartialSuccessToError produces an error suitable for passing to
// `otel.Handle()` out of the fields in a partial success response,
// independent of which signal produced the outcome.
func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error {
return PartialSuccess{
ErrorMessage: errorMessage,
RejectedItems: itemsRejected,
RejectedKind: kind,
}
}
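
A small sketch of the detection pattern the comment above describes (illustrative only; the example-test form and the sample values are assumptions):

```go
package internal

import (
	"errors"
	"fmt"
)

// ExamplePartialSuccess matches any partial success error with errors.Is
// against the zero value, then prints the formatted message.
func ExamplePartialSuccess() {
	err := PartialSuccessToError(TracingPartialSuccess, 3, "resource limit hit")
	if errors.Is(err, PartialSuccess{}) {
		fmt.Println(err)
	}
	// Output:
	// OTLP partial success: resource limit hit (3 spans rejected)
}
```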

View File

@ -0,0 +1,150 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
// explicit throttle responses received.
package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
import (
"context"
"fmt"
"time"
"github.com/cenkalti/backoff/v4"
)
// DefaultConfig is the recommended default Config to use.
var DefaultConfig = Config{
Enabled: true,
InitialInterval: 5 * time.Second,
MaxInterval: 30 * time.Second,
MaxElapsedTime: time.Minute,
}
// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
// Enabled indicates whether to retry sending batches in case of
// export failure.
Enabled bool
// InitialInterval the time to wait after the first failure before
// retrying.
InitialInterval time.Duration
// MaxInterval is the upper bound on backoff interval. Once this value is
// reached the delay between consecutive retries will always be
// `MaxInterval`.
MaxInterval time.Duration
// MaxElapsedTime is the maximum amount of time (including retries) spent
// trying to send a request/batch. Once this value is reached, the data
// is discarded.
MaxElapsedTime time.Duration
}
// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error
// EvaluateFunc returns whether an error is retry-able and, if it is, the
// explicit throttle duration included in the error that should be honored.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// an explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
if !c.Enabled {
return func(ctx context.Context, fn func(context.Context) error) error {
return fn(ctx)
}
}
// Do not use NewExponentialBackOff since it calls Reset and the code here
// must call Reset after changing the InitialInterval (this saves an
// unnecessary call to Now).
b := &backoff.ExponentialBackOff{
InitialInterval: c.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()
return func(ctx context.Context, fn func(context.Context) error) error {
for {
err := fn(ctx)
if err == nil {
return nil
}
retryable, throttle := evaluate(err)
if !retryable {
return err
}
bOff := b.NextBackOff()
if bOff == backoff.Stop {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
}
if err := waitFunc(ctx, delay); err != nil {
return err
}
}
}
}
// Allow override for testing.
var waitFunc = wait
func wait(ctx context.Context, delay time.Duration) error {
timer := time.NewTimer(delay)
defer timer.Stop()
select {
case <-ctx.Done():
// Handle the case where the timer and context deadline end
// simultaneously by prioritizing the timer expiration nil value
// response.
select {
case <-timer.C:
default:
return ctx.Err()
}
case <-timer.C:
}
return nil
}
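
A usage sketch of the retry wrapper (illustrative only; the flaky function and Config values are made up for the example):

```go
package retry

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// ExampleConfig_RequestFunc wraps a flaky operation so it is retried with
// exponential backoff until it succeeds or the retry budget is exhausted.
func ExampleConfig_RequestFunc() {
	cfg := Config{
		Enabled:         true,
		InitialInterval: time.Millisecond,
		MaxInterval:     10 * time.Millisecond,
		MaxElapsedTime:  time.Second,
	}
	// Treat every error as retry-able with no explicit throttle hint.
	send := cfg.RequestFunc(func(error) (bool, time.Duration) { return true, 0 })

	attempts := 0
	err := send(context.Background(), func(context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(err, attempts)
	// Output:
	// <nil> 3
}
```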

View File

@ -1,141 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
import (
"reflect"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
"go.opentelemetry.io/otel/sdk/resource"
)
// Attributes transforms a slice of KeyValues into a slice of OTLP attribute key-values.
func Attributes(attrs []attribute.KeyValue) []*commonpb.KeyValue {
if len(attrs) == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, len(attrs))
for _, kv := range attrs {
out = append(out, toAttribute(kv))
}
return out
}
// ResourceAttributes transforms a Resource into a slice of OTLP attribute key-values.
func ResourceAttributes(resource *resource.Resource) []*commonpb.KeyValue {
if resource.Len() == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, resource.Len())
for iter := resource.Iter(); iter.Next(); {
out = append(out, toAttribute(iter.Attribute()))
}
return out
}
func toAttribute(v attribute.KeyValue) *commonpb.KeyValue {
result := &commonpb.KeyValue{
Key: string(v.Key),
Value: new(commonpb.AnyValue),
}
switch v.Value.Type() {
case attribute.BOOL:
result.Value.Value = &commonpb.AnyValue_BoolValue{
BoolValue: v.Value.AsBool(),
}
case attribute.INT64:
result.Value.Value = &commonpb.AnyValue_IntValue{
IntValue: v.Value.AsInt64(),
}
case attribute.FLOAT64:
result.Value.Value = &commonpb.AnyValue_DoubleValue{
DoubleValue: v.Value.AsFloat64(),
}
case attribute.STRING:
result.Value.Value = &commonpb.AnyValue_StringValue{
StringValue: v.Value.AsString(),
}
case attribute.ARRAY:
result.Value.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: arrayValues(v),
},
}
default:
result.Value.Value = &commonpb.AnyValue_StringValue{
StringValue: "INVALID",
}
}
return result
}
func arrayValues(kv attribute.KeyValue) []*commonpb.AnyValue {
a := kv.Value.AsArray()
aType := reflect.TypeOf(a)
var valueFunc func(reflect.Value) *commonpb.AnyValue
switch aType.Elem().Kind() {
case reflect.Bool:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v.Bool(),
},
}
}
case reflect.Int, reflect.Int64:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v.Int(),
},
}
}
case reflect.Uintptr:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: int64(v.Uint()),
},
}
}
case reflect.Float64:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v.Float(),
},
}
}
case reflect.String:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v.String(),
},
}
}
}
results := make([]*commonpb.AnyValue, aType.Len())
for i, aValue := 0, reflect.ValueOf(a); i < aValue.Len(); i++ {
results[i] = valueFunc(aValue.Index(i))
}
return results
}

View File

@ -1,631 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transform provides translations for opentelemetry-go concepts and
// structures to otlp structures.
package transform
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
)
var (
// ErrUnimplementedAgg is returned when a transformation of an unimplemented
// aggregator is attempted.
ErrUnimplementedAgg = errors.New("unimplemented aggregator")
// ErrIncompatibleAgg is returned when
// aggregation.Kind implies an interface conversion that has
// failed
ErrIncompatibleAgg = errors.New("incompatible aggregation type")
// ErrUnknownValueType is returned when a transformation of an unknown value
// is attempted.
ErrUnknownValueType = errors.New("invalid value type")
// ErrContextCanceled is returned when a context cancellation halts a
// transformation.
ErrContextCanceled = errors.New("context canceled")
// ErrTransforming is returned when an unexpected error is encountered while transforming.
ErrTransforming = errors.New("transforming failed")
)
// result is the product of transforming Records into OTLP Metrics.
type result struct {
Resource *resource.Resource
InstrumentationLibrary instrumentation.Library
Metric *metricpb.Metric
Err error
}
// toNanos returns the number of nanoseconds since the UNIX epoch.
func toNanos(t time.Time) uint64 {
if t.IsZero() {
return 0
}
return uint64(t.UnixNano())
}
// CheckpointSet transforms all records contained in a checkpoint into
// batched OTLP ResourceMetrics.
func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
records, errc := source(ctx, exportSelector, cps)
// Start a fixed number of goroutines to transform records.
transformed := make(chan result)
var wg sync.WaitGroup
wg.Add(int(numWorkers))
for i := uint(0); i < numWorkers; i++ {
go func() {
defer wg.Done()
transformer(ctx, exportSelector, records, transformed)
}()
}
go func() {
wg.Wait()
close(transformed)
}()
// Synchronously collect the transformed records and transmit.
rms, err := sink(ctx, transformed)
if err != nil {
return nil, err
}
// source is complete, check for any errors.
if err := <-errc; err != nil {
return nil, err
}
return rms, nil
}
// source starts a goroutine that sends each one of the Records yielded by
// the CheckpointSet on the returned chan. Any error encountered will be sent
// on the returned error chan after seeding is complete.
func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
errc := make(chan error, 1)
out := make(chan export.Record)
// Seed records into process.
go func() {
defer close(out)
// No select is needed since errc is buffered.
errc <- cps.ForEach(exportSelector, func(r export.Record) error {
select {
case <-ctx.Done():
return ErrContextCanceled
case out <- r:
}
return nil
})
}()
return out, errc
}
// transformer transforms records read from the passed in chan into
// OTLP Metrics which are sent on the out chan.
func transformer(ctx context.Context, exportSelector export.ExportKindSelector, in <-chan export.Record, out chan<- result) {
for r := range in {
m, err := Record(exportSelector, r)
// Propagate errors, but do not send empty results.
if err == nil && m == nil {
continue
}
res := result{
Resource: r.Resource(),
InstrumentationLibrary: instrumentation.Library{
Name: r.Descriptor().InstrumentationName(),
Version: r.Descriptor().InstrumentationVersion(),
},
Metric: m,
Err: err,
}
select {
case <-ctx.Done():
return
case out <- res:
}
}
}
// sink collects transformed Records and batches them.
//
// Any errors encountered transforming input will be reported with an
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
// caller to handle any incorrect data in these ResourceMetrics.
func sink(ctx context.Context, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
var errStrings []string
type resourceBatch struct {
Resource *resourcepb.Resource
// Group by instrumentation library name and then the MetricDescriptor.
InstrumentationLibraryBatches map[instrumentation.Library]map[string]*metricpb.Metric
}
// group by unique Resource string.
grouped := make(map[attribute.Distinct]resourceBatch)
for res := range in {
if res.Err != nil {
errStrings = append(errStrings, res.Err.Error())
continue
}
rID := res.Resource.Equivalent()
rb, ok := grouped[rID]
if !ok {
rb = resourceBatch{
Resource: Resource(res.Resource),
InstrumentationLibraryBatches: make(map[instrumentation.Library]map[string]*metricpb.Metric),
}
grouped[rID] = rb
}
mb, ok := rb.InstrumentationLibraryBatches[res.InstrumentationLibrary]
if !ok {
mb = make(map[string]*metricpb.Metric)
rb.InstrumentationLibraryBatches[res.InstrumentationLibrary] = mb
}
mID := res.Metric.GetName()
m, ok := mb[mID]
if !ok {
mb[mID] = res.Metric
continue
}
switch res.Metric.Data.(type) {
case *metricpb.Metric_IntGauge:
m.GetIntGauge().DataPoints = append(m.GetIntGauge().DataPoints, res.Metric.GetIntGauge().DataPoints...)
case *metricpb.Metric_IntHistogram:
m.GetIntHistogram().DataPoints = append(m.GetIntHistogram().DataPoints, res.Metric.GetIntHistogram().DataPoints...)
case *metricpb.Metric_IntSum:
m.GetIntSum().DataPoints = append(m.GetIntSum().DataPoints, res.Metric.GetIntSum().DataPoints...)
case *metricpb.Metric_DoubleGauge:
m.GetDoubleGauge().DataPoints = append(m.GetDoubleGauge().DataPoints, res.Metric.GetDoubleGauge().DataPoints...)
case *metricpb.Metric_DoubleHistogram:
m.GetDoubleHistogram().DataPoints = append(m.GetDoubleHistogram().DataPoints, res.Metric.GetDoubleHistogram().DataPoints...)
case *metricpb.Metric_DoubleSum:
m.GetDoubleSum().DataPoints = append(m.GetDoubleSum().DataPoints, res.Metric.GetDoubleSum().DataPoints...)
default:
}
}
if len(grouped) == 0 {
return nil, nil
}
var rms []*metricpb.ResourceMetrics
for _, rb := range grouped {
rm := &metricpb.ResourceMetrics{Resource: rb.Resource}
for il, mb := range rb.InstrumentationLibraryBatches {
ilm := &metricpb.InstrumentationLibraryMetrics{
Metrics: make([]*metricpb.Metric, 0, len(mb)),
}
if il != (instrumentation.Library{}) {
ilm.InstrumentationLibrary = &commonpb.InstrumentationLibrary{
Name: il.Name,
Version: il.Version,
}
}
for _, m := range mb {
ilm.Metrics = append(ilm.Metrics, m)
}
rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
}
rms = append(rms, rm)
}
// Report any transform errors.
if len(errStrings) > 0 {
return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
}
return rms, nil
}
// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
// error is returned if the Record Aggregator is not supported.
func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricpb.Metric, error) {
agg := r.Aggregation()
switch agg.Kind() {
case aggregation.MinMaxSumCountKind:
mmsc, ok := agg.(aggregation.MinMaxSumCount)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return minMaxSumCount(r, mmsc)
case aggregation.HistogramKind:
h, ok := agg.(aggregation.Histogram)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return histogramPoint(r, exportSelector.ExportKindFor(r.Descriptor(), aggregation.HistogramKind), h)
case aggregation.SumKind:
s, ok := agg.(aggregation.Sum)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
sum, err := s.Sum()
if err != nil {
return nil, err
}
return sumPoint(r, sum, r.StartTime(), r.EndTime(), exportSelector.ExportKindFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic())
case aggregation.LastValueKind:
lv, ok := agg.(aggregation.LastValue)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
value, tm, err := lv.LastValue()
if err != nil {
return nil, err
}
return gaugePoint(r, value, time.Time{}, tm)
case aggregation.ExactKind:
e, ok := agg.(aggregation.Points)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
pts, err := e.Points()
if err != nil {
return nil, err
}
return gaugeArray(r, pts)
default:
return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg)
}
}
func gaugeArray(record export.Record, points []aggregation.Point) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
pbLabels := stringKeyValues(labels.Iter())
switch nk := desc.NumberKind(); nk {
case number.Int64Kind:
var pts []*metricpb.IntDataPoint
for _, s := range points {
pts = append(pts, &metricpb.IntDataPoint{
Labels: pbLabels,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: s.Number.CoerceToInt64(nk),
})
}
m.Data = &metricpb.Metric_IntGauge{
IntGauge: &metricpb.IntGauge{
DataPoints: pts,
},
}
case number.Float64Kind:
var pts []*metricpb.DoubleDataPoint
for _, s := range points {
pts = append(pts, &metricpb.DoubleDataPoint{
Labels: pbLabels,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: s.Number.CoerceToFloat64(nk),
})
}
m.Data = &metricpb.Metric_DoubleGauge{
DoubleGauge: &metricpb.DoubleGauge{
DataPoints: pts,
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, nk)
}
return m, nil
}
func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntGauge{
IntGauge: &metricpb.IntGauge{
DataPoints: []*metricpb.IntDataPoint{
{
Value: num.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleGauge{
DoubleGauge: &metricpb.DoubleGauge{
DataPoints: []*metricpb.DoubleDataPoint{
{
Value: num.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
func exportKindToTemporality(ek export.ExportKind) metricpb.AggregationTemporality {
switch ek {
case export.DeltaExportKind:
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
case export.CumulativeExportKind:
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
}
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
func sumPoint(record export.Record, num number.Number, start, end time.Time, ek export.ExportKind, monotonic bool) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntSum{
IntSum: &metricpb.IntSum{
IsMonotonic: monotonic,
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.IntDataPoint{
{
Value: num.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleSum{
DoubleSum: &metricpb.DoubleSum{
IsMonotonic: monotonic,
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.DoubleDataPoint{
{
Value: num.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
// minMaxSumCountValues returns the values of the MinMaxSumCount Aggregator
// as discrete values.
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum number.Number, count uint64, err error) {
if min, err = a.Min(); err != nil {
return
}
if max, err = a.Max(); err != nil {
return
}
if sum, err = a.Sum(); err != nil {
return
}
if count, err = a.Count(); err != nil {
return
}
return
}
// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
min, max, sum, count, err := minMaxSumCountValues(a)
if err != nil {
return nil, err
}
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
buckets := []uint64{min.AsRaw(), max.AsRaw()}
bounds := []float64{0.0, 100.0}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntHistogram{
IntHistogram: &metricpb.IntHistogram{
DataPoints: []*metricpb.IntHistogramDataPoint{
{
Sum: sum.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: buckets,
ExplicitBounds: bounds,
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleHistogram{
DoubleHistogram: &metricpb.DoubleHistogram{
DataPoints: []*metricpb.DoubleHistogramDataPoint{
{
Sum: sum.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: buckets,
ExplicitBounds: bounds,
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
var buckets aggregation.Buckets
if buckets, err = a.Histogram(); err != nil {
return
}
boundaries, counts = buckets.Boundaries, buckets.Counts
if len(counts) != len(boundaries)+1 {
err = ErrTransforming
return
}
return
}
// histogram transforms a Histogram Aggregator into an OTLP Metric.
func histogramPoint(record export.Record, ek export.ExportKind, a aggregation.Histogram) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
boundaries, counts, err := histogramValues(a)
if err != nil {
return nil, err
}
count, err := a.Count()
if err != nil {
return nil, err
}
sum, err := a.Sum()
if err != nil {
return nil, err
}
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_IntHistogram{
IntHistogram: &metricpb.IntHistogram{
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.IntHistogramDataPoint{
{
Sum: sum.CoerceToInt64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: counts,
ExplicitBounds: boundaries,
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_DoubleHistogram{
DoubleHistogram: &metricpb.DoubleHistogram{
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.DoubleHistogramDataPoint{
{
Sum: sum.CoerceToFloat64(n),
Labels: stringKeyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: counts,
ExplicitBounds: boundaries,
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
// stringKeyValues transforms a label iterator into an OTLP StringKeyValues.
func stringKeyValues(iter attribute.Iterator) []*commonpb.StringKeyValue {
l := iter.Len()
if l == 0 {
return nil
}
result := make([]*commonpb.StringKeyValue, 0, l)
for iter.Next() {
kv := iter.Label()
result = append(result, &commonpb.StringKeyValue{
Key: string(kv.Key),
Value: kv.Value.Emit(),
})
}
return result
}

View File

@ -1,45 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
import (
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
)
const (
// DefaultCollectorPort is the port the Exporter will attempt to connect to
// if no collector port is provided.
DefaultCollectorPort uint16 = 4317
// DefaultCollectorHost is the host address the Exporter will attempt to
// connect to if no collector address is provided.
DefaultCollectorHost string = "localhost"
)
// ExporterOption are setting options passed to an Exporter on creation.
type ExporterOption func(*config)
type config struct {
exportKindSelector metricsdk.ExportKindSelector
}
// WithMetricExportKindSelector defines the ExportKindSelector used
// for selecting AggregationTemporality (i.e., Cumulative vs. Delta
// aggregation). If not specified otherwise, the exporter will use a
// cumulative export kind selector.
func WithMetricExportKindSelector(selector metricsdk.ExportKindSelector) ExporterOption {
return func(cfg *config) {
cfg.exportKindSelector = selector
}
}

View File

@ -1,179 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
// Exporter is an OpenTelemetry exporter. It exports both traces and metrics
// from OpenTelemetry instrumented code using OpenTelemetry protocol
// buffers to a configurable receiver.
type Exporter struct {
cfg config
driver ProtocolDriver
mu sync.RWMutex
started bool
startOnce sync.Once
stopOnce sync.Once
}
var _ tracesdk.SpanExporter = (*Exporter)(nil)
var _ metricsdk.Exporter = (*Exporter)(nil)
// NewExporter constructs a new Exporter and starts it.
func NewExporter(ctx context.Context, driver ProtocolDriver, opts ...ExporterOption) (*Exporter, error) {
exp := NewUnstartedExporter(driver, opts...)
if err := exp.Start(ctx); err != nil {
return nil, err
}
return exp, nil
}
// NewUnstartedExporter constructs a new Exporter and does not start it.
func NewUnstartedExporter(driver ProtocolDriver, opts ...ExporterOption) *Exporter {
cfg := config{
// Note: the default ExportKindSelector is specified
// as Cumulative:
// https://github.com/open-telemetry/opentelemetry-specification/issues/731
exportKindSelector: metricsdk.CumulativeExportKindSelector(),
}
for _, opt := range opts {
opt(&cfg)
}
return &Exporter{
cfg: cfg,
driver: driver,
}
}
var (
errAlreadyStarted = errors.New("already started")
)
// Start establishes connections to the OpenTelemetry collector. Starting an
// already started exporter returns an error.
func (e *Exporter) Start(ctx context.Context) error {
var err = errAlreadyStarted
e.startOnce.Do(func() {
e.mu.Lock()
e.started = true
e.mu.Unlock()
err = e.driver.Start(ctx)
})
return err
}
// Shutdown closes all connections and releases resources currently being used
// by the exporter. If the exporter is not started this does nothing. A shut
// down exporter can't be started again. Shutting down an already shut down
// exporter does nothing.
func (e *Exporter) Shutdown(ctx context.Context) error {
e.mu.RLock()
started := e.started
e.mu.RUnlock()
if !started {
return nil
}
var err error
e.stopOnce.Do(func() {
err = e.driver.Stop(ctx)
e.mu.Lock()
e.started = false
e.mu.Unlock()
})
return err
}
// Export transforms and batches metric Records into OTLP Metrics and
// transmits them to the configured collector.
func (e *Exporter) Export(parent context.Context, cps metricsdk.CheckpointSet) error {
return e.driver.ExportMetrics(parent, cps, e.cfg.exportKindSelector)
}
// ExportKindFor reports back to the OpenTelemetry SDK sending this Exporter
// metric telemetry that it needs to be provided in a configured format.
func (e *Exporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) metricsdk.ExportKind {
return e.cfg.exportKindSelector.ExportKindFor(desc, kind)
}
// ExportSpans transforms and batches trace SpanSnapshots into OTLP Trace and
// transmits them to the configured collector.
func (e *Exporter) ExportSpans(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
return e.driver.ExportTraces(ctx, ss)
}
// NewExportPipeline sets up a complete export pipeline
// with the recommended TracerProvider setup.
func NewExportPipeline(ctx context.Context, driver ProtocolDriver, exporterOpts ...ExporterOption) (*Exporter,
*sdktrace.TracerProvider, *basic.Controller, error) {
exp, err := NewExporter(ctx, driver, exporterOpts...)
if err != nil {
return nil, nil, nil, err
}
tracerProvider := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
)
cntr := basic.New(
processor.New(
simple.NewWithInexpensiveDistribution(),
exp,
),
)
return exp, tracerProvider, cntr, nil
}
// InstallNewPipeline instantiates a NewExportPipeline with the
// recommended configuration and registers it globally.
func InstallNewPipeline(ctx context.Context, driver ProtocolDriver, exporterOpts ...ExporterOption) (*Exporter,
*sdktrace.TracerProvider, *basic.Controller, error) {
exp, tp, cntr, err := NewExportPipeline(ctx, driver, exporterOpts...)
if err != nil {
return nil, nil, nil, err
}
otel.SetTracerProvider(tp)
err = cntr.Start(ctx)
if err != nil {
return nil, nil, nil, err
}
return exp, tp, cntr, err
}

View File

@ -1,278 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc
import (
"context"
"math/rand"
"sync"
"sync/atomic"
"time"
"unsafe"
"google.golang.org/grpc/encoding/gzip"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type connection struct {
// Ensure pointer is 64-bit aligned for atomic operations on both 32 and 64 bit machines.
lastConnectErrPtr unsafe.Pointer
// mu protects the connection as it is accessed by the
// exporter goroutines and background connection goroutine
mu sync.Mutex
cc *grpc.ClientConn
// these fields are read-only after constructor is finished
cfg otlpconfig.Config
sCfg otlpconfig.SignalConfig
metadata metadata.MD
newConnectionHandler func(cc *grpc.ClientConn)
// these channels are created once
disconnectedCh chan bool
backgroundConnectionDoneCh chan struct{}
stopCh chan struct{}
// this is for tests, so they can replace the closing
// routine without a worry of modifying some global variable
// or changing it back to original after the test is done
closeBackgroundConnectionDoneCh func(ch chan struct{})
}
func newConnection(cfg otlpconfig.Config, sCfg otlpconfig.SignalConfig, handler func(cc *grpc.ClientConn)) *connection {
c := new(connection)
c.newConnectionHandler = handler
c.cfg = cfg
c.sCfg = sCfg
if len(c.sCfg.Headers) > 0 {
c.metadata = metadata.New(c.sCfg.Headers)
}
c.closeBackgroundConnectionDoneCh = func(ch chan struct{}) {
close(ch)
}
return c
}
func (c *connection) startConnection(ctx context.Context) {
c.stopCh = make(chan struct{})
c.disconnectedCh = make(chan bool, 1)
c.backgroundConnectionDoneCh = make(chan struct{})
if err := c.connect(ctx); err == nil {
c.setStateConnected()
} else {
c.setStateDisconnected(err)
}
go c.indefiniteBackgroundConnection()
}
func (c *connection) lastConnectError() error {
errPtr := (*error)(atomic.LoadPointer(&c.lastConnectErrPtr))
if errPtr == nil {
return nil
}
return *errPtr
}
func (c *connection) saveLastConnectError(err error) {
var errPtr *error
if err != nil {
errPtr = &err
}
atomic.StorePointer(&c.lastConnectErrPtr, unsafe.Pointer(errPtr))
}
func (c *connection) setStateDisconnected(err error) {
c.saveLastConnectError(err)
select {
case c.disconnectedCh <- true:
default:
}
c.newConnectionHandler(nil)
}
func (c *connection) setStateConnected() {
c.saveLastConnectError(nil)
}
func (c *connection) connected() bool {
return c.lastConnectError() == nil
}
const defaultConnReattemptPeriod = 10 * time.Second
func (c *connection) indefiniteBackgroundConnection() {
defer func() {
c.closeBackgroundConnectionDoneCh(c.backgroundConnectionDoneCh)
}()
connReattemptPeriod := c.cfg.ReconnectionPeriod
if connReattemptPeriod <= 0 {
connReattemptPeriod = defaultConnReattemptPeriod
}
// No strong seeding required, nano time can
// already help with pseudo uniqueness.
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
// maxJitterNanos: 70% of the connectionReattemptPeriod
maxJitterNanos := int64(0.7 * float64(connReattemptPeriod))
for {
// Otherwise these will be the normal scenarios to enable
// reconnection if we trip out.
// 1. If we've stopped, return entirely
// 2. Otherwise block until we are disconnected, and
// then retry connecting
select {
case <-c.stopCh:
return
case <-c.disconnectedCh:
// Quickly check if we haven't stopped at the
// same time.
select {
case <-c.stopCh:
return
default:
}
// Normal scenario that we'll wait for
}
if err := c.connect(context.Background()); err == nil {
c.setStateConnected()
} else {
// this code is unreachable in most cases because
// c.connect does not actually establish the connection
c.setStateDisconnected(err)
}
// Apply some jitter to avoid lockstep retrials of other
// collector-exporters. Lockstep retrials could result in an
// innocent DDOS, by clogging the machine's resources and network.
jitter := time.Duration(rng.Int63n(maxJitterNanos))
select {
case <-c.stopCh:
return
case <-time.After(connReattemptPeriod + jitter):
}
}
}
func (c *connection) connect(ctx context.Context) error {
cc, err := c.dialToCollector(ctx)
if err != nil {
return err
}
c.setConnection(cc)
c.newConnectionHandler(cc)
return nil
}
// setConnection sets cc as the client connection and returns true if
// the connection state changed.
func (c *connection) setConnection(cc *grpc.ClientConn) bool {
c.mu.Lock()
defer c.mu.Unlock()
// If the previous clientConn is the same as the current one, just return.
// This doesn't happen right now, as this func is only called with a new ClientConn.
// It is more about future-proofing.
if c.cc == cc {
return false
}
// If the previous clientConn was non-nil, close it
if c.cc != nil {
_ = c.cc.Close()
}
c.cc = cc
return true
}
func (c *connection) dialToCollector(ctx context.Context) (*grpc.ClientConn, error) {
dialOpts := []grpc.DialOption{}
if c.cfg.ServiceConfig != "" {
dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(c.cfg.ServiceConfig))
}
if c.sCfg.GRPCCredentials != nil {
dialOpts = append(dialOpts, grpc.WithTransportCredentials(c.sCfg.GRPCCredentials))
} else if c.sCfg.Insecure {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
if c.sCfg.Compression == otlp.GzipCompression {
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
if len(c.cfg.DialOptions) != 0 {
dialOpts = append(dialOpts, c.cfg.DialOptions...)
}
ctx, cancel := c.contextWithStop(ctx)
defer cancel()
ctx = c.contextWithMetadata(ctx)
return grpc.DialContext(ctx, c.sCfg.Endpoint, dialOpts...)
}
func (c *connection) contextWithMetadata(ctx context.Context) context.Context {
if c.metadata.Len() > 0 {
return metadata.NewOutgoingContext(ctx, c.metadata)
}
return ctx
}
func (c *connection) shutdown(ctx context.Context) error {
close(c.stopCh)
// Ensure that the backgroundConnector returns
select {
case <-c.backgroundConnectionDoneCh:
case <-ctx.Done():
return ctx.Err()
}
c.mu.Lock()
cc := c.cc
c.cc = nil
c.mu.Unlock()
if cc != nil {
return cc.Close()
}
return nil
}
func (c *connection) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
// Unify the parent context Done signal with the connection's
// stop channel.
ctx, cancel := context.WithCancel(ctx)
go func(ctx context.Context, cancel context.CancelFunc) {
select {
case <-ctx.Done():
// Nothing to do, either cancelled or deadline
// happened.
case <-c.stopCh:
cancel()
}
}(ctx, cancel)
return ctx, cancel
}

View File

@ -1,25 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package otlpgrpc provides an implementation of otlp.ProtocolDriver
that connects to the collector and sends traces and metrics using
gRPC.
This package is currently in a pre-GA phase. Backwards incompatible
changes may be introduced in subsequent minor version releases as we
work to track the evolving OpenTelemetry specification and user
feedback.
*/
package otlpgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"

View File

@ -1,195 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
import (
"context"
"errors"
"fmt"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/transform"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
type driver struct {
metricsDriver metricsDriver
tracesDriver tracesDriver
}
type metricsDriver struct {
connection *connection
lock sync.Mutex
metricsClient colmetricpb.MetricsServiceClient
}
type tracesDriver struct {
connection *connection
lock sync.Mutex
tracesClient coltracepb.TraceServiceClient
}
var (
errNoClient = errors.New("no client")
)
// NewDriver creates a new gRPC protocol driver.
func NewDriver(opts ...Option) otlp.ProtocolDriver {
cfg := otlpconfig.NewDefaultConfig()
otlpconfig.ApplyGRPCEnvConfigs(&cfg)
for _, opt := range opts {
opt.ApplyGRPCOption(&cfg)
}
d := &driver{}
d.tracesDriver = tracesDriver{
connection: newConnection(cfg, cfg.Traces, d.tracesDriver.handleNewConnection),
}
d.metricsDriver = metricsDriver{
connection: newConnection(cfg, cfg.Metrics, d.metricsDriver.handleNewConnection),
}
return d
}
func (md *metricsDriver) handleNewConnection(cc *grpc.ClientConn) {
md.lock.Lock()
defer md.lock.Unlock()
if cc != nil {
md.metricsClient = colmetricpb.NewMetricsServiceClient(cc)
} else {
md.metricsClient = nil
}
}
func (td *tracesDriver) handleNewConnection(cc *grpc.ClientConn) {
td.lock.Lock()
defer td.lock.Unlock()
if cc != nil {
td.tracesClient = coltracepb.NewTraceServiceClient(cc)
} else {
td.tracesClient = nil
}
}
// Start implements otlp.ProtocolDriver. It establishes a connection
// to the collector.
func (d *driver) Start(ctx context.Context) error {
d.tracesDriver.connection.startConnection(ctx)
d.metricsDriver.connection.startConnection(ctx)
return nil
}
// Stop implements otlp.ProtocolDriver. It shuts down the connection
// to the collector.
func (d *driver) Stop(ctx context.Context) error {
if err := d.tracesDriver.connection.shutdown(ctx); err != nil {
return err
}
return d.metricsDriver.connection.shutdown(ctx)
}
// ExportMetrics implements otlp.ProtocolDriver. It transforms metrics
// to protobuf binary format and sends the result to the collector.
func (d *driver) ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error {
if !d.metricsDriver.connection.connected() {
return fmt.Errorf("metrics exporter is disconnected from the server %s: %w", d.metricsDriver.connection.sCfg.Endpoint, d.metricsDriver.connection.lastConnectError())
}
ctx, cancel := d.metricsDriver.connection.contextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, d.metricsDriver.connection.sCfg.Timeout)
defer tCancel()
rms, err := transform.CheckpointSet(ctx, selector, cps, 1)
if err != nil {
return err
}
if len(rms) == 0 {
return nil
}
return d.metricsDriver.uploadMetrics(ctx, rms)
}
func (md *metricsDriver) uploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error {
ctx = md.connection.contextWithMetadata(ctx)
err := func() error {
md.lock.Lock()
defer md.lock.Unlock()
if md.metricsClient == nil {
return errNoClient
}
_, err := md.metricsClient.Export(ctx, &colmetricpb.ExportMetricsServiceRequest{
ResourceMetrics: protoMetrics,
})
return err
}()
if err != nil {
md.connection.setStateDisconnected(err)
}
return err
}
// ExportTraces implements otlp.ProtocolDriver. It transforms spans to
// protobuf binary format and sends the result to the collector.
func (d *driver) ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
if !d.tracesDriver.connection.connected() {
return fmt.Errorf("traces exporter is disconnected from the server %s: %w", d.tracesDriver.connection.sCfg.Endpoint, d.tracesDriver.connection.lastConnectError())
}
ctx, cancel := d.tracesDriver.connection.contextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, d.tracesDriver.connection.sCfg.Timeout)
defer tCancel()
protoSpans := transform.SpanData(ss)
if len(protoSpans) == 0 {
return nil
}
return d.tracesDriver.uploadTraces(ctx, protoSpans)
}
func (td *tracesDriver) uploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
ctx = td.connection.contextWithMetadata(ctx)
err := func() error {
td.lock.Lock()
defer td.lock.Unlock()
if td.tracesClient == nil {
return errNoClient
}
_, err := td.tracesClient.Export(ctx, &coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
})
return err
}()
if err != nil {
td.connection.setStateDisconnected(err)
}
return err
}

View File

@ -1,202 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc
import (
"fmt"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// Option applies an option to the gRPC driver.
type Option interface {
otlpconfig.GRPCOption
}
// WithInsecure disables client transport security for the exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecure() Option {
return otlpconfig.WithInsecure()
}
// WithTracesInsecure disables client transport security for the traces exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithTracesInsecure() Option {
return otlpconfig.WithInsecureTraces()
}
// WithInsecureMetrics disables client transport security for the metrics exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecureMetrics() Option {
return otlpconfig.WithInsecureMetrics()
}
// WithEndpoint allows one to set the endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithEndpoint(endpoint string) Option {
return otlpconfig.WithEndpoint(endpoint)
}
// WithTracesEndpoint allows one to set the traces endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithTracesEndpoint(endpoint string) Option {
return otlpconfig.WithTracesEndpoint(endpoint)
}
// WithMetricsEndpoint allows one to set the metrics endpoint that the exporter will
// connect to the collector on. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithMetricsEndpoint(endpoint string) Option {
return otlpconfig.WithMetricsEndpoint(endpoint)
}
// WithReconnectionPeriod allows one to set the delay between next connection attempt
// after failing to connect with the collector.
func WithReconnectionPeriod(rp time.Duration) otlpconfig.GRPCOption {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ReconnectionPeriod = rp
})
}
func compressorToCompression(compressor string) otlp.Compression {
switch compressor {
case "gzip":
return otlp.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return otlp.NoCompression
}
// WithCompressor will set the compressor for the gRPC client to use when sending requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithCompressor(compressor string) Option {
return otlpconfig.WithCompression(compressorToCompression(compressor))
}
// WithTracesCompression will set the compressor for the gRPC client to use when sending traces requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithTracesCompression(compressor string) Option {
return otlpconfig.WithTracesCompression(compressorToCompression(compressor))
}
// WithMetricsCompression will set the compressor for the gRPC client to use when sending metrics requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithMetricsCompression(compressor string) Option {
return otlpconfig.WithMetricsCompression(compressorToCompression(compressor))
}
// WithHeaders will send the provided headers with gRPC requests.
func WithHeaders(headers map[string]string) Option {
return otlpconfig.WithHeaders(headers)
}
// WithTracesHeaders will send the provided headers with gRPC traces requests.
func WithTracesHeaders(headers map[string]string) Option {
return otlpconfig.WithTracesHeaders(headers)
}
// WithMetricsHeaders will send the provided headers with gRPC metrics requests.
func WithMetricsHeaders(headers map[string]string) Option {
return otlpconfig.WithMetricsHeaders(headers)
}
// WithTLSCredentials allows the connection to use TLS credentials
// when talking to the server. It takes in grpc.TransportCredentials instead
// of say a Certificate file or a tls.Certificate, because the retrieving of
// these credentials can be done in many ways e.g. plain file, in code tls.Config
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Traces.GRPCCredentials = creds
cfg.Metrics.GRPCCredentials = creds
})
}
// WithTracesTLSCredentials allows the connection to use TLS credentials
// when talking to the traces server. It takes in grpc.TransportCredentials instead
// of say a Certificate file or a tls.Certificate, because the retrieving of
// these credentials can be done in many ways e.g. plain file, in code tls.Config
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithTracesTLSCredentials(creds credentials.TransportCredentials) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Traces.GRPCCredentials = creds
})
}
// WithMetricsTLSCredentials allows the connection to use TLS credentials
// when talking to the metrics server. It takes in grpc.TransportCredentials instead
// of say a Certificate file or a tls.Certificate, because the retrieving of
// these credentials can be done in many ways e.g. plain file, in code tls.Config
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithMetricsTLSCredentials(creds credentials.TransportCredentials) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Metrics.GRPCCredentials = creds
})
}
// WithServiceConfig defines the default gRPC service config used.
func WithServiceConfig(serviceConfig string) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ServiceConfig = serviceConfig
})
}
// WithDialOption opens support to any grpc.DialOption to be used. If it conflicts
// with other gRPC configuration the driver sets itself, the options provided here will
// take precedence since they are applied last.
func WithDialOption(opts ...grpc.DialOption) Option {
return otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.DialOptions = opts
})
}
// WithTimeout tells the driver the max waiting time for the backend to process
// each batch of spans or metrics. If unset, the default will be 10 seconds.
func WithTimeout(duration time.Duration) Option {
return otlpconfig.WithTimeout(duration)
}
// WithTracesTimeout tells the driver the max waiting time for the backend to process
// each batch of spans. If unset, the default will be 10 seconds.
func WithTracesTimeout(duration time.Duration) Option {
return otlpconfig.WithTracesTimeout(duration)
}
// WithMetricsTimeout tells the driver the max waiting time for the backend to process
// each batch of metrics. If unset, the default will be 10 seconds.
func WithMetricsTimeout(duration time.Duration) Option {
return otlpconfig.WithMetricsTimeout(duration)
}
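// Illustrative only (not part of the original file): a hedged sketch of how
// these options are typically combined to construct the gRPC protocol driver
// and the otlp exporter of this pre-otlptrace API. The endpoint value is an
// assumption.
//
//	driver := otlpgrpc.NewDriver(
//		otlpgrpc.WithInsecure(),
//		otlpgrpc.WithEndpoint("localhost:4317"),
//	)
//	exporter, err := otlp.NewExporter(ctx, driver)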

View File

@ -0,0 +1,51 @@
# OpenTelemetry-Go OTLP Span Exporter
[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation.
## Installation
```
go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
```
## Examples
- [Exporter setup and examples](./otlptracehttp/example_test.go)
- [Full example sending telemetry to a local collector](../../../example/otel-collector)
## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
This exporter is configured using a client satisfying the `otlptrace.Client` interface.
This client handles the transformation of data into wire format and the transmission of that data to the collector.
## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.
## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.
## Configuration
### Environment Variables
The following environment variables can be used (instead of options objects) to
override the default configuration. For more information about how each of
these environment variables is interpreted, see [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md).
| Environment variable | Option | Default value |
| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- |
| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] |
| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | |
| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | |
| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | |
| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` |
[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`.
Configuration using options has precedence over the environment variables.
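For reference, a minimal gRPC setup might look like the following sketch. It is illustrative only: it assumes a collector reachable at `localhost:4317` without TLS and uses the trace SDK (`go.opentelemetry.io/otel/sdk/trace`) to register the exporter.
```
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Create a span exporter that dials the collector over gRPC without TLS.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		panic(err)
	}

	// Register the exporter with a batching span processor and install the
	// provider globally.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)
}
```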

View File

@ -0,0 +1,54 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
import (
"context"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
// Client manages connections to the collector, handles the
// transformation of data into wire format, and the transmission of that
// data to the collector.
type Client interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Start should establish connection(s) to endpoint(s). It is
// called just once by the exporter, so the implementation
// does not need to worry about idempotence and locking.
Start(ctx context.Context) error
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Stop should close the connections. The function is called
// only once by the exporter, so the implementation does not
// need to worry about idempotence, but it may be called
// concurrently with UploadTraces, so proper
// locking is required. The function serves as a
// synchronization point - after the function returns, the
// process of closing connections is assumed to be finished.
Stop(ctx context.Context) error
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// UploadTraces should transform the passed traces to the wire
// format and send it to the collector. May be called
// concurrently.
UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
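// Illustrative only (not part of the original file): a minimal no-op sketch of
// a Client implementation, to make the contract above concrete. The type name
// noopClient is hypothetical.
//
//	type noopClient struct{}
//
//	func (noopClient) Start(ctx context.Context) error { return nil }
//	func (noopClient) Stop(ctx context.Context) error  { return nil }
//	func (noopClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
//		return nil
//	}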

View File

@ -0,0 +1,113 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
var (
errAlreadyStarted = errors.New("already started")
)
// Exporter exports trace data in the OTLP wire format.
type Exporter struct {
client Client
mu sync.RWMutex
started bool
startOnce sync.Once
stopOnce sync.Once
}
// ExportSpans exports a batch of spans.
func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
protoSpans := tracetransform.Spans(ss)
if len(protoSpans) == 0 {
return nil
}
return e.client.UploadTraces(ctx, protoSpans)
}
// Start establishes a connection to the receiving endpoint.
func (e *Exporter) Start(ctx context.Context) error {
var err = errAlreadyStarted
e.startOnce.Do(func() {
e.mu.Lock()
e.started = true
e.mu.Unlock()
err = e.client.Start(ctx)
})
return err
}
// Shutdown flushes all exports and closes all connections to the receiving endpoint.
func (e *Exporter) Shutdown(ctx context.Context) error {
e.mu.RLock()
started := e.started
e.mu.RUnlock()
if !started {
return nil
}
var err error
e.stopOnce.Do(func() {
err = e.client.Stop(ctx)
e.mu.Lock()
e.started = false
e.mu.Unlock()
})
return err
}
var _ tracesdk.SpanExporter = (*Exporter)(nil)
// New constructs a new Exporter and starts it.
func New(ctx context.Context, client Client) (*Exporter, error) {
exp := NewUnstarted(client)
if err := exp.Start(ctx); err != nil {
return nil, err
}
return exp, nil
}
// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(client Client) *Exporter {
return &Exporter{
client: client,
}
}
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (e *Exporter) MarshalLog() interface{} {
return struct {
Type string
Client Client
}{
Type: "otlptrace",
Client: e.client,
}
}
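// Illustrative only (not part of the original file): a hedged sketch of wiring
// an Exporter into the trace SDK. The client constructor is assumed to come
// from one of the transport packages, e.g. otlptracegrpc.NewClient.
//
//	client := otlptracegrpc.NewClient()
//	exp, err := otlptrace.New(ctx, client)
//	if err != nil {
//		// handle error
//	}
//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))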

View File

@ -0,0 +1,127 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
import (
"crypto/tls"
"net/url"
"os"
"path"
"strings"
"time"
"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
)
// DefaultEnvOptionsReader is the default environments reader.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: os.ReadFile,
Namespace: "OTEL_EXPORTER_OTLP",
}
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
func ApplyGRPCEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts {
cfg = opt.ApplyGRPCOption(cfg)
}
return cfg
}
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
func ApplyHTTPEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
return cfg
}
func getOptionsFromEnv() []GenericOption {
opts := []GenericOption{}
DefaultEnvOptionsReader.Apply(
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Traces.Endpoint = u.Host
// For OTLP/HTTP endpoint URLs without a per-signal
// configuration, the passed endpoint is used as a base URL
// and the signals are sent to these paths relative to that.
cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
return cfg
}, withEndpointForGRPC(u)))
}),
envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Traces.Endpoint = u.Host
// For endpoint URLs for OTLP/HTTP per-signal variables, the
// URL MUST be used as-is without any modification. The only
// exception is that if a URL contains no path part, the root
// path / MUST be used.
path := u.Path
if path == "" {
path = "/"
}
cfg.Traces.URLPath = path
return cfg
}, withEndpointForGRPC(u)))
}),
envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
)
return opts
}
func withEndpointScheme(u *url.URL) GenericOption {
switch strings.ToLower(u.Scheme) {
case "http", "unix":
return WithInsecure()
default:
return WithSecure()
}
}
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
return func(cfg Config) Config {
// For OTLP/gRPC endpoints, this is the target to which the
// exporter is going to send telemetry.
cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
return cfg
}
}
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
cp := NoCompression
if v == "gzip" {
cp = GzipCompression
}
fn(cp)
}
}
}

View File

@ -0,0 +1,307 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/encoding/gzip"
"go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
)
const (
// DefaultTracesPath is a default URL path for endpoint that
// receives spans.
DefaultTracesPath string = "/v1/traces"
// DefaultTimeout is a default max waiting time for the backend to process
// each span batch.
DefaultTimeout time.Duration = 10 * time.Second
)
type (
SignalConfig struct {
Endpoint string
Insecure bool
TLSCfg *tls.Config
Headers map[string]string
Compression Compression
Timeout time.Duration
URLPath string
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
}
Config struct {
// Signal specific configurations
Traces SignalConfig
RetryConfig retry.Config
// gRPC configurations
ReconnectionPeriod time.Duration
ServiceConfig string
DialOptions []grpc.DialOption
GRPCConn *grpc.ClientConn
}
)
// NewHTTPConfig returns a new Config with all settings applied from opts and
// any unset setting using the default HTTP config values.
func NewHTTPConfig(opts ...HTTPOption) Config {
cfg := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
URLPath: DefaultTracesPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
}
cfg = ApplyHTTPEnvConfigs(cfg)
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath)
return cfg
}
// NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values.
func NewGRPCConfig(opts ...GRPCOption) Config {
cfg := Config{
Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
URLPath: DefaultTracesPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
}
cfg = ApplyGRPCEnvConfigs(cfg)
for _, opt := range opts {
cfg = opt.ApplyGRPCOption(cfg)
}
if cfg.ServiceConfig != "" {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
}
// Prioritize GRPCCredentials over Insecure (passing both is an error).
if cfg.Traces.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
} else if cfg.Traces.Insecure {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
} else {
// Default to using the host's root CA.
creds := credentials.NewTLS(nil)
cfg.Traces.GRPCCredentials = creds
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
}
if cfg.Traces.Compression == GzipCompression {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
if cfg.ReconnectionPeriod != 0 {
p := grpc.ConnectParams{
Backoff: backoff.DefaultConfig,
MinConnectTimeout: cfg.ReconnectionPeriod,
}
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
}
return cfg
}
type (
// GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface {
ApplyHTTPOption(Config) Config
ApplyGRPCOption(Config) Config
// A private method to prevent users from implementing the
// interface, so that future additions to it will not
// violate compatibility.
private()
}
// HTTPOption applies an option to the HTTP driver.
HTTPOption interface {
ApplyHTTPOption(Config) Config
// A private method to prevent users from implementing the
// interface, so that future additions to it will not
// violate compatibility.
private()
}
// GRPCOption applies an option to the gRPC driver.
GRPCOption interface {
ApplyGRPCOption(Config) Config
// A private method to prevent users from implementing the
// interface, so that future additions to it will not
// violate compatibility.
private()
}
)
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
fn func(Config) Config
}
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
return g.fn(cfg)
}
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
return g.fn(cfg)
}
func (genericOption) private() {}
func newGenericOption(fn func(cfg Config) Config) GenericOption {
return &genericOption{fn: fn}
}
// splitOption is an option that applies different logic
// for gRPC and HTTP.
type splitOption struct {
httpFn func(Config) Config
grpcFn func(Config) Config
}
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
return g.grpcFn(cfg)
}
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
return g.httpFn(cfg)
}
func (splitOption) private() {}
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
fn func(Config) Config
}
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
return h.fn(cfg)
}
func (httpOption) private() {}
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
return &httpOption{fn: fn}
}
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
fn func(Config) Config
}
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
return h.fn(cfg)
}
func (grpcOption) private() {}
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
return &grpcOption{fn: fn}
}
// Generic Options
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Endpoint = endpoint
return cfg
})
}
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Compression = compression
return cfg
})
}
func WithURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.URLPath = urlPath
return cfg
})
}
func WithRetry(rc retry.Config) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.RetryConfig = rc
return cfg
})
}
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg Config) Config {
cfg.Traces.TLSCfg = tlsCfg.Clone()
return cfg
}, func(cfg Config) Config {
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
return cfg
})
}
func WithInsecure() GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Insecure = true
return cfg
})
}
func WithSecure() GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Insecure = false
return cfg
})
}
func WithHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Headers = headers
return cfg
})
}
func WithTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.Timeout = duration
return cfg
})
}
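// Illustrative only (not part of the original file): a hedged sketch of how
// the generic options above are consumed. A GenericOption satisfies both
// HTTPOption and GRPCOption, so the same value configures either transport.
// The endpoint values are assumptions.
//
//	grpcCfg := NewGRPCConfig(WithEndpoint("collector:4317"), WithTimeout(5*time.Second))
//	httpCfg := NewHTTPConfig(WithEndpoint("collector:4318"), WithTimeout(5*time.Second))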

View File

@ -12,7 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
const (
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
DefaultCollectorGRPCPort uint16 = 4317
// DefaultCollectorHTTPPort is the default HTTP port of the collector.
DefaultCollectorHTTPPort uint16 = 4318
// DefaultCollectorHost is the host address the Exporter will attempt
// to connect to if no collector address is provided.
DefaultCollectorHost string = "localhost"
)
// Compression describes the compression used for payloads sent to the
// collector.
@ -27,7 +37,7 @@ const (
GzipCompression
)
// Marshaler describes the kind of message format sent to the collector
// Marshaler describes the kind of message format sent to the collector.
type Marshaler int
const (

View File

@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
import (
"crypto/tls"
"crypto/x509"
"errors"
)
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
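// Illustrative only (not part of the original file): a hedged sketch of using
// CreateTLSConfig together with WithTLSClientConfig. The certificate path is
// an assumption.
//
//	pem, err := os.ReadFile("/etc/otel/collector-ca.pem")
//	if err != nil {
//		// handle error
//	}
//	tlsCfg, err := CreateTLSConfig(pem)
//	if err != nil {
//		// handle error
//	}
//	opt := WithTLSClientConfig(tlsCfg)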

View File

@ -0,0 +1,158 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
if len(attrs) == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, len(attrs))
for _, kv := range attrs {
out = append(out, KeyValue(kv))
}
return out
}
// Iterator transforms an attribute iterator into OTLP key-values.
func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
l := iter.Len()
if l == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, l)
for iter.Next() {
out = append(out, KeyValue(iter.Attribute()))
}
return out
}
// ResourceAttributes transforms a Resource OTLP key-values.
func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
return Iterator(res.Iter())
}
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
}
// Value transforms an attribute Value into an OTLP AnyValue.
func Value(v attribute.Value) *commonpb.AnyValue {
av := new(commonpb.AnyValue)
switch v.Type() {
case attribute.BOOL:
av.Value = &commonpb.AnyValue_BoolValue{
BoolValue: v.AsBool(),
}
case attribute.BOOLSLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: boolSliceValues(v.AsBoolSlice()),
},
}
case attribute.INT64:
av.Value = &commonpb.AnyValue_IntValue{
IntValue: v.AsInt64(),
}
case attribute.INT64SLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: int64SliceValues(v.AsInt64Slice()),
},
}
case attribute.FLOAT64:
av.Value = &commonpb.AnyValue_DoubleValue{
DoubleValue: v.AsFloat64(),
}
case attribute.FLOAT64SLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: float64SliceValues(v.AsFloat64Slice()),
},
}
case attribute.STRING:
av.Value = &commonpb.AnyValue_StringValue{
StringValue: v.AsString(),
}
case attribute.STRINGSLICE:
av.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: stringSliceValues(v.AsStringSlice()),
},
}
default:
av.Value = &commonpb.AnyValue_StringValue{
StringValue: "INVALID",
}
}
return av
}
func boolSliceValues(vals []bool) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v,
},
}
}
return converted
}
func int64SliceValues(vals []int64) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v,
},
}
}
return converted
}
func float64SliceValues(vals []float64) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v,
},
}
}
return converted
}
func stringSliceValues(vals []string) []*commonpb.AnyValue {
converted := make([]*commonpb.AnyValue, len(vals))
for i, v := range vals {
converted[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v,
},
}
}
return converted
}

View File

@ -12,19 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
"go.opentelemetry.io/otel/sdk/instrumentation"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)
func instrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary {
if il == (instrumentation.Library{}) {
func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
if il == (instrumentation.Scope{}) {
return nil
}
return &commonpb.InstrumentationLibrary{
return &commonpb.InstrumentationScope{
Name: il.Name,
Version: il.Version,
}

View File

@ -12,12 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"go.opentelemetry.io/otel/sdk/resource"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
)
// Resource transforms a Resource into an OTLP Resource.

View File

@ -12,36 +12,31 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"go.opentelemetry.io/otel/sdk/instrumentation"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
const (
maxMessageEventsPerSpan = 128
)
// SpanData transforms a slice of SpanSnapshot into a slice of OTLP
// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
// ResourceSpans.
func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
if len(sdl) == 0 {
return nil
}
rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
type ilsKey struct {
type key struct {
r attribute.Distinct
il instrumentation.Library
is instrumentation.Scope
}
ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans)
ssm := make(map[key]*tracepb.ScopeSpans)
var resources int
for _, sd := range sdl {
@ -49,29 +44,31 @@ func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
continue
}
rKey := sd.Resource.Equivalent()
iKey := ilsKey{
rKey := sd.Resource().Equivalent()
k := key{
r: rKey,
il: sd.InstrumentationLibrary,
is: sd.InstrumentationScope(),
}
ils, iOk := ilsm[iKey]
scopeSpan, iOk := ssm[k]
if !iOk {
// Either the resource or instrumentation library were unknown.
ils = &tracepb.InstrumentationLibrarySpans{
InstrumentationLibrary: instrumentationLibrary(sd.InstrumentationLibrary),
Spans: []*tracepb.Span{},
// Either the resource or instrumentation scope were unknown.
scopeSpan = &tracepb.ScopeSpans{
Scope: InstrumentationScope(sd.InstrumentationScope()),
Spans: []*tracepb.Span{},
SchemaUrl: sd.InstrumentationScope().SchemaURL,
}
}
ils.Spans = append(ils.Spans, span(sd))
ilsm[iKey] = ils
scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
ssm[k] = scopeSpan
rs, rOk := rsm[rKey]
if !rOk {
resources++
// The resource was unknown.
rs = &tracepb.ResourceSpans{
Resource: Resource(sd.Resource),
InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils},
Resource: Resource(sd.Resource()),
ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
SchemaUrl: sd.Resource().SchemaURL(),
}
rsm[rKey] = rs
continue
@ -81,9 +78,9 @@ func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
// library lookup was unknown because if so we need to add it to the
// ResourceSpans. Otherwise, the instrumentation library has already
// been seen and the append we did above will be included it in the
// InstrumentationLibrarySpans reference.
// ScopeSpans reference.
if !iOk {
rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils)
rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
}
}
@ -96,32 +93,32 @@ func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
}
// span transforms a Span into an OTLP span.
func span(sd *tracesdk.SpanSnapshot) *tracepb.Span {
func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
if sd == nil {
return nil
}
tid := sd.SpanContext.TraceID()
sid := sd.SpanContext.SpanID()
tid := sd.SpanContext().TraceID()
sid := sd.SpanContext().SpanID()
s := &tracepb.Span{
TraceId: tid[:],
SpanId: sid[:],
TraceState: sd.SpanContext.TraceState().String(),
Status: status(sd.StatusCode, sd.StatusMessage),
StartTimeUnixNano: uint64(sd.StartTime.UnixNano()),
EndTimeUnixNano: uint64(sd.EndTime.UnixNano()),
Links: links(sd.Links),
Kind: spanKind(sd.SpanKind),
Name: sd.Name,
Attributes: Attributes(sd.Attributes),
Events: spanEvents(sd.MessageEvents),
DroppedAttributesCount: uint32(sd.DroppedAttributeCount),
DroppedEventsCount: uint32(sd.DroppedMessageEventCount),
DroppedLinksCount: uint32(sd.DroppedLinkCount),
TraceState: sd.SpanContext().TraceState().String(),
Status: status(sd.Status().Code, sd.Status().Description),
StartTimeUnixNano: uint64(sd.StartTime().UnixNano()),
EndTimeUnixNano: uint64(sd.EndTime().UnixNano()),
Links: links(sd.Links()),
Kind: spanKind(sd.SpanKind()),
Name: sd.Name(),
Attributes: KeyValues(sd.Attributes()),
Events: spanEvents(sd.Events()),
DroppedAttributesCount: uint32(sd.DroppedAttributes()),
DroppedEventsCount: uint32(sd.DroppedEvents()),
DroppedLinksCount: uint32(sd.DroppedLinks()),
}
if psid := sd.Parent.SpanID(); psid.IsValid() {
if psid := sd.Parent().SpanID(); psid.IsValid() {
s.ParentSpanId = psid[:]
}
@ -132,10 +129,12 @@ func span(sd *tracesdk.SpanSnapshot) *tracepb.Span {
func status(status codes.Code, message string) *tracepb.Status {
var c tracepb.Status_StatusCode
switch status {
case codes.Ok:
c = tracepb.Status_STATUS_CODE_OK
case codes.Error:
c = tracepb.Status_STATUS_CODE_ERROR
default:
c = tracepb.Status_STATUS_CODE_OK
c = tracepb.Status_STATUS_CODE_UNSET
}
return &tracepb.Status{
Code: c,
@ -144,7 +143,7 @@ func status(status codes.Code, message string) *tracepb.Status {
}
// links transforms span Links to OTLP span links.
func links(links []trace.Link) []*tracepb.Span_Link {
func links(links []tracesdk.Link) []*tracepb.Span_Link {
if len(links) == 0 {
return nil
}
@ -155,47 +154,35 @@ func links(links []trace.Link) []*tracepb.Span_Link {
// being reused -- in short we need a new otLink per iteration.
otLink := otLink
tid := otLink.TraceID()
sid := otLink.SpanID()
tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
sl = append(sl, &tracepb.Span_Link{
TraceId: tid[:],
SpanId: sid[:],
Attributes: Attributes(otLink.Attributes),
TraceId: tid[:],
SpanId: sid[:],
Attributes: KeyValues(otLink.Attributes),
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
})
}
return sl
}
// spanEvents transforms span Events to an OTLP span events.
func spanEvents(es []trace.Event) []*tracepb.Span_Event {
func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
if len(es) == 0 {
return nil
}
evCount := len(es)
if evCount > maxMessageEventsPerSpan {
evCount = maxMessageEventsPerSpan
}
events := make([]*tracepb.Span_Event, 0, evCount)
messageEvents := 0
events := make([]*tracepb.Span_Event, len(es))
// Transform message events
for _, e := range es {
if messageEvents >= maxMessageEventsPerSpan {
break
for i := 0; i < len(es); i++ {
events[i] = &tracepb.Span_Event{
Name: es[i].Name,
TimeUnixNano: uint64(es[i].Time.UnixNano()),
Attributes: KeyValues(es[i].Attributes),
DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
}
messageEvents++
events = append(events,
&tracepb.Span_Event{
Name: e.Name,
TimeUnixNano: uint64(e.Time.UnixNano()),
Attributes: Attributes(e.Attributes),
// TODO (rghetia) : Add Drop Counts when supported.
},
)
}
return events
}

View File

@ -0,0 +1,295 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"context"
"errors"
"sync"
"time"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
type client struct {
endpoint string
dialOpts []grpc.DialOption
metadata metadata.MD
exportTimeout time.Duration
requestFunc retry.RequestFunc
// stopCtx is used as a parent context for all exports. Therefore, when it
// is canceled with the stopFunc all exports are canceled.
stopCtx context.Context
// stopFunc cancels stopCtx, stopping any active exports.
stopFunc context.CancelFunc
// ourConn keeps track of where conn was created: true if created here on
// Start, or false if passed with an option. This is important on Shutdown
// as the conn should only be closed if created here on start. Otherwise,
// it is up to the processes that passed the conn to close it.
ourConn bool
conn *grpc.ClientConn
tscMu sync.RWMutex
tsc coltracepb.TraceServiceClient
}
// Compile time check *client implements otlptrace.Client.
var _ otlptrace.Client = (*client)(nil)
// NewClient creates a new gRPC trace client.
func NewClient(opts ...Option) otlptrace.Client {
return newClient(opts...)
}
func newClient(opts ...Option) *client {
cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
ctx, cancel := context.WithCancel(context.Background())
c := &client{
endpoint: cfg.Traces.Endpoint,
exportTimeout: cfg.Traces.Timeout,
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
dialOpts: cfg.DialOptions,
stopCtx: ctx,
stopFunc: cancel,
conn: cfg.GRPCConn,
}
if len(cfg.Traces.Headers) > 0 {
c.metadata = metadata.New(cfg.Traces.Headers)
}
return c
}
// Start establishes a gRPC connection to the collector.
func (c *client) Start(ctx context.Context) error {
if c.conn == nil {
// If the caller did not provide a ClientConn when the client was
// created, create one using the configuration they did provide.
conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
if err != nil {
return err
}
// Keep track that we own the lifecycle of this conn and need to close
// it on Shutdown.
c.ourConn = true
c.conn = conn
}
// The otlptrace.Client interface states this method is called just once,
// so no need to check if already started.
c.tscMu.Lock()
c.tsc = coltracepb.NewTraceServiceClient(c.conn)
c.tscMu.Unlock()
return nil
}
var errAlreadyStopped = errors.New("the client is already stopped")
// Stop shuts down the client.
//
// Any active connections to a remote endpoint are closed if they were created
// by the client. Any gRPC connection passed during creation using
// WithGRPCConn will not be closed. It is the caller's responsibility to
// handle cleanup of that resource.
//
// This method synchronizes with the UploadTraces method of the client. It
// will wait for any active calls to that method to complete unimpeded, or it
// will cancel any active calls if ctx expires. If ctx expires, the context
// error will be forwarded as the returned error. All client held resources
// will still be released in this situation.
//
// If the client has already stopped, an error will be returned describing
// this.
func (c *client) Stop(ctx context.Context) error {
// Acquire the c.tscMu lock within the ctx lifetime.
acquired := make(chan struct{})
go func() {
c.tscMu.Lock()
close(acquired)
}()
var err error
select {
case <-ctx.Done():
// The Stop timeout is reached. Kill any remaining exports to force
// the clear of the lock and save the timeout error to return and
// signal the shutdown timed out before cleanly stopping.
c.stopFunc()
err = ctx.Err()
// To ensure the client is not left in a dirty state c.tsc needs to be
// set to nil. To avoid the race condition when doing this, ensure
// that all the exports are killed (initiated by c.stopFunc).
<-acquired
case <-acquired:
}
// Hold the tscMu lock for the rest of the function to ensure no new
// exports are started.
defer c.tscMu.Unlock()
// The otlptrace.Client interface states this method is called only
// once, but there is no guarantee it is called after Start. Ensure the
// client is started before doing anything and let the caller know if they
// made a mistake.
if c.tsc == nil {
return errAlreadyStopped
}
// Clear c.tsc to signal the client is stopped.
c.tsc = nil
if c.ourConn {
closeErr := c.conn.Close()
// A context timeout error takes precedence over this error.
if err == nil && closeErr != nil {
err = closeErr
}
}
return err
}
var errShutdown = errors.New("the client is shutdown")
// UploadTraces sends a batch of spans.
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
// Hold a read lock to ensure a shutdown initiated after this starts does
// not abandon the export. This read lock acquire has less priority than a
// write lock acquire (i.e. Stop), meaning if the client is shutting down
// this will come after the shutdown.
c.tscMu.RLock()
defer c.tscMu.RUnlock()
if c.tsc == nil {
return errShutdown
}
ctx, cancel := c.exportContext(ctx)
defer cancel()
return c.requestFunc(ctx, func(iCtx context.Context) error {
resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
})
if resp != nil && resp.PartialSuccess != nil {
otel.Handle(internal.PartialSuccessToError(
internal.TracingPartialSuccess,
resp.PartialSuccess.RejectedSpans,
resp.PartialSuccess.ErrorMessage,
))
}
// nil is converted to OK.
if status.Code(err) == codes.OK {
// Success.
return nil
}
return err
})
}
// exportContext returns a copy of parent with an appropriate deadline and
// cancellation function.
//
// It is the caller's responsibility to cancel the returned context once its
// use is complete, via the parent or directly with the returned CancelFunc, to
// ensure all resources are correctly released.
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
var (
ctx context.Context
cancel context.CancelFunc
)
if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
} else {
ctx, cancel = context.WithCancel(parent)
}
if c.metadata.Len() > 0 {
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
}
// Unify the client stopCtx with the parent.
go func() {
select {
case <-ctx.Done():
case <-c.stopCtx.Done():
// Cancel the export as the shutdown has timed out.
cancel()
}
}()
return ctx, cancel
}
// retryable returns if err identifies a request that can be retried and a
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
s := status.Convert(err)
switch s.Code() {
case codes.Canceled,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Aborted,
codes.OutOfRange,
codes.Unavailable,
codes.DataLoss:
return true, throttleDelay(s)
}
// Not a retry-able error.
return false, 0
}
// throttleDelay returns a duration to wait for if an explicit throttle time
// is included in the response status.
func throttleDelay(s *status.Status) time.Duration {
for _, detail := range s.Details() {
if t, ok := detail.(*errdetails.RetryInfo); ok {
return t.RetryDelay.AsDuration()
}
}
return 0
}
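// Illustrative sketch, not part of the vendored file: how retryable and
// throttleDelay combine for a gRPC status that carries a RetryInfo detail.
// The durationpb import (google.golang.org/protobuf/types/known/durationpb)
// is an assumption of this sketch; the vendored client does not use it.
func exampleRetryDecision() (bool, time.Duration) {
	st := status.New(codes.Unavailable, "collector busy")
	// Attach an explicit throttle hint, as a server might.
	st, _ = st.WithDetails(&errdetails.RetryInfo{RetryDelay: durationpb.New(3 * time.Second)})
	// Unavailable is retryable, and the 3s RetryInfo delay is returned.
	return retryable(st.Err())
}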
// MarshalLog is the marshaling function used by the logging system to represent this Client.
func (c *client) MarshalLog() interface{} {
return struct {
Type string
Endpoint string
}{
Type: "otlphttpgrpc",
Endpoint: c.endpoint,
}
}

View File

@ -0,0 +1,31 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"context"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
)
// New constructs a new Exporter and starts it.
func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
return otlptrace.New(ctx, NewClient(opts...))
}
// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(opts ...Option) *otlptrace.Exporter {
return otlptrace.NewUnstarted(NewClient(opts...))
}
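// Illustrative usage sketch, not part of the vendored file: wiring this
// exporter into an SDK TracerProvider. The otel and sdktrace imports
// (go.opentelemetry.io/otel, go.opentelemetry.io/otel/sdk/trace) and the
// localhost endpoint are assumptions about the caller's setup.
func setupTracing(ctx context.Context) (shutdown func(context.Context) error, err error) {
	// New constructs and starts the client, dialing the collector.
	exp, err := New(ctx, WithInsecure(), WithEndpoint("localhost:4317"))
	if err != nil {
		return nil, err
	}
	// Batch spans through the exporter and install the provider globally.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp)
	return tp.Shutdown, nil
}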

View File

@ -0,0 +1,189 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
import (
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
)
// Option applies an option to the gRPC driver.
type Option interface {
applyGRPCOption(otlpconfig.Config) otlpconfig.Config
}
func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
converted := make([]otlpconfig.GRPCOption, len(opts))
for i, o := range opts {
converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
}
return converted
}
// RetryConfig defines configuration for retrying export of span batches that
// failed to be received by the target endpoint.
//
// This configuration does not define any network retry strategy. That is
// entirely handled by the gRPC ClientConn.
type RetryConfig retry.Config
type wrappedOption struct {
otlpconfig.GRPCOption
}
func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
return w.ApplyGRPCOption(cfg)
}
// WithInsecure disables client transport security for the exporter's gRPC
// connection just like grpc.WithInsecure()
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
// default, client security is required unless WithInsecure is used.
//
// This option has no effect if WithGRPCConn is used.
func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
// WithEndpoint sets the target endpoint the exporter will connect to. If
// unset, localhost:4317 will be used as a default.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
// WithReconnectionPeriod sets the minimum amount of time between connection
// attempts to the target endpoint.
//
// This option has no effect if WithGRPCConn is used.
func WithReconnectionPeriod(rp time.Duration) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.ReconnectionPeriod = rp
return cfg
})}
}
func compressorToCompression(compressor string) otlpconfig.Compression {
if compressor == "gzip" {
return otlpconfig.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return otlpconfig.NoCompression
}
// WithCompressor sets the compressor for the gRPC client to use when sending
// requests. It is the responsibility of the caller to ensure that the
// compressor set has been registered with google.golang.org/grpc/encoding.
// This can be done by encoding.RegisterCompressor. Some compressors
// auto-register on import, such as gzip, which can be registered by adding
// the import `_ "google.golang.org/grpc/encoding/gzip"`.
//
// This option has no effect if WithGRPCConn is used.
func WithCompressor(compressor string) Option {
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
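// A minimal sketch, not part of the vendored file, of enabling gzip. It
// assumes the caller adds the blank import that registers the compressor,
// _ "google.golang.org/grpc/encoding/gzip", and imports context and otlptrace.
func newGzipExporter(ctx context.Context) (*otlptrace.Exporter, error) {
	// "gzip" must already be registered with google.golang.org/grpc/encoding.
	return New(ctx, WithCompressor("gzip"))
}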
// WithHeaders will send the provided headers with each gRPC request.
func WithHeaders(headers map[string]string) Option {
return wrappedOption{otlpconfig.WithHeaders(headers)}
}
// WithTLSCredentials allows the connection to use TLS credentials when
// talking to the server. It takes in grpc.TransportCredentials instead of,
// say, a certificate file or a tls.Certificate, because these credentials can
// be retrieved in many ways, e.g. from a plain file, an in-code tls.Config, or
// by certificate rotation, so it is up to the caller to decide what to use.
//
// This option has no effect if WithGRPCConn is used.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.Traces.GRPCCredentials = creds
return cfg
})}
}
// WithServiceConfig defines the default gRPC service config used.
//
// This option has no effect if WithGRPCConn is used.
func WithServiceConfig(serviceConfig string) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.ServiceConfig = serviceConfig
return cfg
})}
}
// WithDialOption sets explicit grpc.DialOptions to use when making a
// connection. The options here are appended to the internal grpc.DialOptions
// used, so they will take precedence over any other internal grpc.DialOptions
// they might conflict with.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.DialOptions = opts
return cfg
})}
}
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
//
// This option takes precedence over any other option that relates to
// establishing or persisting a gRPC connection to a target endpoint. Any
// other option of those types passed will be ignored.
//
// It is the caller's responsibility to close the passed conn. The client
// Shutdown method will not close this connection.
func WithGRPCConn(conn *grpc.ClientConn) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
cfg.GRPCConn = conn
return cfg
})}
}
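// A minimal sketch, not part of the vendored file, of reusing an existing
// *grpc.ClientConn; the context and otlptrace imports are assumptions.
func newExporterFromConn(ctx context.Context, conn *grpc.ClientConn) (*otlptrace.Exporter, error) {
	// The exporter will not close conn on Shutdown; its lifecycle stays with the caller.
	return New(ctx, WithGRPCConn(conn))
}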
// WithTimeout sets the max amount of time a client will attempt to export a
// batch of spans. This takes precedence over any retry settings defined with
// WithRetry; once this time limit has been reached the export is abandoned
// and the batch of spans is dropped.
//
// If unset, the default timeout will be set to 10 seconds.
func WithTimeout(duration time.Duration) Option {
return wrappedOption{otlpconfig.WithTimeout(duration)}
}
// WithRetry sets the retry policy for transient retryable errors that may be
// returned by the target endpoint when exporting a batch of spans.
//
// If the target endpoint responds with not only a retryable error, but also
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase that interval
// exponentially after each error, for no more than a total time of 1 minute.
func WithRetry(settings RetryConfig) Option {
return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
}
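// Illustrative sketch, not part of the vendored file, combining several of
// the options above. The endpoint, header name, and retry values are
// hypothetical; the context and otlptrace imports are assumptions of this
// sketch.
func newConfiguredExporter(ctx context.Context) (*otlptrace.Exporter, error) {
	return New(ctx,
		WithEndpoint("collector.example.com:4317"),
		WithTLSCredentials(credentials.NewClientTLSFromCert(nil, "")),
		WithHeaders(map[string]string{"x-example-tenant": "demo"}),
		WithTimeout(5*time.Second),
		WithRetry(RetryConfig{
			Enabled:         true,
			InitialInterval: time.Second,
			MaxInterval:     10 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
}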

View File

@ -1,145 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlp // import "go.opentelemetry.io/otel/exporters/otlp"
import (
"context"
"sync"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
)
// ProtocolDriver is an interface used by OTLP exporter. It's
// responsible for connecting to and disconnecting from the collector,
// and for transforming traces and metrics into wire format and
// transmitting them to the collector.
type ProtocolDriver interface {
// Start should establish connection(s) to endpoint(s). It is
// called just once by the exporter, so the implementation
// does not need to worry about idempotence and locking.
Start(ctx context.Context) error
// Stop should close the connections. The function is called
// only once by the exporter, so the implementation does not
// need to worry about idempotence, but it may be called
// concurrently with ExportMetrics or ExportTraces, so proper
// locking is required. The function serves as a
// synchronization point - after the function returns, the
// process of closing connections is assumed to be finished.
Stop(ctx context.Context) error
// ExportMetrics should transform the passed metrics to the
// wire format and send it to the collector. May be called
// concurrently with ExportTraces, so the manager needs to
// take this into account by doing proper locking.
ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error
// ExportTraces should transform the passed traces to the wire
// format and send it to the collector. May be called
// concurrently with ExportMetrics, so the manager needs to
// take this into account by doing proper locking.
ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error
}
// SplitConfig is used to configure a split driver.
type SplitConfig struct {
// ForMetrics driver will be used for sending metrics to the
// collector.
ForMetrics ProtocolDriver
// ForTraces driver will be used for sending spans to the
// collector.
ForTraces ProtocolDriver
}
type splitDriver struct {
metric ProtocolDriver
trace ProtocolDriver
}
var _ ProtocolDriver = (*splitDriver)(nil)
// NewSplitDriver creates a protocol driver which contains two other
// protocol drivers and will forward traces to one of them and metrics
// to another.
func NewSplitDriver(cfg SplitConfig) ProtocolDriver {
return &splitDriver{
metric: cfg.ForMetrics,
trace: cfg.ForTraces,
}
}
// Start implements ProtocolDriver. It starts both drivers at the same
// time.
func (d *splitDriver) Start(ctx context.Context) error {
wg := sync.WaitGroup{}
wg.Add(2)
var (
metricErr error
traceErr error
)
go func() {
defer wg.Done()
metricErr = d.metric.Start(ctx)
}()
go func() {
defer wg.Done()
traceErr = d.trace.Start(ctx)
}()
wg.Wait()
if metricErr != nil {
return metricErr
}
if traceErr != nil {
return traceErr
}
return nil
}
// Stop implements ProtocolDriver. It stops both drivers at the same
// time.
func (d *splitDriver) Stop(ctx context.Context) error {
wg := sync.WaitGroup{}
wg.Add(2)
var (
metricErr error
traceErr error
)
go func() {
defer wg.Done()
metricErr = d.metric.Stop(ctx)
}()
go func() {
defer wg.Done()
traceErr = d.trace.Stop(ctx)
}()
wg.Wait()
if metricErr != nil {
return metricErr
}
if traceErr != nil {
return traceErr
}
return nil
}
// ExportMetrics implements ProtocolDriver. It forwards the call to
// the driver used for sending metrics.
func (d *splitDriver) ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error {
return d.metric.ExportMetrics(ctx, cps, selector)
}
// ExportTraces implements ProtocolDriver. It forwards the call to the
// driver used for sending spans.
func (d *splitDriver) ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
return d.trace.ExportTraces(ctx, ss)
}

View File

@ -18,7 +18,6 @@ import (
"log"
"os"
"sync"
"sync/atomic"
)
var (
@ -26,64 +25,73 @@ var (
// throughout an OpenTelemetry instrumented project. When a user
// specified ErrorHandler is registered (`SetErrorHandler`) all calls to
// `Handle` will be delegated to the registered ErrorHandler.
globalErrorHandler = &loggingErrorHandler{
l: log.New(os.Stderr, "", log.LstdFlags),
}
globalErrorHandler = defaultErrorHandler()
// delegateErrorHandlerOnce ensures that a user provided ErrorHandler is
// only ever registered once.
delegateErrorHandlerOnce sync.Once
// Comiple time check that loggingErrorHandler implements ErrorHandler.
_ ErrorHandler = (*loggingErrorHandler)(nil)
// Compile-time check that delegator implements ErrorHandler.
_ ErrorHandler = (*delegator)(nil)
// Compile-time check that errLogger implements ErrorHandler.
_ ErrorHandler = (*errLogger)(nil)
)
// loggingErrorHandler logs all errors to STDERR.
type loggingErrorHandler struct {
delegate atomic.Value
type delegator struct {
lock *sync.RWMutex
eh ErrorHandler
}
func (d *delegator) Handle(err error) {
d.lock.RLock()
defer d.lock.RUnlock()
d.eh.Handle(err)
}
// setDelegate sets the ErrorHandler delegate.
func (d *delegator) setDelegate(eh ErrorHandler) {
d.lock.Lock()
defer d.lock.Unlock()
d.eh = eh
}
func defaultErrorHandler() *delegator {
return &delegator{
lock: &sync.RWMutex{},
eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)},
}
}
// errLogger logs errors if no delegate is set, otherwise they are delegated.
type errLogger struct {
l *log.Logger
}
// setDelegate sets the ErrorHandler delegate if one is not already set.
func (h *loggingErrorHandler) setDelegate(d ErrorHandler) {
if h.delegate.Load() != nil {
// Delegate already registered
return
}
h.delegate.Store(d)
}
// Handle implements ErrorHandler.
func (h *loggingErrorHandler) Handle(err error) {
if d := h.delegate.Load(); d != nil {
d.(ErrorHandler).Handle(err)
return
}
// Handle logs err if no delegate is set, otherwise it is delegated.
func (h *errLogger) Handle(err error) {
h.l.Print(err)
}
// GetErrorHandler returns the global ErrorHandler instance. If no ErrorHandler
// instance has been set (`SetErrorHandler`), the default ErrorHandler which
// logs errors to STDERR is returned.
// GetErrorHandler returns the global ErrorHandler instance.
//
// The default ErrorHandler instance returned will log all errors to STDERR
// until an override ErrorHandler is set with SetErrorHandler. All
// ErrorHandler returned prior to this will automatically forward errors to
// the set instance instead of logging.
//
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
func GetErrorHandler() ErrorHandler {
return globalErrorHandler
}
// SetErrorHandler sets the global ErrorHandler to be h.
// SetErrorHandler sets the global ErrorHandler to h.
//
// The first time this is called all ErrorHandler previously returned from
// GetErrorHandler will send errors to h instead of the default logging
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) {
delegateErrorHandlerOnce.Do(func() {
current := GetErrorHandler()
if current == h {
return
}
if internalHandler, ok := current.(*loggingErrorHandler); ok {
internalHandler.setDelegate(h)
}
})
globalErrorHandler.setDelegate(h)
}
// Handle is a convience function for ErrorHandler().Handle(err)
// Handle is a convenience function for ErrorHandler().Handle(err).
func Handle(err error) {
GetErrorHandler().Handle(err)
}
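// Illustrative sketch, not part of the vendored file: registering a custom
// handler once at startup. The handler type and log prefix are hypothetical;
// callers outside this package would use otel.SetErrorHandler.
type prefixErrorHandler struct{ l *log.Logger }

func (h prefixErrorHandler) Handle(err error) { h.l.Printf("otel error: %v", err) }

func installErrorHandler() {
	SetErrorHandler(prefixErrorHandler{l: log.New(os.Stderr, "", log.LstdFlags)})
}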

View File

@ -12,327 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package baggage provides types and functions to manage W3C Baggage.
package baggage
/*
Package baggage provides base types and functionality to store and retrieve
baggage in Go context. This package exists because the OpenTracing bridge to
OpenTelemetry needs to synchronize state whenever baggage for a context is
modified and that context contains an OpenTracing span. If it were not for
this need this package would not need to exist and the
`go.opentelemetry.io/otel/baggage` package would be the singular place where
W3C baggage is handled.
*/
package baggage // import "go.opentelemetry.io/otel/internal/baggage"
import (
"context"
// List is the collection of baggage members. The W3C allows for duplicates,
// but OpenTelemetry does not, therefore, this is represented as a map.
type List map[string]Item
"go.opentelemetry.io/otel/attribute"
)
type rawMap map[attribute.Key]attribute.Value
type keySet map[attribute.Key]struct{}
// Map is an immutable storage for correlations.
type Map struct {
m rawMap
// Item is the value and metadata properties part of a list-member.
type Item struct {
Value string
Properties []Property
}
// MapUpdate contains information about correlation changes to be
// made.
type MapUpdate struct {
// DropSingleK contains a single key to be dropped from
// correlations. Use this to avoid an overhead of a slice
// allocation if there is only one key to drop.
DropSingleK attribute.Key
// DropMultiK contains all the keys to be dropped from
// correlations.
DropMultiK []attribute.Key
// Property is a metadata entry for a list-member.
type Property struct {
Key, Value string
// SingleKV contains a single key-value pair to be added to
// correlations. Use this to avoid an overhead of a slice
// allocation if there is only one key-value pair to add.
SingleKV attribute.KeyValue
// MultiKV contains all the key-value pairs to be added to
// correlations.
MultiKV []attribute.KeyValue
}
func newMap(raw rawMap) Map {
return Map{
m: raw,
}
}
// NewEmptyMap creates an empty correlations map.
func NewEmptyMap() Map {
return newMap(nil)
}
// NewMap creates a map with the contents of the update applied. In
// this function, having an update with DropSingleK or DropMultiK
// makes no sense - those fields are effectively ignored.
func NewMap(update MapUpdate) Map {
return NewEmptyMap().Apply(update)
}
// Apply creates a copy of the map with the contents of the update
// applied. Apply will first drop the keys from DropSingleK and
// DropMultiK, then add key-value pairs from SingleKV and MultiKV.
func (m Map) Apply(update MapUpdate) Map {
delSet, addSet := getModificationSets(update)
mapSize := getNewMapSize(m.m, delSet, addSet)
r := make(rawMap, mapSize)
for k, v := range m.m {
// do not copy items we want to drop
if _, ok := delSet[k]; ok {
continue
}
// do not copy items we would overwrite
if _, ok := addSet[k]; ok {
continue
}
r[k] = v
}
if update.SingleKV.Key.Defined() {
r[update.SingleKV.Key] = update.SingleKV.Value
}
for _, kv := range update.MultiKV {
r[kv.Key] = kv.Value
}
if len(r) == 0 {
r = nil
}
return newMap(r)
}
func getModificationSets(update MapUpdate) (delSet, addSet keySet) {
deletionsCount := len(update.DropMultiK)
if update.DropSingleK.Defined() {
deletionsCount++
}
if deletionsCount > 0 {
delSet = make(map[attribute.Key]struct{}, deletionsCount)
for _, k := range update.DropMultiK {
delSet[k] = struct{}{}
}
if update.DropSingleK.Defined() {
delSet[update.DropSingleK] = struct{}{}
}
}
additionsCount := len(update.MultiKV)
if update.SingleKV.Key.Defined() {
additionsCount++
}
if additionsCount > 0 {
addSet = make(map[attribute.Key]struct{}, additionsCount)
for _, k := range update.MultiKV {
addSet[k.Key] = struct{}{}
}
if update.SingleKV.Key.Defined() {
addSet[update.SingleKV.Key] = struct{}{}
}
}
return
}
func getNewMapSize(m rawMap, delSet, addSet keySet) int {
mapSizeDiff := 0
for k := range addSet {
if _, ok := m[k]; !ok {
mapSizeDiff++
}
}
for k := range delSet {
if _, ok := m[k]; ok {
if _, inAddSet := addSet[k]; !inAddSet {
mapSizeDiff--
}
}
}
return len(m) + mapSizeDiff
}
// Value gets a value from correlations map and returns a boolean
// value indicating whether the key exist in the map.
func (m Map) Value(k attribute.Key) (attribute.Value, bool) {
value, ok := m.m[k]
return value, ok
}
// HasValue returns a boolean value indicating whether the key exist
// in the map.
func (m Map) HasValue(k attribute.Key) bool {
_, has := m.Value(k)
return has
}
// Len returns a length of the map.
func (m Map) Len() int {
return len(m.m)
}
// Foreach calls a passed callback once on each key-value pair until
// all the key-value pairs of the map were iterated or the callback
// returns false, whichever happens first.
func (m Map) Foreach(f func(attribute.KeyValue) bool) {
for k, v := range m.m {
if !f(attribute.KeyValue{
Key: k,
Value: v,
}) {
return
}
}
}
type correlationsType struct{}
// SetHookFunc describes a type of a callback that is called when
// storing baggage in the context.
type SetHookFunc func(context.Context) context.Context
// GetHookFunc describes a type of a callback that is called when
// getting baggage from the context.
type GetHookFunc func(context.Context, Map) Map
// value under this key is either of type Map or correlationsData
var correlationsKey = &correlationsType{}
type correlationsData struct {
m Map
setHook SetHookFunc
getHook GetHookFunc
}
func (d correlationsData) isHookless() bool {
return d.setHook == nil && d.getHook == nil
}
type hookKind int
const (
hookKindSet hookKind = iota
hookKindGet
)
func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) {
switch kind {
case hookKindSet:
d.setHook = setHook
case hookKindGet:
d.getHook = getHook
}
}
// ContextWithSetHook installs a hook function that will be invoked
// every time ContextWithMap is called. To avoid unnecessary callback
// invocations (recursive or not), the callback can temporarily clear
// the hooks from the context with the ContextWithNoHooks function.
//
// Note that NewContext also calls ContextWithMap, so the hook will be
// invoked.
//
// Passing nil SetHookFunc creates a context with no set hook to call.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context {
return contextWithHook(ctx, hookKindSet, hook, nil)
}
// ContextWithGetHook installs a hook function that will be invoked
// every time MapFromContext is called. To avoid unnecessary callback
// invocations (recursive or not), the callback can temporarily clear
// the hooks from the context with the ContextWithNoHooks function.
//
// Note that NewContext also calls MapFromContext, so the hook will be
// invoked.
//
// Passing nil GetHookFunc creates a context with no get hook to call.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context {
return contextWithHook(ctx, hookKindGet, nil, hook)
}
func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
v.overrideHook(kind, setHook, getHook)
if v.isHookless() {
return context.WithValue(ctx, correlationsKey, v.m)
}
return context.WithValue(ctx, correlationsKey, v)
case Map:
return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v)
default:
m := NewEmptyMap()
return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m)
}
}
func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context {
d := correlationsData{m: m}
d.overrideHook(kind, setHook, getHook)
if d.isHookless() {
return ctx
}
return context.WithValue(ctx, correlationsKey, d)
}
// ContextWithNoHooks creates a context with all the hooks
// disabled. Also returns old set and get hooks. This function can be
// used to temporarily clear the context from hooks and then reinstate
// them by calling ContextWithSetHook and ContextWithGetHook functions
// passing the hooks returned by this function.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook
default:
return ctx, nil, nil
}
}
// ContextWithMap returns a context with the Map entered into it.
func ContextWithMap(ctx context.Context, m Map) context.Context {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
v.m = m
ctx = context.WithValue(ctx, correlationsKey, v)
if v.setHook != nil {
ctx = v.setHook(ctx)
}
return ctx
default:
return context.WithValue(ctx, correlationsKey, m)
}
}
// ContextWithNoCorrelationData returns a context stripped of correlation
// data.
func ContextWithNoCorrelationData(ctx context.Context) context.Context {
return context.WithValue(ctx, correlationsKey, nil)
}
// NewContext returns a context with the map from passed context
// updated with the passed key-value pairs.
func NewContext(ctx context.Context, keyvalues ...attribute.KeyValue) context.Context {
return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{
MultiKV: keyvalues,
}))
}
// MapFromContext gets the current Map from a Context.
func MapFromContext(ctx context.Context) Map {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
if v.getHook != nil {
return v.getHook(ctx, v.m)
}
return v.m
case Map:
return v
default:
return NewEmptyMap()
}
// HasValue indicates whether Value was set. It distinguishes a property
// that has no value from one whose value is genuinely the zero-value (an
// empty string).
HasValue bool
}

View File

@ -0,0 +1,92 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/internal/baggage"
import "context"
type baggageContextKeyType int
const baggageKey baggageContextKeyType = iota
// SetHookFunc is a callback called when storing baggage in the context.
type SetHookFunc func(context.Context, List) context.Context
// GetHookFunc is a callback called when getting baggage from the context.
type GetHookFunc func(context.Context, List) List
type baggageState struct {
list List
setHook SetHookFunc
getHook GetHookFunc
}
// ContextWithSetHook returns a copy of parent with hook configured to be
// invoked every time ContextWithBaggage is called.
//
// Passing nil SetHookFunc creates a context with no set hook to call.
func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
var s baggageState
if v, ok := parent.Value(baggageKey).(baggageState); ok {
s = v
}
s.setHook = hook
return context.WithValue(parent, baggageKey, s)
}
// ContextWithGetHook returns a copy of parent with hook configured to be
// invoked every time FromContext is called.
//
// Passing nil GetHookFunc creates a context with no get hook to call.
func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
var s baggageState
if v, ok := parent.Value(baggageKey).(baggageState); ok {
s = v
}
s.getHook = hook
return context.WithValue(parent, baggageKey, s)
}
// ContextWithList returns a copy of parent with baggage. Passing nil list
// returns a context without any baggage.
func ContextWithList(parent context.Context, list List) context.Context {
var s baggageState
if v, ok := parent.Value(baggageKey).(baggageState); ok {
s = v
}
s.list = list
ctx := context.WithValue(parent, baggageKey, s)
if s.setHook != nil {
ctx = s.setHook(ctx, list)
}
return ctx
}
// ListFromContext returns the baggage contained in ctx.
func ListFromContext(ctx context.Context) List {
switch v := ctx.Value(baggageKey).(type) {
case baggageState:
if v.getHook != nil {
return v.getHook(ctx, v.list)
}
return v.list
default:
return nil
}
}
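// A minimal sketch, not part of the vendored file, of the round trip these
// helpers provide; the "user" key and its values are hypothetical.
func exampleRoundTrip(ctx context.Context) List {
	l := List{"user": {Value: "alice", Properties: []Property{{Key: "role", Value: "admin", HasValue: true}}}}
	ctx = ContextWithList(ctx, l)
	// Any get hook installed with ContextWithGetHook runs before the list is returned.
	return ListFromContext(ctx)
}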

View File

@ -0,0 +1,63 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
"os"
"sync"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
)
// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
var globalLoggerLock = &sync.RWMutex{}
// SetLogger overrides the globalLogger with l.
//
// To see Info messages use a logger with `l.V(1).Enabled() == true`.
// To see Debug messages use a logger with `l.V(5).Enabled() == true`.
func SetLogger(l logr.Logger) {
globalLoggerLock.Lock()
defer globalLoggerLock.Unlock()
globalLogger = l
}
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
globalLoggerLock.RLock()
defer globalLoggerLock.RUnlock()
globalLogger.V(1).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
globalLoggerLock.RLock()
defer globalLoggerLock.RUnlock()
globalLogger.Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
globalLoggerLock.RLock()
defer globalLoggerLock.RUnlock()
globalLogger.V(5).Info(msg, keysAndValues...)
}
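// Illustrative sketch, not part of the vendored file: routing internal logs
// to stdout and raising stdr verbosity so Info (V(1)) and Debug (V(5))
// messages are emitted. The stdout destination is a hypothetical choice.
func enableVerboseLogging() {
	stdr.SetVerbosity(5) // allow V(1) Info and V(5) Debug messages through
	SetLogger(stdr.New(log.New(os.Stdout, "", log.LstdFlags|log.Lshortfile)))
}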

View File

@ -1,348 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global
import (
"context"
"sync"
"sync/atomic"
"unsafe"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/registry"
)
// This file contains the forwarding implementation of MeterProvider used as
// the default global instance. Metric events using instruments provided by
// this implementation are no-ops until the first Meter implementation is set
// as the global provider.
//
// The implementation here uses Mutexes to maintain a list of active Meters in
// the MeterProvider and Instruments in each Meter, under the assumption that
// these interfaces are not performance-critical.
//
// We have the invariant that setDelegate() will be called before a new
// MeterProvider implementation is registered as the global provider. Mutexes
// in the MeterProvider and Meters ensure that each instrument has a delegate
// before the global provider is set.
//
// Bound instrument operations are implemented by delegating to the
// instrument after it is registered, with a sync.Once initializer to
// protect against races with Release().
//
// Metric uniqueness checking is implemented by calling the exported
// methods of the api/metric/registry package.
type meterKey struct {
Name, Version string
}
type meterProvider struct {
delegate metric.MeterProvider
// lock protects `delegate` and `meters`.
lock sync.Mutex
// meters maintains a unique entry for every named Meter
// that has been registered through the global instance.
meters map[meterKey]*meterEntry
}
type meterImpl struct {
delegate unsafe.Pointer // (*metric.MeterImpl)
lock sync.Mutex
syncInsts []*syncImpl
asyncInsts []*asyncImpl
}
type meterEntry struct {
unique metric.MeterImpl
impl meterImpl
}
type instrument struct {
descriptor metric.Descriptor
}
type syncImpl struct {
delegate unsafe.Pointer // (*metric.SyncImpl)
instrument
}
type asyncImpl struct {
delegate unsafe.Pointer // (*metric.AsyncImpl)
instrument
runner metric.AsyncRunner
}
// SyncImpler is implemented by all of the sync metric
// instruments.
type SyncImpler interface {
SyncImpl() metric.SyncImpl
}
// AsyncImpler is implemented by all of the async
// metric instruments.
type AsyncImpler interface {
AsyncImpl() metric.AsyncImpl
}
type syncHandle struct {
delegate unsafe.Pointer // (*metric.BoundInstrumentImpl)
inst *syncImpl
labels []attribute.KeyValue
initialize sync.Once
}
var _ metric.MeterProvider = &meterProvider{}
var _ metric.MeterImpl = &meterImpl{}
var _ metric.InstrumentImpl = &syncImpl{}
var _ metric.BoundSyncImpl = &syncHandle{}
var _ metric.AsyncImpl = &asyncImpl{}
func (inst *instrument) Descriptor() metric.Descriptor {
return inst.descriptor
}
// MeterProvider interface and delegation
func newMeterProvider() *meterProvider {
return &meterProvider{
meters: map[meterKey]*meterEntry{},
}
}
func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
p.lock.Lock()
defer p.lock.Unlock()
p.delegate = provider
for key, entry := range p.meters {
entry.impl.setDelegate(key.Name, key.Version, provider)
}
p.meters = nil
}
func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
p.lock.Lock()
defer p.lock.Unlock()
if p.delegate != nil {
return p.delegate.Meter(instrumentationName, opts...)
}
key := meterKey{
Name: instrumentationName,
Version: metric.NewMeterConfig(opts...).InstrumentationVersion,
}
entry, ok := p.meters[key]
if !ok {
entry = &meterEntry{}
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
p.meters[key] = entry
}
return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
}
// Meter interface and delegation
func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
m.lock.Lock()
defer m.lock.Unlock()
d := new(metric.MeterImpl)
*d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
m.delegate = unsafe.Pointer(d)
for _, inst := range m.syncInsts {
inst.setDelegate(*d)
}
m.syncInsts = nil
for _, obs := range m.asyncInsts {
obs.setDelegate(*d)
}
m.asyncInsts = nil
}
func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewSyncInstrument(desc)
}
inst := &syncImpl{
instrument: instrument{
descriptor: desc,
},
}
m.syncInsts = append(m.syncInsts, inst)
return inst, nil
}
// Synchronous delegation
func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.SyncImpl)
var err error
*implPtr, err = d.NewSyncInstrument(inst.descriptor)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
}
func (inst *syncImpl) Implementation() interface{} {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return inst
}
func (inst *syncImpl) Bind(labels []attribute.KeyValue) metric.BoundSyncImpl {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Bind(labels)
}
return &syncHandle{
inst: inst,
labels: labels,
}
}
func (bound *syncHandle) Unbind() {
bound.initialize.Do(func() {})
implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
if implPtr == nil {
return
}
(*implPtr).Unbind()
}
// Async delegation
func (m *meterImpl) NewAsyncInstrument(
desc metric.Descriptor,
runner metric.AsyncRunner,
) (metric.AsyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewAsyncInstrument(desc, runner)
}
inst := &asyncImpl{
instrument: instrument{
descriptor: desc,
},
runner: runner,
}
m.asyncInsts = append(m.asyncInsts, inst)
return inst, nil
}
func (obs *asyncImpl) Implementation() interface{} {
if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return obs
}
func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.AsyncImpl)
var err error
*implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
}
// Metric updates
func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...metric.Measurement) {
if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
(*delegatePtr).RecordBatch(ctx, labels, measurements...)
}
}
func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) {
if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
(*instPtr).RecordOne(ctx, number, labels)
}
}
// Bound instrument initialization
func (bound *syncHandle) RecordOne(ctx context.Context, number number.Number) {
instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
if instPtr == nil {
return
}
var implPtr *metric.BoundSyncImpl
bound.initialize.Do(func() {
implPtr = new(metric.BoundSyncImpl)
*implPtr = (*instPtr).Bind(bound.labels)
atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
})
if implPtr == nil {
implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
}
// This may still be nil if instrument was created and bound
// without a delegate, then the instrument was set to have a
// delegate and unbound.
if implPtr == nil {
return
}
(*implPtr).RecordOne(ctx, number)
}
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
"meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate),
"syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate),
"asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate),
"syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate),
}
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"context"

View File

@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"errors"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@ -28,10 +28,6 @@ type (
tp trace.TracerProvider
}
meterProviderHolder struct {
mp metric.MeterProvider
}
propagatorsHolder struct {
tm propagation.TextMapPropagator
}
@ -39,10 +35,8 @@ type (
var (
globalTracer = defaultTracerValue()
globalMeter = defaultMeterValue()
globalPropagators = defaultPropagatorsValue()
delegateMeterOnce sync.Once
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
)
@ -54,43 +48,28 @@ func TracerProvider() trace.TracerProvider {
// SetTracerProvider is the internal implementation for global.SetTracerProvider.
func SetTracerProvider(tp trace.TracerProvider) {
current := TracerProvider()
if _, cOk := current.(*tracerProvider); cOk {
if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
// Do not assign the default delegating TracerProvider to delegate
// to itself.
Error(
errors.New("no delegate configured in tracer provider"),
"Setting tracer provider to it's current value. No delegate will be configured",
)
return
}
}
delegateTraceOnce.Do(func() {
current := TracerProvider()
if current == tp {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid TracerProvider, the global instance cannot be reinstalled")
} else if def, ok := current.(*tracerProvider); ok {
if def, ok := current.(*tracerProvider); ok {
def.setDelegate(tp)
}
})
globalTracer.Store(tracerProviderHolder{tp: tp})
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeter.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
delegateMeterOnce.Do(func() {
current := MeterProvider()
if current == mp {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid MeterProvider, the global instance cannot be reinstalled")
} else if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeter.Store(meterProviderHolder{mp: mp})
}
// TextMapPropagator is the internal implementation for global.TextMapPropagator.
func TextMapPropagator() propagation.TextMapPropagator {
return globalPropagators.Load().(propagatorsHolder).tm
@ -98,15 +77,24 @@ func TextMapPropagator() propagation.TextMapPropagator {
// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
func SetTextMapPropagator(p propagation.TextMapPropagator) {
current := TextMapPropagator()
if _, cOk := current.(*textMapPropagator); cOk {
if _, pOk := p.(*textMapPropagator); pOk && current == p {
// Do not assign the default delegating TextMapPropagator to
// delegate to itself.
Error(
errors.New("no delegate configured in text map propagator"),
"Setting text map propagator to it's current value. No delegate will be configured",
)
return
}
}
// For the textMapPropagator already returned by TextMapPropagator
// delegate to p.
delegateTextMapPropagatorOnce.Do(func() {
if current := TextMapPropagator(); current == p {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid TextMapPropagator, the global instance cannot be reinstalled")
} else if def, ok := current.(*textMapPropagator); ok {
if def, ok := current.(*textMapPropagator); ok {
def.SetDelegate(p)
}
})
@ -120,24 +108,8 @@ func defaultTracerValue() *atomic.Value {
return v
}
func defaultMeterValue() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: newMeterProvider()})
return v
}
func defaultPropagatorsValue() *atomic.Value {
v := &atomic.Value{}
v.Store(propagatorsHolder{tm: newTextMapPropagator()})
return v
}
// ResetForTest restores the initial global state, for testing purposes.
func ResetForTest() {
globalTracer = defaultTracerValue()
globalMeter = defaultMeterValue()
globalPropagators = defaultPropagatorsValue()
delegateMeterOnce = sync.Once{}
delegateTraceOnce = sync.Once{}
delegateTextMapPropagatorOnce = sync.Once{}
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package global
package global // import "go.opentelemetry.io/otel/internal/global"
/*
This file contains the forwarding implementation of the TracerProvider used as
@ -36,7 +36,8 @@ import (
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/internal/trace/noop"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
)
@ -89,9 +90,10 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
// At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map.
c := trace.NewTracerConfig(opts...)
key := il{
name: name,
version: trace.NewTracerConfig(opts...).InstrumentationVersion,
version: c.InstrumentationVersion(),
}
if p.tracers == nil {
@ -102,7 +104,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return val
}
t := &tracer{name: name, opts: opts}
t := &tracer{name: name, opts: opts, provider: p}
p.tracers[key] = t
return t
}
@ -117,8 +119,9 @@ type il struct {
// All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct {
name string
opts []trace.TracerOption
name string
opts []trace.TracerOption
provider *tracerProvider
delegate atomic.Value
}
@ -138,10 +141,52 @@ func (t *tracer) setDelegate(provider trace.TracerProvider) {
// Start implements trace.Tracer by forwarding the call to t.delegate if
// set, otherwise it forwards the call to a NoopTracer.
func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) {
func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
delegate := t.delegate.Load()
if delegate != nil {
return delegate.(trace.Tracer).Start(ctx, name, opts...)
}
return noop.Tracer.Start(ctx, name, opts...)
s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
ctx = trace.ContextWithSpan(ctx, s)
return ctx, s
}
// nonRecordingSpan is a minimal implementation of a Span that wraps a
// SpanContext. It performs no operations other than to return the wrapped
// SpanContext.
type nonRecordingSpan struct {
sc trace.SpanContext
tracer *tracer
}
var _ trace.Span = nonRecordingSpan{}
// SpanContext returns the wrapped SpanContext.
func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
// IsRecording always returns false.
func (nonRecordingSpan) IsRecording() bool { return false }
// SetStatus does nothing.
func (nonRecordingSpan) SetStatus(codes.Code, string) {}
// SetError does nothing.
func (nonRecordingSpan) SetError(bool) {}
// SetAttributes does nothing.
func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
// End does nothing.
func (nonRecordingSpan) End(...trace.SpanEndOption) {}
// RecordError does nothing.
func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
// SetName does nothing.
func (nonRecordingSpan) SetName(string) {}
func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
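// Illustrative sketch, not part of the vendored file: until a delegate SDK
// provider is installed, spans started through this provider are
// non-recording, so instrumentation is a safe no-op.
func exampleNonRecording(ctx context.Context) bool {
	p := &tracerProvider{}
	_, span := p.Tracer("example").Start(ctx, "operation")
	defer span.End()
	return span.IsRecording() // false: no delegate has been configured
}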

View File

@ -1,148 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"errors"
"fmt"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
)
var ErrInvalidAsyncRunner = errors.New("unknown async runner type")
// AsyncCollector is an interface used between the MeterImpl and the
// AsyncInstrumentState helper below. This interface is implemented by
// the SDK to provide support for running observer callbacks.
type AsyncCollector interface {
// CollectAsync passes a batch of observations to the MeterImpl.
CollectAsync(labels []attribute.KeyValue, observation ...metric.Observation)
}
// AsyncInstrumentState manages an ordered set of asynchronous
// instruments and the distinct runners, taking into account batch
// observer callbacks.
type AsyncInstrumentState struct {
lock sync.Mutex
// errorOnce will use the otel.Handler to report an error
// once in case of an invalid runner attempting to run.
errorOnce sync.Once
// runnerMap keeps the set of runners that will run each
// collection interval. Singletons are entered with a real
// instrument each, batch observers are entered with a nil
// instrument, ensuring that when a singleton callback is used
// repeatedly, it is executed repeatedly in the interval, while
// when a batch callback is used repeatedly, it only executes
// once per interval.
runnerMap map[asyncRunnerPair]struct{}
// runners maintains the set of runners in the order they were
// registered.
runners []asyncRunnerPair
// instruments maintains the set of instruments in the order
// they were registered.
instruments []metric.AsyncImpl
}
// asyncRunnerPair is a map entry for Observer callback runners.
type asyncRunnerPair struct {
// runner is used as a map key here. The API ensures
// that all callbacks are pointers for this reason.
runner metric.AsyncRunner
// inst refers to a non-nil instrument when `runner` is a
// AsyncSingleRunner.
inst metric.AsyncImpl
}
// NewAsyncInstrumentState returns a new *AsyncInstrumentState, for
// use by MeterImpl to manage running the set of observer callbacks in
// the correct order.
func NewAsyncInstrumentState() *AsyncInstrumentState {
return &AsyncInstrumentState{
runnerMap: map[asyncRunnerPair]struct{}{},
}
}
// Instruments returns the asynchronous instruments managed by this
// object, the set that should be checkpointed after observers are
// run.
func (a *AsyncInstrumentState) Instruments() []metric.AsyncImpl {
a.lock.Lock()
defer a.lock.Unlock()
return a.instruments
}
// Register adds a new asynchronous instrument to by managed by this
// object. This should be called during NewAsyncInstrument() and
// assumes that errors (e.g., duplicate registration) have already
// been checked.
func (a *AsyncInstrumentState) Register(inst metric.AsyncImpl, runner metric.AsyncRunner) {
a.lock.Lock()
defer a.lock.Unlock()
a.instruments = append(a.instruments, inst)
// asyncRunnerPair reflects this callback in the asyncRunners
// list. If this is a batch runner, the instrument is nil.
// If this is a single-Observer runner, the instrument is
// included. This ensures that batch callbacks are called
// once and single callbacks are called once per instrument.
rp := asyncRunnerPair{
runner: runner,
}
if _, ok := runner.(metric.AsyncSingleRunner); ok {
rp.inst = inst
}
if _, ok := a.runnerMap[rp]; !ok {
a.runnerMap[rp] = struct{}{}
a.runners = append(a.runners, rp)
}
}
// Run executes the complete set of observer callbacks.
func (a *AsyncInstrumentState) Run(ctx context.Context, collector AsyncCollector) {
a.lock.Lock()
runners := a.runners
a.lock.Unlock()
for _, rp := range runners {
// The runner must be a single or batch runner, no
// other implementations are possible because the
// interface has un-exported methods.
if singleRunner, ok := rp.runner.(metric.AsyncSingleRunner); ok {
singleRunner.Run(ctx, rp.inst, collector.CollectAsync)
continue
}
if multiRunner, ok := rp.runner.(metric.AsyncBatchRunner); ok {
multiRunner.Run(ctx, collector.CollectAsync)
continue
}
a.errorOnce.Do(func() {
otel.Handle(fmt.Errorf("%w: type %T (reported once)", ErrInvalidAsyncRunner, rp))
})
}
}

View File

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
package internal // import "go.opentelemetry.io/otel/internal"
import (
"math"
"unsafe"
)
func BoolToRaw(b bool) uint64 {
func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
if b {
return 1
}

View File

@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/sdk/metric"
package otel // import "go.opentelemetry.io/otel"
import "unsafe"
import (
"github.com/go-logr/logr"
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),
"record.updateCount": unsafe.Offsetof(record{}.updateCount),
}
"go.opentelemetry.io/otel/internal/global"
)
// SetLogger configures the logger used internally to opentelemetry.
func SetLogger(logger logr.Logger) {
global.SetLogger(logger)
}

View File

@ -14,85 +14,26 @@
package metric // import "go.opentelemetry.io/otel/metric"
import (
"go.opentelemetry.io/otel/unit"
)
// InstrumentConfig contains options for metric instrument descriptors.
type InstrumentConfig struct {
// Description describes the instrument in human-readable terms.
Description string
	// Unit describes the measurement unit for an instrument.
Unit unit.Unit
// InstrumentationName is the name of the library providing
// instrumentation.
InstrumentationName string
// InstrumentationVersion is the version of the library providing
// instrumentation.
InstrumentationVersion string
}
// InstrumentOption is an interface for applying metric instrument options.
type InstrumentOption interface {
	// ApplyInstrument is used to set an InstrumentOption value of an
	// InstrumentConfig.
ApplyInstrument(*InstrumentConfig)
}
// NewInstrumentConfig creates a new InstrumentConfig
// and applies all the given options.
func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig {
var config InstrumentConfig
for _, o := range opts {
o.ApplyInstrument(&config)
}
return config
}
// WithDescription applies provided description.
func WithDescription(desc string) InstrumentOption {
return descriptionOption(desc)
}
type descriptionOption string
func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) {
config.Description = string(d)
}
// WithUnit applies provided unit.
func WithUnit(unit unit.Unit) InstrumentOption {
return unitOption(unit)
}
type unitOption unit.Unit
func (u unitOption) ApplyInstrument(config *InstrumentConfig) {
config.Unit = unit.Unit(u)
}
// WithInstrumentationName sets the instrumentation name.
func WithInstrumentationName(name string) InstrumentOption {
return instrumentationNameOption(name)
}
type instrumentationNameOption string
func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) {
config.InstrumentationName = string(i)
}
// MeterConfig contains options for Meters.
type MeterConfig struct {
// InstrumentationVersion is the version of the library providing
// instrumentation.
InstrumentationVersion string
instrumentationVersion string
schemaURL string
}
// InstrumentationVersion is the version of the library providing instrumentation.
func (cfg MeterConfig) InstrumentationVersion() string {
return cfg.instrumentationVersion
}
// SchemaURL is the schema_url of the library providing instrumentation.
func (cfg MeterConfig) SchemaURL() string {
return cfg.schemaURL
}
// MeterOption is an interface for applying Meter options.
type MeterOption interface {
// ApplyMeter is used to set a MeterOption value of a MeterConfig.
ApplyMeter(*MeterConfig)
// applyMeter is used to set a MeterOption value of a MeterConfig.
applyMeter(MeterConfig) MeterConfig
}
// NewMeterConfig creates a new MeterConfig and applies
@ -100,29 +41,29 @@ type MeterOption interface {
func NewMeterConfig(opts ...MeterOption) MeterConfig {
var config MeterConfig
for _, o := range opts {
o.ApplyMeter(&config)
config = o.applyMeter(config)
}
return config
}
// InstrumentationOption is an interface for applying instrumentation specific
// options.
type InstrumentationOption interface {
InstrumentOption
MeterOption
type meterOptionFunc func(MeterConfig) MeterConfig
func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
return fn(cfg)
}
// WithInstrumentationVersion sets the instrumentation version.
func WithInstrumentationVersion(version string) InstrumentationOption {
return instrumentationVersionOption(version)
func WithInstrumentationVersion(version string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.instrumentationVersion = version
return config
})
}
type instrumentationVersionOption string
func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) {
config.InstrumentationVersion = string(i)
}
func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) {
config.InstrumentationVersion = string(i)
// WithSchemaURL sets the schema URL.
func WithSchemaURL(schemaURL string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.schemaURL = schemaURL
return config
})
}
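// A short usage sketch of the functional options defined above, as a
// MeterProvider implementation might evaluate them inside its Meter method
// (the schema URL value here is purely illustrative):
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	cfg := metric.NewMeterConfig(
		metric.WithInstrumentationVersion("1.2.3"),
		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.12.0"),
	)
	// Readers of the config use the accessor methods; the fields themselves
	// are unexported.
	fmt.Println(cfg.InstrumentationVersion(), cfg.SchemaURL())
}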

View File

@ -19,49 +19,5 @@ OpenTelemetry API.
This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.
Measurements can be made about an operation being performed or the state of a
system in general. These measurements can be crucial to the reliable operation
of code and provide valuable insights about the inner workings of a system.
Measurements are made using instruments provided by this package. The type of
instrument used will depend on the type of measurement being made and of what
part of a system is being measured.
Instruments are categorized as Synchronous or Asynchronous and independently
as Additive or Grouping. Synchronous instruments are called by the user with a
Context. Asynchronous instruments are called by the SDK during collection.
Additive instruments are semantically intended for capturing a sum. Grouping
instruments are intended for capturing a distribution.
Additive instruments may be monotonic, in which case they are non-decreasing
and naturally define a rate.
The synchronous instrument names are:
Counter: additive, monotonic
UpDownCounter: additive
ValueRecorder: grouping
and the asynchronous instruments are:
SumObserver: additive, monotonic
UpDownSumObserver: additive
ValueObserver: grouping
All instruments are provided with support for either float64 or int64 input
values.
An instrument is created using a Meter. Additionally, a Meter is used to
record batches of synchronous measurements or asynchronous observations. A
Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to
the instrumentation it instruments and must be created from a MeterProvider
using the name and version of the instrumentation library.
Instrumentation should be designed to accept a MeterProvider from which it can
create its own unique Meter. Alternatively, the registered global
MeterProvider from the go.opentelemetry.io/otel package can be used as a
default.
*/
package metric // import "go.opentelemetry.io/otel/metric"

View File

@ -15,31 +15,24 @@
package global // import "go.opentelemetry.io/otel/metric/global"
import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/internal/global"
)
// Meter creates an implementation of the Meter interface from the global
// MeterProvider. The instrumentationName must be the name of the library
// providing instrumentation. This name may be the same as the instrumented
// code only if that code provides built-in instrumentation. If the
// instrumentationName is empty, then an implementation-defined default name
// will be used instead.
// Meter returns a Meter from the global MeterProvider. The
// instrumentationName must be the name of the library providing
// instrumentation. This name may be the same as the instrumented code only if
// that code provides built-in instrumentation. If the instrumentationName is
// empty, then an implementation-defined default name will be used instead.
//
// This is short for MeterProvider().Meter(name)
// This is short for MeterProvider().Meter(name).
func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
return GetMeterProvider().Meter(instrumentationName, opts...)
return MeterProvider().Meter(instrumentationName, opts...)
}
// GetMeterProvider returns the registered global meter provider. If
// none is registered then a default meter provider is returned that
// forwards the Meter interface to the first registered Meter.
//
// Use the meter provider to create a named meter. E.g.
// meter := global.MeterProvider().Meter("example.com/foo")
// or
// meter := global.Meter("example.com/foo")
func GetMeterProvider() metric.MeterProvider {
// MeterProvider returns the registered global meter provider.
// If none is registered then a No-op MeterProvider is returned.
func MeterProvider() metric.MeterProvider {
return global.MeterProvider()
}
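// A small sketch of the two equivalent ways to obtain a Meter through this
// package; until an SDK registers a real MeterProvider, both return the
// delegating no-op implementation:
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	m1 := global.MeterProvider().Meter("example.com/foo")
	m2 := global.Meter("example.com/foo") // shorthand for the line above
	fmt.Printf("%T %T\n", m1, m2)
}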

View File

@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Gauge creates an instrument for recording the current value.
Gauge(name string, opts ...instrument.Option) (Gauge, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// Gauge is an instrument that records independent readings.
type Gauge interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
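// A hedged usage sketch: registering a float64 Gauge whose value is observed
// from a callback. It assumes a Meter obtained from the global provider and
// the Meter.RegisterCallback([]instrument.Asynchronous, func(context.Context))
// signature of this API generation; the instrument name is illustrative.
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
)

func main() {
	meter := global.MeterProvider().Meter("example.com/app")

	heap, err := meter.AsyncFloat64().Gauge("process.heap.bytes",
		instrument.WithDescription("heap bytes observed once per collection"))
	if err != nil {
		panic(err)
	}

	// Observe is only valid inside the registered callback; the SDK runs the
	// callback once per collection interval.
	err = meter.RegisterCallback([]instrument.Asynchronous{heap}, func(ctx context.Context) {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		heap.Observe(ctx, float64(ms.HeapAlloc))
	})
	if err != nil {
		panic(err)
	}
}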

View File

@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Gauge creates an instrument for recording the current value.
Gauge(name string, opts ...instrument.Option) (Gauge, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
// Gauge is an instrument that records independent readings.
type Gauge interface {
// Observe records the state of the instrument.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
// error will be reported via the error handler.
Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
instrument.Asynchronous
}
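// The int64 variant mirrors the float64 package above; a brief sketch
// observing the current goroutine count with a Gauge (same RegisterCallback
// assumption as in the asyncfloat64 example):
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
)

func main() {
	meter := global.MeterProvider().Meter("example.com/app")
	goroutines, err := meter.AsyncInt64().Gauge("process.goroutines",
		instrument.WithDescription("number of live goroutines"))
	if err != nil {
		panic(err)
	}
	if err := meter.RegisterCallback([]instrument.Asynchronous{goroutines}, func(ctx context.Context) {
		goroutines.Observe(ctx, int64(runtime.NumGoroutine()))
	}); err != nil {
		panic(err)
	}
}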

View File

@ -0,0 +1,69 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package instrument // import "go.opentelemetry.io/otel/metric/instrument"
import "go.opentelemetry.io/otel/metric/unit"
// Config contains options for metric instrument descriptors.
type Config struct {
description string
unit unit.Unit
}
// Description describes the instrument in human-readable terms.
func (cfg Config) Description() string {
return cfg.description
}
// Unit describes the measurement unit for an instrument.
func (cfg Config) Unit() unit.Unit {
return cfg.unit
}
// Option is an interface for applying metric instrument options.
type Option interface {
applyInstrument(Config) Config
}
// NewConfig creates a new Config and applies all the given options.
func NewConfig(opts ...Option) Config {
var config Config
for _, o := range opts {
config = o.applyInstrument(config)
}
return config
}
type optionFunc func(Config) Config
func (fn optionFunc) applyInstrument(cfg Config) Config {
return fn(cfg)
}
// WithDescription applies provided description.
func WithDescription(desc string) Option {
return optionFunc(func(cfg Config) Config {
cfg.description = desc
return cfg
})
}
// WithUnit applies provided unit.
func WithUnit(u unit.Unit) Option {
return optionFunc(func(cfg Config) Config {
cfg.unit = u
return cfg
})
}
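// A short sketch of how an SDK would consume these options when creating an
// instrument; unit.Bytes is assumed to be one of the constants in the
// companion go.opentelemetry.io/otel/metric/unit package:
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/instrument"
	"go.opentelemetry.io/otel/metric/unit"
)

func main() {
	cfg := instrument.NewConfig(
		instrument.WithDescription("bytes received on the wire"),
		instrument.WithUnit(unit.Bytes),
	)
	fmt.Println(cfg.Description(), cfg.Unit())
}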

View File

@ -0,0 +1,30 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package instrument // import "go.opentelemetry.io/otel/metric/instrument"
// Asynchronous instruments are instruments that are updated within a Callback.
// If an instrument is observed outside of its callback, it should be an error.
//
// This interface is used as a grouping mechanism.
type Asynchronous interface {
asynchronous()
}
// Synchronous instruments are updated in line with application code.
//
// This interface is used as a grouping mechanism.
type Synchronous interface {
synchronous()
}
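// Because asynchronous() and synchronous() are unexported, code outside this
// package can only satisfy these grouping interfaces by embedding them; the
// delegating instruments in metric/internal/global do exactly that. A minimal
// sketch of the pattern with a hypothetical wrapper type:
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/instrument"
)

// namedObservable is a hypothetical type; embedding instrument.Asynchronous
// lets it be passed wherever the grouping interface is required.
type namedObservable struct {
	name string
	instrument.Asynchronous
}

func main() {
	var group []instrument.Asynchronous
	group = append(group, namedObservable{name: "example"})
	fmt.Println(len(group))
}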

View File

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Histogram creates an instrument for recording a distribution of values.
Histogram(name string, opts ...instrument.Option) (Histogram, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// Histogram is an instrument that records a distribution of values.
type Histogram interface {
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
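// A usage sketch recording request latency into a Histogram; the instrument
// name follows common semantic-convention style but is illustrative here:
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
)

func main() {
	meter := global.MeterProvider().Meter("example.com/app")
	latency, err := meter.SyncFloat64().Histogram("http.server.duration",
		instrument.WithDescription("request latency in milliseconds"))
	if err != nil {
		panic(err)
	}

	start := time.Now()
	// ... handle the request ...
	latency.Record(context.Background(), float64(time.Since(start).Milliseconds()),
		attribute.String("http.route", "/healthz"))
}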

View File

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument"
)
// InstrumentProvider provides access to individual instruments.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
// UpDownCounter creates an instrument for recording changes of a value.
UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
// Histogram creates an instrument for recording a distribution of values.
Histogram(name string, opts ...instrument.Option) (Histogram, error)
}
// Counter is an instrument that records increasing values.
type Counter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// UpDownCounter is an instrument that records increasing or decreasing values.
type UpDownCounter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
// Histogram is an instrument that records a distribution of values.
type Histogram interface {
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
instrument.Synchronous
}
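// A usage sketch incrementing an int64 Counter once per handled request;
// names are illustrative:
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/global"
	"go.opentelemetry.io/otel/metric/instrument"
)

func main() {
	meter := global.MeterProvider().Meter("example.com/app")
	requests, err := meter.SyncInt64().Counter("http.server.requests",
		instrument.WithDescription("handled requests"))
	if err != nil {
		panic(err)
	}
	requests.Add(context.Background(), 1, attribute.String("http.method", "GET"))
}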

View File

@ -1,28 +0,0 @@
// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
package metric
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ValueRecorderInstrumentKind-0]
_ = x[ValueObserverInstrumentKind-1]
_ = x[CounterInstrumentKind-2]
_ = x[UpDownCounterInstrumentKind-3]
_ = x[SumObserverInstrumentKind-4]
_ = x[UpDownSumObserverInstrumentKind-5]
}
const _InstrumentKind_name = "ValueRecorderInstrumentKindValueObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindSumObserverInstrumentKindUpDownSumObserverInstrumentKind"
var _InstrumentKind_index = [...]uint8{0, 27, 54, 75, 102, 127, 158}
func (i InstrumentKind) String() string {
if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) {
return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
}

View File

@ -0,0 +1,360 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/metric/internal/global"
import (
"context"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
)
type afCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncfloat64.Counter
instrument.Asynchronous
}
func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...)
}
}
func (i *afCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncfloat64.Counter)
}
return nil
}
type afUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncfloat64.UpDownCounter
instrument.Asynchronous
}
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...)
}
}
func (i *afUpDownCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncfloat64.UpDownCounter)
}
return nil
}
type afGauge struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncfloat64.Gauge
instrument.Asynchronous
}
func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...)
}
}
func (i *afGauge) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncfloat64.Gauge)
}
return nil
}
type aiCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncint64.Counter
instrument.Asynchronous
}
func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncInt64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncint64.Counter).Observe(ctx, x, attrs...)
}
}
func (i *aiCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncint64.Counter)
}
return nil
}
type aiUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncint64.UpDownCounter
instrument.Asynchronous
}
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...)
}
}
func (i *aiUpDownCounter) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncint64.UpDownCounter)
}
return nil
}
type aiGauge struct {
name string
opts []instrument.Option
delegate atomic.Value //asyncint64.Gauge
instrument.Asynchronous
}
func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...)
}
}
func (i *aiGauge) unwrap() instrument.Asynchronous {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(asyncint64.Gauge)
}
return nil
}
// Sync Instruments.
type sfCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncfloat64.Counter
instrument.Synchronous
}
func (i *sfCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncFloat64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...)
}
}
type sfUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncfloat64.UpDownCounter
instrument.Synchronous
}
func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...)
}
}
type sfHistogram struct {
name string
opts []instrument.Option
delegate atomic.Value //syncfloat64.Histogram
instrument.Synchronous
}
func (i *sfHistogram) setDelegate(m metric.Meter) {
ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...)
}
}
type siCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncint64.Counter
instrument.Synchronous
}
func (i *siCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncInt64().Counter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncint64.Counter).Add(ctx, x, attrs...)
}
}
type siUpDownCounter struct {
name string
opts []instrument.Option
delegate atomic.Value //syncint64.UpDownCounter
instrument.Synchronous
}
func (i *siUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...)
}
}
type siHistogram struct {
name string
opts []instrument.Option
delegate atomic.Value //syncint64.Histogram
instrument.Synchronous
}
func (i *siHistogram) setDelegate(m metric.Meter) {
ctr, err := m.SyncInt64().Histogram(i.name, i.opts...)
if err != nil {
otel.Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(syncint64.Histogram).Record(ctx, x, attrs...)
}
}
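// The wrappers above all follow the same lazy-delegation pattern: calls are
// dropped until setDelegate stores a real instrument in an atomic.Value,
// after which every call is forwarded. A self-contained sketch of that
// pattern with a stand-in interface:
package main

import (
	"fmt"
	"sync/atomic"
)

// adder stands in for a concrete instrument such as syncfloat64.Counter.
type adder interface{ Add(v float64) }

type printAdder struct{}

func (printAdder) Add(v float64) { fmt.Println("add", v) }

// delegatingAdder drops calls until a delegate is installed, then forwards.
type delegatingAdder struct {
	delegate atomic.Value // adder
}

func (d *delegatingAdder) setDelegate(a adder) { d.delegate.Store(a) }

func (d *delegatingAdder) Add(v float64) {
	if a := d.delegate.Load(); a != nil {
		a.(adder).Add(v)
	}
}

func main() {
	d := &delegatingAdder{}
	d.Add(1) // silently dropped: no delegate yet
	d.setDelegate(printAdder{})
	d.Add(2) // forwarded to the real instrument
}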

Some files were not shown because too many files have changed in this diff.