Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00

rebase: update kubernetes to 1.26.1

Update kubernetes and its dependencies to v1.26.1.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: e9e33fb851
Commit: 9c8de9471e
@@ -12,20 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package transform
// Package internal contains common functionality for all OTLP exporters.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"

import (
	commonpb "go.opentelemetry.io/proto/otlp/common/v1"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	"fmt"
	"path"
	"strings"
)

func instrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary {
	if il == (instrumentation.Library{}) {
		return nil
// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
func CleanPath(urlPath string, defaultPath string) string {
	tmp := path.Clean(strings.TrimSpace(urlPath))
	if tmp == "." {
		return defaultPath
	}
	return &commonpb.InstrumentationLibrary{
		Name:    il.Name,
		Version: il.Version,
	if !path.IsAbs(tmp) {
		tmp = fmt.Sprintf("/%s", tmp)
	}
	return tmp
}
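To make the new helper in the hunk above concrete, here is a minimal, hypothetical sketch of the CleanPath behaviour. The local cleanPath function is a copy made only so the example is self-contained, since the real function lives in an internal package and cannot be imported from outside the otel module.

package main

import (
	"fmt"
	"path"
	"strings"
)

// cleanPath mirrors the CleanPath helper shown in the hunk above (copied
// here purely for illustration).
func cleanPath(urlPath, defaultPath string) string {
	tmp := path.Clean(strings.TrimSpace(urlPath))
	if tmp == "." {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		tmp = fmt.Sprintf("/%s", tmp)
	}
	return tmp
}

func main() {
	fmt.Println(cleanPath(" /v1//traces/ ", "/v1/traces")) // "/v1/traces" (spaces and redundancies removed)
	fmt.Println(cleanPath("", "/v1/traces"))               // "/v1/traces" (default returned for empty input)
	fmt.Println(cleanPath("metrics", "/v1/metrics"))       // "/metrics" (leading slash added)
}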
148 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go (generated, vendored, new file)
@@ -0,0 +1,148 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"
	"time"
)

// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)

// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
	GetEnv    func(string) string
	ReadFile  func(string) ([]byte, error)
	Namespace string
}

// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
	for _, o := range opts {
		o(e)
	}
}

// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
	return v, v != ""
}

// WithString retrieves the specified config and passes it to ConfigFn as a string.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			fn(v)
		}
	}
}

// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			if d, err := strconv.Atoi(v); err == nil {
				fn(time.Duration(d) * time.Millisecond)
			}
		}
	}
}

// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			fn(stringToHeader(v))
		}
	}
}

// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			if u, err := url.Parse(v); err == nil {
				fn(u)
			}
		}
	}
}

// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config.
func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			if b, err := e.ReadFile(v); err == nil {
				if c, err := createTLSConfig(b); err == nil {
					fn(c)
				}
			}
		}
	}
}

func keyWithNamespace(ns, key string) string {
	if ns == "" {
		return key
	}
	return fmt.Sprintf("%s_%s", ns, key)
}

func stringToHeader(value string) map[string]string {
	headersPairs := strings.Split(value, ",")
	headers := make(map[string]string)

	for _, header := range headersPairs {
		nameValue := strings.SplitN(header, "=", 2)
		if len(nameValue) < 2 {
			continue
		}
		name, err := url.QueryUnescape(nameValue[0])
		if err != nil {
			continue
		}
		trimmedName := strings.TrimSpace(name)
		value, err := url.QueryUnescape(nameValue[1])
		if err != nil {
			continue
		}
		trimmedValue := strings.TrimSpace(value)

		headers[trimmedName] = trimmedValue
	}

	return headers
}

func createTLSConfig(certBytes []byte) (*tls.Config, error) {
	cp := x509.NewCertPool()
	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
		return nil, errors.New("failed to append certificate to the cert pool")
	}

	return &tls.Config{
		RootCAs: cp,
	}, nil
}
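As a brief, hypothetical sketch of how the new envconfig API above could be wired up: an EnvOptionsReader is seeded with GetEnv/ReadFile and a namespace, and the With* helpers copy any set variables into local settings. The variable names and surrounding program are illustrative only, and the import would compile only from inside the otel module because the package is internal.

package main

import (
	"fmt"
	"os"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
)

func main() {
	// Illustrative defaults; real exporters carry these in their config structs.
	endpoint := "localhost:4317"
	timeout := 10 * time.Second

	// The reader namespaces every lookup, so "ENDPOINT" is read from
	// "OTEL_EXPORTER_OTLP_ENDPOINT" with the namespace used below.
	reader := envconfig.EnvOptionsReader{
		GetEnv:    os.Getenv,
		ReadFile:  os.ReadFile,
		Namespace: "OTEL_EXPORTER_OTLP",
	}

	reader.Apply(
		envconfig.WithString("ENDPOINT", func(v string) { endpoint = v }),
		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { timeout = d }),
	)

	fmt.Println(endpoint, timeout)
}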
196 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig/envconfig.go (generated, vendored)
@@ -1,196 +0,0 @@
(Entire 196-line file removed. It held the legacy otlpconfig EnvOptionsReader, which applied the OTEL_EXPORTER_OTLP_* environment variables for endpoints, certificates, headers, compression, and timeouts to a Config.)
376 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig/options.go (generated, vendored)
@@ -1,376 +0,0 @@
(Entire 376-line file removed. It held the legacy otlpconfig defaults and the Config/SignalConfig types, plus the GenericOption, HTTPOption, and GRPCOption helpers such as WithEndpoint, WithHeaders, WithCompression, WithTLSClientConfig, and WithTimeout.)
69 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig/tls.go (generated, vendored)
@@ -1,69 +0,0 @@
(Entire 69-line file removed. It held the legacy otlpconfig TLS helpers ReadTLSConfigFromFile and CreateTLSConfig, along with a weak certificate and private key used for testing.)
68 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go (generated, vendored, new file)
@@ -0,0 +1,68 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"

import "fmt"

// PartialSuccessDropKind indicates the kind of partial success error
// received by an OTLP exporter, which corresponds with the signal
// being exported.
type PartialSuccessDropKind string

const (
	// TracingPartialSuccess indicates that some spans were rejected.
	TracingPartialSuccess PartialSuccessDropKind = "spans"

	// MetricsPartialSuccess indicates that some metric data points were rejected.
	MetricsPartialSuccess PartialSuccessDropKind = "metric data points"
)

// PartialSuccess represents the underlying error for all handling
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
	ErrorMessage  string
	RejectedItems int64
	RejectedKind  PartialSuccessDropKind
}

var _ error = PartialSuccess{}

// Error implements the error interface.
func (ps PartialSuccess) Error() string {
	msg := ps.ErrorMessage
	if msg == "" {
		msg = "empty message"
	}
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}

// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
	_, ok := err.(PartialSuccess)
	return ok
}

// PartialSuccessToError produces an error suitable for passing to
// `otel.Handle()` out of the fields in a partial success response,
// independent of which signal produced the outcome.
func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error {
	return PartialSuccess{
		ErrorMessage:  errorMessage,
		RejectedItems: itemsRejected,
		RejectedKind:  kind,
	}
}
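A small, hypothetical sketch of how the PartialSuccess error above is meant to be consumed, following the file's own guidance to test with errors.Is. In a real exporter the error would be handed to otel.Handle(); here it is just inspected directly, and again the import only works from inside the otel module because the package is internal.

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/exporters/otlp/internal"
)

func main() {
	// Build a partial-success error as a trace exporter would after the
	// collector reported 42 rejected spans with no explanatory message.
	err := internal.PartialSuccessToError(internal.TracingPartialSuccess, 42, "")

	if errors.Is(err, internal.PartialSuccess{}) {
		// Prints: OTLP partial success: empty message (42 spans rejected)
		fmt.Println(err)
	}
}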
201 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE (generated, vendored, new file)
@@ -0,0 +1,201 @@
(New file containing the standard Apache License, Version 2.0 text.)
150 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go (generated, vendored, new file)
@@ -0,0 +1,150 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
// explicit throttle responses received.
package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry"

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// DefaultConfig are the recommended defaults to use.
var DefaultConfig = Config{
	Enabled:         true,
	InitialInterval: 5 * time.Second,
	MaxInterval:     30 * time.Second,
	MaxElapsedTime:  time.Minute,
}

// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
	// Enabled indicates whether to not retry sending batches in case of
	// export failure.
	Enabled bool
	// InitialInterval the time to wait after the first failure before
	// retrying.
	InitialInterval time.Duration
	// MaxInterval is the upper bound on backoff interval. Once this value is
	// reached the delay between consecutive retries will always be
	// `MaxInterval`.
	MaxInterval time.Duration
	// MaxElapsedTime is the maximum amount of time (including retries) spent
	// trying to send a request/batch. Once this value is reached, the data
	// is discarded.
	MaxElapsedTime time.Duration
}

// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error

// EvaluateFunc returns if an error is retry-able and if an explicit throttle
// duration should be honored that was included in the error.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)

// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
	if !c.Enabled {
		return func(ctx context.Context, fn func(context.Context) error) error {
			return fn(ctx)
		}
	}

	// Do not use NewExponentialBackOff since it calls Reset and the code here
	// must call Reset after changing the InitialInterval (this saves an
	// unnecessary call to Now).
	b := &backoff.ExponentialBackOff{
		InitialInterval:     c.InitialInterval,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         c.MaxInterval,
		MaxElapsedTime:      c.MaxElapsedTime,
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	b.Reset()

	return func(ctx context.Context, fn func(context.Context) error) error {
		for {
			err := fn(ctx)
			if err == nil {
				return nil
			}

			retryable, throttle := evaluate(err)
			if !retryable {
				return err
			}

			bOff := b.NextBackOff()
			if bOff == backoff.Stop {
				return fmt.Errorf("max retry time elapsed: %w", err)
			}

			// Wait for the greater of the backoff or throttle delay.
			var delay time.Duration
			if bOff > throttle {
				delay = bOff
			} else {
				elapsed := b.GetElapsedTime()
				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
					return fmt.Errorf("max retry time would elapse: %w", err)
				}
				delay = throttle
			}

			if err := waitFunc(ctx, delay); err != nil {
				return err
			}
		}
	}
}

// Allow override for testing.
var waitFunc = wait

func wait(ctx context.Context, delay time.Duration) error {
	timer := time.NewTimer(delay)
	defer timer.Stop()

	select {
	case <-ctx.Done():
		// Handle the case where the timer and context deadline end
		// simultaneously by prioritizing the timer expiration nil value
		// response.
		select {
		case <-timer.C:
		default:
			return ctx.Err()
		}
	case <-timer.C:
	}

	return nil
}
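The following hypothetical sketch shows how the retry package above is typically wired around an export call: the EvaluateFunc decides whether an error is retryable and whether an explicit throttle hint should be honored, and the returned RequestFunc loops with exponential backoff until the call succeeds or retrying is exhausted. The error-classification logic and the small intervals are made up for illustration, and the internal import again only compiles from inside the otel module.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
)

// errTemporary stands in for a transient transport failure.
var errTemporary = errors.New("temporary transport error")

func main() {
	// Short intervals keep the example quick; DefaultConfig would also work.
	cfg := retry.Config{
		Enabled:         true,
		InitialInterval: 10 * time.Millisecond,
		MaxInterval:     100 * time.Millisecond,
		MaxElapsedTime:  time.Second,
	}

	requestFunc := cfg.RequestFunc(func(err error) (bool, time.Duration) {
		// Retry only our hypothetical temporary error, with no throttle hint.
		return errors.Is(err, errTemporary), 0
	})

	attempts := 0
	err := requestFunc(context.Background(), func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errTemporary // retried with exponential backoff
		}
		return nil // succeeds on the third attempt
	})

	fmt.Println(attempts, err) // 3 <nil>
}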
141 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/transform/attribute.go (generated, vendored)
@@ -1,141 +0,0 @@
(Entire 141-line file removed. It held the legacy transform helpers Attributes, ResourceAttributes, toAttribute, and arrayValues, which converted attribute.KeyValue values into OTLP commonpb.KeyValue/AnyValue messages.)
631 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/transform/metric.go (generated, vendored)
@ -1,631 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package transform provides translations for opentelemetry-go concepts and
// structures to otlp structures.
package transform

import (
    "context"
    "errors"
    "fmt"
    "strings"
    "sync"
    "time"

    "go.opentelemetry.io/otel/attribute"
    commonpb "go.opentelemetry.io/proto/otlp/common/v1"
    metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
    resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"

    "go.opentelemetry.io/otel/metric/number"
    export "go.opentelemetry.io/otel/sdk/export/metric"
    "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
    "go.opentelemetry.io/otel/sdk/instrumentation"
    "go.opentelemetry.io/otel/sdk/resource"
)

var (
    // ErrUnimplementedAgg is returned when a transformation of an unimplemented
    // aggregator is attempted.
    ErrUnimplementedAgg = errors.New("unimplemented aggregator")

    // ErrIncompatibleAgg is returned when
    // aggregation.Kind implies an interface conversion that has
    // failed
    ErrIncompatibleAgg = errors.New("incompatible aggregation type")

    // ErrUnknownValueType is returned when a transformation of an unknown value
    // is attempted.
    ErrUnknownValueType = errors.New("invalid value type")

    // ErrContextCanceled is returned when a context cancellation halts a
    // transformation.
    ErrContextCanceled = errors.New("context canceled")

    // ErrTransforming is returned when an unexected error is encoutered transforming.
    ErrTransforming = errors.New("transforming failed")
)

// result is the product of transforming Records into OTLP Metrics.
type result struct {
    Resource *resource.Resource
    InstrumentationLibrary instrumentation.Library
    Metric *metricpb.Metric
    Err error
}

// toNanos returns the number of nanoseconds since the UNIX epoch.
func toNanos(t time.Time) uint64 {
    if t.IsZero() {
        return 0
    }
    return uint64(t.UnixNano())
}

// CheckpointSet transforms all records contained in a checkpoint into
// batched OTLP ResourceMetrics.
func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
    records, errc := source(ctx, exportSelector, cps)

    // Start a fixed number of goroutines to transform records.
    transformed := make(chan result)
    var wg sync.WaitGroup
    wg.Add(int(numWorkers))
    for i := uint(0); i < numWorkers; i++ {
        go func() {
            defer wg.Done()
            transformer(ctx, exportSelector, records, transformed)
        }()
    }
    go func() {
        wg.Wait()
        close(transformed)
    }()

    // Synchronously collect the transformed records and transmit.
    rms, err := sink(ctx, transformed)
    if err != nil {
        return nil, err
    }

    // source is complete, check for any errors.
    if err := <-errc; err != nil {
        return nil, err
    }
    return rms, nil
}

// source starts a goroutine that sends each one of the Records yielded by
// the CheckpointSet on the returned chan. Any error encoutered will be sent
// on the returned error chan after seeding is complete.
func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
    errc := make(chan error, 1)
    out := make(chan export.Record)
    // Seed records into process.
    go func() {
        defer close(out)
        // No select is needed since errc is buffered.
        errc <- cps.ForEach(exportSelector, func(r export.Record) error {
            select {
            case <-ctx.Done():
                return ErrContextCanceled
            case out <- r:
            }
            return nil
        })
    }()
    return out, errc
}

// transformer transforms records read from the passed in chan into
// OTLP Metrics which are sent on the out chan.
func transformer(ctx context.Context, exportSelector export.ExportKindSelector, in <-chan export.Record, out chan<- result) {
    for r := range in {
        m, err := Record(exportSelector, r)
        // Propagate errors, but do not send empty results.
        if err == nil && m == nil {
            continue
        }
        res := result{
            Resource: r.Resource(),
            InstrumentationLibrary: instrumentation.Library{
                Name: r.Descriptor().InstrumentationName(),
                Version: r.Descriptor().InstrumentationVersion(),
            },
            Metric: m,
            Err: err,
        }
        select {
        case <-ctx.Done():
            return
        case out <- res:
        }
    }
}

// sink collects transformed Records and batches them.
//
// Any errors encoutered transforming input will be reported with an
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
// caller to handle any incorrect data in these ResourceMetrics.
func sink(ctx context.Context, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
    var errStrings []string

    type resourceBatch struct {
        Resource *resourcepb.Resource
        // Group by instrumentation library name and then the MetricDescriptor.
        InstrumentationLibraryBatches map[instrumentation.Library]map[string]*metricpb.Metric
    }

    // group by unique Resource string.
    grouped := make(map[attribute.Distinct]resourceBatch)
    for res := range in {
        if res.Err != nil {
            errStrings = append(errStrings, res.Err.Error())
            continue
        }

        rID := res.Resource.Equivalent()
        rb, ok := grouped[rID]
        if !ok {
            rb = resourceBatch{
                Resource: Resource(res.Resource),
                InstrumentationLibraryBatches: make(map[instrumentation.Library]map[string]*metricpb.Metric),
            }
            grouped[rID] = rb
        }

        mb, ok := rb.InstrumentationLibraryBatches[res.InstrumentationLibrary]
        if !ok {
            mb = make(map[string]*metricpb.Metric)
            rb.InstrumentationLibraryBatches[res.InstrumentationLibrary] = mb
        }

        mID := res.Metric.GetName()
        m, ok := mb[mID]
        if !ok {
            mb[mID] = res.Metric
            continue
        }
        switch res.Metric.Data.(type) {
        case *metricpb.Metric_IntGauge:
            m.GetIntGauge().DataPoints = append(m.GetIntGauge().DataPoints, res.Metric.GetIntGauge().DataPoints...)
        case *metricpb.Metric_IntHistogram:
            m.GetIntHistogram().DataPoints = append(m.GetIntHistogram().DataPoints, res.Metric.GetIntHistogram().DataPoints...)
        case *metricpb.Metric_IntSum:
            m.GetIntSum().DataPoints = append(m.GetIntSum().DataPoints, res.Metric.GetIntSum().DataPoints...)
        case *metricpb.Metric_DoubleGauge:
            m.GetDoubleGauge().DataPoints = append(m.GetDoubleGauge().DataPoints, res.Metric.GetDoubleGauge().DataPoints...)
        case *metricpb.Metric_DoubleHistogram:
            m.GetDoubleHistogram().DataPoints = append(m.GetDoubleHistogram().DataPoints, res.Metric.GetDoubleHistogram().DataPoints...)
        case *metricpb.Metric_DoubleSum:
            m.GetDoubleSum().DataPoints = append(m.GetDoubleSum().DataPoints, res.Metric.GetDoubleSum().DataPoints...)
        default:
        }
    }

    if len(grouped) == 0 {
        return nil, nil
    }

    var rms []*metricpb.ResourceMetrics
    for _, rb := range grouped {
        rm := &metricpb.ResourceMetrics{Resource: rb.Resource}
        for il, mb := range rb.InstrumentationLibraryBatches {
            ilm := &metricpb.InstrumentationLibraryMetrics{
                Metrics: make([]*metricpb.Metric, 0, len(mb)),
            }
            if il != (instrumentation.Library{}) {
                ilm.InstrumentationLibrary = &commonpb.InstrumentationLibrary{
                    Name: il.Name,
                    Version: il.Version,
                }
            }
            for _, m := range mb {
                ilm.Metrics = append(ilm.Metrics, m)
            }
            rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
        }
        rms = append(rms, rm)
    }

    // Report any transform errors.
    if len(errStrings) > 0 {
        return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
    }
    return rms, nil
}

// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
// error is returned if the Record Aggregator is not supported.
func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricpb.Metric, error) {
    agg := r.Aggregation()
    switch agg.Kind() {
    case aggregation.MinMaxSumCountKind:
        mmsc, ok := agg.(aggregation.MinMaxSumCount)
        if !ok {
            return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
        }
        return minMaxSumCount(r, mmsc)

    case aggregation.HistogramKind:
        h, ok := agg.(aggregation.Histogram)
        if !ok {
            return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
        }
        return histogramPoint(r, exportSelector.ExportKindFor(r.Descriptor(), aggregation.HistogramKind), h)

    case aggregation.SumKind:
        s, ok := agg.(aggregation.Sum)
        if !ok {
            return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
        }
        sum, err := s.Sum()
        if err != nil {
            return nil, err
        }
        return sumPoint(r, sum, r.StartTime(), r.EndTime(), exportSelector.ExportKindFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic())

    case aggregation.LastValueKind:
        lv, ok := agg.(aggregation.LastValue)
        if !ok {
            return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
        }
        value, tm, err := lv.LastValue()
        if err != nil {
            return nil, err
        }
        return gaugePoint(r, value, time.Time{}, tm)

    case aggregation.ExactKind:
        e, ok := agg.(aggregation.Points)
        if !ok {
            return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
        }
        pts, err := e.Points()
        if err != nil {
            return nil, err
        }

        return gaugeArray(r, pts)

    default:
        return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg)
    }
}

func gaugeArray(record export.Record, points []aggregation.Point) (*metricpb.Metric, error) {
    desc := record.Descriptor()
    labels := record.Labels()
    m := &metricpb.Metric{
        Name: desc.Name(),
        Description: desc.Description(),
        Unit: string(desc.Unit()),
    }

    pbLabels := stringKeyValues(labels.Iter())

    switch nk := desc.NumberKind(); nk {
    case number.Int64Kind:
        var pts []*metricpb.IntDataPoint
        for _, s := range points {
            pts = append(pts, &metricpb.IntDataPoint{
                Labels: pbLabels,
                StartTimeUnixNano: toNanos(record.StartTime()),
                TimeUnixNano: toNanos(record.EndTime()),
                Value: s.Number.CoerceToInt64(nk),
            })
        }
        m.Data = &metricpb.Metric_IntGauge{
            IntGauge: &metricpb.IntGauge{
                DataPoints: pts,
            },
        }

    case number.Float64Kind:
        var pts []*metricpb.DoubleDataPoint
        for _, s := range points {
            pts = append(pts, &metricpb.DoubleDataPoint{
                Labels: pbLabels,
                StartTimeUnixNano: toNanos(record.StartTime()),
                TimeUnixNano: toNanos(record.EndTime()),
                Value: s.Number.CoerceToFloat64(nk),
            })
        }
        m.Data = &metricpb.Metric_DoubleGauge{
            DoubleGauge: &metricpb.DoubleGauge{
                DataPoints: pts,
            },
        }

    default:
        return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, nk)
    }

    return m, nil
}

func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) {
    desc := record.Descriptor()
    labels := record.Labels()

    m := &metricpb.Metric{
        Name: desc.Name(),
        Description: desc.Description(),
        Unit: string(desc.Unit()),
    }

    switch n := desc.NumberKind(); n {
    case number.Int64Kind:
        m.Data = &metricpb.Metric_IntGauge{
            IntGauge: &metricpb.IntGauge{
                DataPoints: []*metricpb.IntDataPoint{
                    {
                        Value: num.CoerceToInt64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(start),
                        TimeUnixNano: toNanos(end),
                    },
                },
            },
        }
    case number.Float64Kind:
        m.Data = &metricpb.Metric_DoubleGauge{
            DoubleGauge: &metricpb.DoubleGauge{
                DataPoints: []*metricpb.DoubleDataPoint{
                    {
                        Value: num.CoerceToFloat64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(start),
                        TimeUnixNano: toNanos(end),
                    },
                },
            },
        }
    default:
        return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
    }

    return m, nil
}

func exportKindToTemporality(ek export.ExportKind) metricpb.AggregationTemporality {
    switch ek {
    case export.DeltaExportKind:
        return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
    case export.CumulativeExportKind:
        return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
    }
    return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}

func sumPoint(record export.Record, num number.Number, start, end time.Time, ek export.ExportKind, monotonic bool) (*metricpb.Metric, error) {
    desc := record.Descriptor()
    labels := record.Labels()

    m := &metricpb.Metric{
        Name: desc.Name(),
        Description: desc.Description(),
        Unit: string(desc.Unit()),
    }

    switch n := desc.NumberKind(); n {
    case number.Int64Kind:
        m.Data = &metricpb.Metric_IntSum{
            IntSum: &metricpb.IntSum{
                IsMonotonic: monotonic,
                AggregationTemporality: exportKindToTemporality(ek),
                DataPoints: []*metricpb.IntDataPoint{
                    {
                        Value: num.CoerceToInt64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(start),
                        TimeUnixNano: toNanos(end),
                    },
                },
            },
        }
    case number.Float64Kind:
        m.Data = &metricpb.Metric_DoubleSum{
            DoubleSum: &metricpb.DoubleSum{
                IsMonotonic: monotonic,
                AggregationTemporality: exportKindToTemporality(ek),
                DataPoints: []*metricpb.DoubleDataPoint{
                    {
                        Value: num.CoerceToFloat64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(start),
                        TimeUnixNano: toNanos(end),
                    },
                },
            },
        }
    default:
        return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
    }

    return m, nil
}

// minMaxSumCountValue returns the values of the MinMaxSumCount Aggregator
// as discrete values.
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum number.Number, count uint64, err error) {
    if min, err = a.Min(); err != nil {
        return
    }
    if max, err = a.Max(); err != nil {
        return
    }
    if sum, err = a.Sum(); err != nil {
        return
    }
    if count, err = a.Count(); err != nil {
        return
    }
    return
}

// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
    desc := record.Descriptor()
    labels := record.Labels()
    min, max, sum, count, err := minMaxSumCountValues(a)
    if err != nil {
        return nil, err
    }

    m := &metricpb.Metric{
        Name: desc.Name(),
        Description: desc.Description(),
        Unit: string(desc.Unit()),
    }

    buckets := []uint64{min.AsRaw(), max.AsRaw()}
    bounds := []float64{0.0, 100.0}

    switch n := desc.NumberKind(); n {
    case number.Int64Kind:
        m.Data = &metricpb.Metric_IntHistogram{
            IntHistogram: &metricpb.IntHistogram{
                DataPoints: []*metricpb.IntHistogramDataPoint{
                    {
                        Sum: sum.CoerceToInt64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(record.StartTime()),
                        TimeUnixNano: toNanos(record.EndTime()),
                        Count: uint64(count),
                        BucketCounts: buckets,
                        ExplicitBounds: bounds,
                    },
                },
            },
        }
    case number.Float64Kind:
        m.Data = &metricpb.Metric_DoubleHistogram{
            DoubleHistogram: &metricpb.DoubleHistogram{
                DataPoints: []*metricpb.DoubleHistogramDataPoint{
                    {
                        Sum: sum.CoerceToFloat64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(record.StartTime()),
                        TimeUnixNano: toNanos(record.EndTime()),
                        Count: uint64(count),
                        BucketCounts: buckets,
                        ExplicitBounds: bounds,
                    },
                },
            },
        }
    default:
        return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
    }
    return m, nil
}

func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
    var buckets aggregation.Buckets
    if buckets, err = a.Histogram(); err != nil {
        return
    }
    boundaries, counts = buckets.Boundaries, buckets.Counts
    if len(counts) != len(boundaries)+1 {
        err = ErrTransforming
        return
    }
    return
}

// histogram transforms a Histogram Aggregator into an OTLP Metric.
func histogramPoint(record export.Record, ek export.ExportKind, a aggregation.Histogram) (*metricpb.Metric, error) {
    desc := record.Descriptor()
    labels := record.Labels()
    boundaries, counts, err := histogramValues(a)
    if err != nil {
        return nil, err
    }

    count, err := a.Count()
    if err != nil {
        return nil, err
    }

    sum, err := a.Sum()
    if err != nil {
        return nil, err
    }

    m := &metricpb.Metric{
        Name: desc.Name(),
        Description: desc.Description(),
        Unit: string(desc.Unit()),
    }
    switch n := desc.NumberKind(); n {
    case number.Int64Kind:
        m.Data = &metricpb.Metric_IntHistogram{
            IntHistogram: &metricpb.IntHistogram{
                AggregationTemporality: exportKindToTemporality(ek),
                DataPoints: []*metricpb.IntHistogramDataPoint{
                    {
                        Sum: sum.CoerceToInt64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(record.StartTime()),
                        TimeUnixNano: toNanos(record.EndTime()),
                        Count: uint64(count),
                        BucketCounts: counts,
                        ExplicitBounds: boundaries,
                    },
                },
            },
        }
    case number.Float64Kind:
        m.Data = &metricpb.Metric_DoubleHistogram{
            DoubleHistogram: &metricpb.DoubleHistogram{
                AggregationTemporality: exportKindToTemporality(ek),
                DataPoints: []*metricpb.DoubleHistogramDataPoint{
                    {
                        Sum: sum.CoerceToFloat64(n),
                        Labels: stringKeyValues(labels.Iter()),
                        StartTimeUnixNano: toNanos(record.StartTime()),
                        TimeUnixNano: toNanos(record.EndTime()),
                        Count: uint64(count),
                        BucketCounts: counts,
                        ExplicitBounds: boundaries,
                    },
                },
            },
        }
    default:
        return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
    }

    return m, nil
}

// stringKeyValues transforms a label iterator into an OTLP StringKeyValues.
func stringKeyValues(iter attribute.Iterator) []*commonpb.StringKeyValue {
    l := iter.Len()
    if l == 0 {
        return nil
    }
    result := make([]*commonpb.StringKeyValue, 0, l)
    for iter.Next() {
        kv := iter.Label()
        result = append(result, &commonpb.StringKeyValue{
            Key: string(kv.Key),
            Value: kv.Value.Emit(),
        })
    }
    return result
}
29
vendor/go.opentelemetry.io/otel/exporters/otlp/internal/transform/resource.go
generated
vendored
29
vendor/go.opentelemetry.io/otel/exporters/otlp/internal/transform/resource.go
generated
vendored
@ -1,29 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transform

import (
    resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"

    "go.opentelemetry.io/otel/sdk/resource"
)

// Resource transforms a Resource into an OTLP Resource.
func Resource(r *resource.Resource) *resourcepb.Resource {
    if r == nil {
        return nil
    }
    return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
}
218
vendor/go.opentelemetry.io/otel/exporters/otlp/internal/transform/span.go
generated
vendored
218
vendor/go.opentelemetry.io/otel/exporters/otlp/internal/transform/span.go
generated
vendored
@ -1,218 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transform

import (
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/codes"
    tracepb "go.opentelemetry.io/proto/otlp/trace/v1"

    "go.opentelemetry.io/otel/sdk/instrumentation"
    tracesdk "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/trace"
)

const (
    maxMessageEventsPerSpan = 128
)

// SpanData transforms a slice of SpanSnapshot into a slice of OTLP
// ResourceSpans.
func SpanData(sdl []*tracesdk.SpanSnapshot) []*tracepb.ResourceSpans {
    if len(sdl) == 0 {
        return nil
    }

    rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)

    type ilsKey struct {
        r attribute.Distinct
        il instrumentation.Library
    }
    ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans)

    var resources int
    for _, sd := range sdl {
        if sd == nil {
            continue
        }

        rKey := sd.Resource.Equivalent()
        iKey := ilsKey{
            r: rKey,
            il: sd.InstrumentationLibrary,
        }
        ils, iOk := ilsm[iKey]
        if !iOk {
            // Either the resource or instrumentation library were unknown.
            ils = &tracepb.InstrumentationLibrarySpans{
                InstrumentationLibrary: instrumentationLibrary(sd.InstrumentationLibrary),
                Spans: []*tracepb.Span{},
            }
        }
        ils.Spans = append(ils.Spans, span(sd))
        ilsm[iKey] = ils

        rs, rOk := rsm[rKey]
        if !rOk {
            resources++
            // The resource was unknown.
            rs = &tracepb.ResourceSpans{
                Resource: Resource(sd.Resource),
                InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils},
            }
            rsm[rKey] = rs
            continue
        }

        // The resource has been seen before. Check if the instrumentation
        // library lookup was unknown because if so we need to add it to the
        // ResourceSpans. Otherwise, the instrumentation library has already
        // been seen and the append we did above will be included it in the
        // InstrumentationLibrarySpans reference.
        if !iOk {
            rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils)
        }
    }

    // Transform the categorized map into a slice
    rss := make([]*tracepb.ResourceSpans, 0, resources)
    for _, rs := range rsm {
        rss = append(rss, rs)
    }
    return rss
}

// span transforms a Span into an OTLP span.
func span(sd *tracesdk.SpanSnapshot) *tracepb.Span {
    if sd == nil {
        return nil
    }

    tid := sd.SpanContext.TraceID()
    sid := sd.SpanContext.SpanID()

    s := &tracepb.Span{
        TraceId: tid[:],
        SpanId: sid[:],
        TraceState: sd.SpanContext.TraceState().String(),
        Status: status(sd.StatusCode, sd.StatusMessage),
        StartTimeUnixNano: uint64(sd.StartTime.UnixNano()),
        EndTimeUnixNano: uint64(sd.EndTime.UnixNano()),
        Links: links(sd.Links),
        Kind: spanKind(sd.SpanKind),
        Name: sd.Name,
        Attributes: Attributes(sd.Attributes),
        Events: spanEvents(sd.MessageEvents),
        DroppedAttributesCount: uint32(sd.DroppedAttributeCount),
        DroppedEventsCount: uint32(sd.DroppedMessageEventCount),
        DroppedLinksCount: uint32(sd.DroppedLinkCount),
    }

    if psid := sd.Parent.SpanID(); psid.IsValid() {
        s.ParentSpanId = psid[:]
    }

    return s
}

// status transform a span code and message into an OTLP span status.
func status(status codes.Code, message string) *tracepb.Status {
    var c tracepb.Status_StatusCode
    switch status {
    case codes.Error:
        c = tracepb.Status_STATUS_CODE_ERROR
    default:
        c = tracepb.Status_STATUS_CODE_OK
    }
    return &tracepb.Status{
        Code: c,
        Message: message,
    }
}

// links transforms span Links to OTLP span links.
func links(links []trace.Link) []*tracepb.Span_Link {
    if len(links) == 0 {
        return nil
    }

    sl := make([]*tracepb.Span_Link, 0, len(links))
    for _, otLink := range links {
        // This redefinition is necessary to prevent otLink.*ID[:] copies
        // being reused -- in short we need a new otLink per iteration.
        otLink := otLink

        tid := otLink.TraceID()
        sid := otLink.SpanID()

        sl = append(sl, &tracepb.Span_Link{
            TraceId: tid[:],
            SpanId: sid[:],
            Attributes: Attributes(otLink.Attributes),
        })
    }
    return sl
}

// spanEvents transforms span Events to an OTLP span events.
func spanEvents(es []trace.Event) []*tracepb.Span_Event {
    if len(es) == 0 {
        return nil
    }

    evCount := len(es)
    if evCount > maxMessageEventsPerSpan {
        evCount = maxMessageEventsPerSpan
    }
    events := make([]*tracepb.Span_Event, 0, evCount)
    messageEvents := 0

    // Transform message events
    for _, e := range es {
        if messageEvents >= maxMessageEventsPerSpan {
            break
        }
        messageEvents++
        events = append(events,
            &tracepb.Span_Event{
                Name: e.Name,
                TimeUnixNano: uint64(e.Time.UnixNano()),
                Attributes: Attributes(e.Attributes),
                // TODO (rghetia) : Add Drop Counts when supported.
            },
        )
    }

    return events
}

// spanKind transforms a SpanKind to an OTLP span kind.
func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
    switch kind {
    case trace.SpanKindInternal:
        return tracepb.Span_SPAN_KIND_INTERNAL
    case trace.SpanKindClient:
        return tracepb.Span_SPAN_KIND_CLIENT
    case trace.SpanKindServer:
        return tracepb.Span_SPAN_KIND_SERVER
    case trace.SpanKindProducer:
        return tracepb.Span_SPAN_KIND_PRODUCER
    case trace.SpanKindConsumer:
        return tracepb.Span_SPAN_KIND_CONSUMER
    default:
        return tracepb.Span_SPAN_KIND_UNSPECIFIED
    }
}