Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

Committed by: mergify[bot]
Parent: e9e33fb851
Commit: 9c8de9471e
201 vendor/go.opentelemetry.io/otel/sdk/export/metric/LICENSE (generated, vendored)
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
154 vendor/go.opentelemetry.io/otel/sdk/export/metric/aggregation/aggregation.go (generated, vendored)
@@ -1,154 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aggregation // import "go.opentelemetry.io/otel/sdk/export/metric/aggregation"

import (
    "fmt"
    "time"

    "go.opentelemetry.io/otel/metric/number"
)

// These interfaces describe the various ways to access state from an
// Aggregation.

type (
    // Aggregation is an interface returned by the Aggregator
    // containing an interval of metric data.
    Aggregation interface {
        // Kind returns a short identifying string to identify
        // the Aggregator that was used to produce the
        // Aggregation (e.g., "Sum").
        Kind() Kind
    }

    // Sum returns an aggregated sum.
    Sum interface {
        Aggregation
        Sum() (number.Number, error)
    }

    // Count returns the number of values that were aggregated.
    Count interface {
        Aggregation
        Count() (uint64, error)
    }

    // Min returns the minimum value over the set of values that were aggregated.
    Min interface {
        Aggregation
        Min() (number.Number, error)
    }

    // Max returns the maximum value over the set of values that were aggregated.
    Max interface {
        Aggregation
        Max() (number.Number, error)
    }

    // LastValue returns the latest value that was aggregated.
    LastValue interface {
        Aggregation
        LastValue() (number.Number, time.Time, error)
    }

    // Points returns the raw values that were aggregated.
    Points interface {
        Aggregation

        // Points returns points in the order they were
        // recorded. Points are approximately ordered by
        // timestamp, but this is not guaranteed.
        Points() ([]Point, error)
    }

    // Point is a raw data point, consisting of a number and value.
    Point struct {
        number.Number
        time.Time
    }

    // Buckets represents histogram buckets boundaries and counts.
    //
    // For a Histogram with N defined boundaries, e.g, [x, y, z].
    // There are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf]
    Buckets struct {
        // Boundaries are floating point numbers, even when
        // aggregating integers.
        Boundaries []float64

        // Counts holds the count in each bucket.
        Counts []uint64
    }

    // Histogram returns the count of events in pre-determined buckets.
    Histogram interface {
        Aggregation
        Count() (uint64, error)
        Sum() (number.Number, error)
        Histogram() (Buckets, error)
    }

    // MinMaxSumCount supports the Min, Max, Sum, and Count interfaces.
    MinMaxSumCount interface {
        Aggregation
        Min() (number.Number, error)
        Max() (number.Number, error)
        Sum() (number.Number, error)
        Count() (uint64, error)
    }
)

type (
    // Kind is a short name for the Aggregator that produces an
    // Aggregation, used for descriptive purpose only. Kind is a
    // string to allow user-defined Aggregators.
    //
    // When deciding how to handle an Aggregation, Exporters are
    // encouraged to decide based on conversion to the above
    // interfaces based on strength, not on Kind value, when
    // deciding how to expose metric data. This enables
    // user-supplied Aggregators to replace builtin Aggregators.
    //
    // For example, test for a Distribution before testing for a
    // MinMaxSumCount, test for a Histogram before testing for a
    // Sum, and so on.
    Kind string
)

const (
    SumKind            Kind = "Sum"
    MinMaxSumCountKind Kind = "MinMaxSumCount"
    HistogramKind      Kind = "Histogram"
    LastValueKind      Kind = "Lastvalue"
    ExactKind          Kind = "Exact"
)

var (
    ErrNegativeInput    = fmt.Errorf("negative value is out of range for this instrument")
    ErrNaNInput         = fmt.Errorf("NaN value is an invalid input")
    ErrInconsistentType = fmt.Errorf("inconsistent aggregator types")
    ErrNoSubtraction    = fmt.Errorf("aggregator does not subtract")

    // ErrNoData is returned when (due to a race with collection)
    // the Aggregator is check-pointed before the first value is set.
    // The aggregator should simply be skipped in this case.
    ErrNoData = fmt.Errorf("no data collected by this aggregator")
)

// String returns the string value of Kind.
func (k Kind) String() string {
    return string(k)
}
25 vendor/go.opentelemetry.io/otel/sdk/export/metric/exportkind_string.go (generated, vendored)
@@ -1,25 +0,0 @@
// Code generated by "stringer -type=ExportKind"; DO NOT EDIT.

package metric

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[CumulativeExportKind-1]
    _ = x[DeltaExportKind-2]
}

const _ExportKind_name = "CumulativeExportKindDeltaExportKind"

var _ExportKind_index = [...]uint8{0, 20, 35}

func (i ExportKind) String() string {
    i -= 1
    if i < 0 || i >= ExportKind(len(_ExportKind_index)-1) {
        return "ExportKind(" + strconv.FormatInt(int64(i+1), 10) + ")"
    }
    return _ExportKind_name[_ExportKind_index[i]:_ExportKind_index[i+1]]
}
445 vendor/go.opentelemetry.io/otel/sdk/export/metric/metric.go (generated, vendored)
@@ -1,445 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate stringer -type=ExportKind

package metric // import "go.opentelemetry.io/otel/sdk/export/metric"

import (
    "context"
    "sync"
    "time"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/metric/number"
    "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
    "go.opentelemetry.io/otel/sdk/resource"
)

// Processor is responsible for deciding which kind of aggregation to
// use (via AggregatorSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregatorSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The `Process` method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the processor to build the set of metrics
// currently being exported.
type Processor interface {
    // AggregatorSelector is responsible for selecting the
    // concrete type of Aggregator used for a metric in the SDK.
    //
    // This may be a static decision based on fields of the
    // Descriptor, or it could use an external configuration
    // source to customize the treatment of each metric
    // instrument.
    //
    // The result from AggregatorSelector.AggregatorFor should be
    // the same type for a given Descriptor or else nil. The same
    // type should be returned for a given descriptor, because
    // Aggregators only know how to Merge with their own type. If
    // the result is nil, the metric instrument will be disabled.
    //
    // Note that the SDK only calls AggregatorFor when new records
    // require an Aggregator. This does not provide a way to
    // disable metrics with active records.
    AggregatorSelector

    // Process is called by the SDK once per internal record,
    // passing the export Accumulation (a Descriptor, the corresponding
    // Labels, and the checkpointed Aggregator). This call has no
    // Context argument because it is expected to perform only
    // computation. An SDK is not expected to call exporters from
    // with Process, use a controller for that (see
    // ./controllers/{pull,push}.
    Process(accum Accumulation) error
}

// AggregatorSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregatorSelector interface {
    // AggregatorFor allocates a variable number of aggregators of
    // a kind suitable for the requested export. This method
    // initializes a `...*Aggregator`, to support making a single
    // allocation.
    //
    // When the call returns without initializing the *Aggregator
    // to a non-nil value, the metric instrument is explicitly
    // disabled.
    //
    // This must return a consistent type to avoid confusion in
    // later stages of the metrics export process, i.e., when
    // Merging or Checkpointing aggregators for a specific
    // instrument.
    //
    // Note: This is context-free because the aggregator should
    // not relate to the incoming context. This call should not
    // block.
    AggregatorFor(descriptor *metric.Descriptor, aggregator ...*Aggregator)
}

// Checkpointer is the interface used by a Controller to coordinate
// the Processor with Accumulator(s) and Exporter(s). The
// StartCollection() and FinishCollection() methods start and finish a
// collection interval. Controllers call the Accumulator(s) during
// collection to process Accumulations.
type Checkpointer interface {
    // Processor processes metric data for export. The Process
    // method is bracketed by StartCollection and FinishCollection
    // calls. The embedded AggregatorSelector can be called at
    // any time.
    Processor

    // CheckpointSet returns the current data set. This may be
    // called before and after collection. The
    // implementation is required to return the same value
    // throughout its lifetime, since CheckpointSet exposes a
    // sync.Locker interface. The caller is responsible for
    // locking the CheckpointSet before initiating collection.
    CheckpointSet() CheckpointSet

    // StartCollection begins a collection interval.
    StartCollection()

    // FinishCollection ends a collection interval.
    FinishCollection() error
}

// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Sum-only
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (ValueRecorder, ValueObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a ValueRecorder instrument or a
// MinMaxSumCount aggregator to a Counter instrument.
type Aggregator interface {
    // Aggregation returns an Aggregation interface to access the
    // current state of this Aggregator. The caller is
    // responsible for synchronization and must not call any the
    // other methods in this interface concurrently while using
    // the Aggregation.
    Aggregation() aggregation.Aggregation

    // Update receives a new measured value and incorporates it
    // into the aggregation. Update() calls may be called
    // concurrently.
    //
    // Descriptor.NumberKind() should be consulted to determine
    // whether the provided number is an int64 or float64.
    //
    // The Context argument comes from user-level code and could be
    // inspected for a `correlation.Map` or `trace.SpanContext`.
    Update(ctx context.Context, number number.Number, descriptor *metric.Descriptor) error

    // SynchronizedMove is called during collection to finish one
    // period of aggregation by atomically saving the
    // currently-updating state into the argument Aggregator AND
    // resetting the current value to the zero state.
    //
    // SynchronizedMove() is called concurrently with Update(). These
    // two methods must be synchronized with respect to each
    // other, for correctness.
    //
    // After saving a synchronized copy, the Aggregator can be converted
    // into one or more of the interfaces in the `aggregation` sub-package,
    // according to kind of Aggregator that was selected.
    //
    // This method will return an InconsistentAggregatorError if
    // this Aggregator cannot be copied into the destination due
    // to an incompatible type.
    //
    // This call has no Context argument because it is expected to
    // perform only computation.
    //
    // When called with a nil `destination`, this Aggregator is reset
    // and the current value is discarded.
    SynchronizedMove(destination Aggregator, descriptor *metric.Descriptor) error

    // Merge combines the checkpointed state from the argument
    // Aggregator into this Aggregator. Merge is not synchronized
    // with respect to Update or SynchronizedMove.
    //
    // The owner of an Aggregator being merged is responsible for
    // synchronization of both Aggregator states.
    Merge(aggregator Aggregator, descriptor *metric.Descriptor) error
}

// Subtractor is an optional interface implemented by some
// Aggregators. An Aggregator must support `Subtract()` in order to
// be configured for a Precomputed-Sum instrument (SumObserver,
// UpDownSumObserver) using a DeltaExporter.
type Subtractor interface {
    // Subtract subtracts the `operand` from this Aggregator and
    // outputs the value in `result`.
    Subtract(operand, result Aggregator, descriptor *metric.Descriptor) error
}

// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
    // Export is called immediately after completing a collection
    // pass in the SDK.
    //
    // The Context comes from the controller that initiated
    // collection.
    //
    // The CheckpointSet interface refers to the Processor that just
    // completed collection.
    Export(ctx context.Context, checkpointSet CheckpointSet) error

    // ExportKindSelector is an interface used by the Processor
    // in deciding whether to compute Delta or Cumulative
    // Aggregations when passing Records to this Exporter.
    ExportKindSelector
}

// ExportKindSelector is a sub-interface of Exporter used to indicate
// whether the Processor should compute Delta or Cumulative
// Aggregations.
type ExportKindSelector interface {
    // ExportKindFor should return the correct ExportKind that
    // should be used when exporting data for the given metric
    // instrument and Aggregator kind.
    ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) ExportKind
}

// CheckpointSet allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor. This is passed to the
// Exporter which may then use ForEach to iterate over the collection
// of aggregated metrics.
type CheckpointSet interface {
    // ForEach iterates over aggregated checkpoints for all
    // metrics that were updated during the last collection
    // period. Each aggregated checkpoint returned by the
    // function parameter may return an error.
    //
    // The ExportKindSelector argument is used to determine
    // whether the Record is computed using Delta or Cumulative
    // aggregation.
    //
    // ForEach tolerates ErrNoData silently, as this is
    // expected from the Meter implementation. Any other kind
    // of error will immediately halt ForEach and return
    // the error to the caller.
    ForEach(kindSelector ExportKindSelector, recordFunc func(Record) error) error

    // Locker supports locking the checkpoint set. Collection
    // into the checkpoint set cannot take place (in case of a
    // stateful processor) while it is locked.
    //
    // The Processor attached to the Accumulator MUST be called
    // with the lock held.
    sync.Locker

    // RLock acquires a read lock corresponding to this Locker.
    RLock()
    // RUnlock releases a read lock corresponding to this Locker.
    RUnlock()
}

// Metadata contains the common elements for exported metric data that
// are shared by the Accumulator->Processor and Processor->Exporter
// steps.
type Metadata struct {
    descriptor *metric.Descriptor
    labels     *attribute.Set
    resource   *resource.Resource
}

// Accumulation contains the exported data for a single metric instrument
// and label set, as prepared by an Accumulator for the Processor.
type Accumulation struct {
    Metadata
    aggregator Aggregator
}

// Record contains the exported data for a single metric instrument
// and label set, as prepared by the Processor for the Exporter.
// This includes the effective start and end time for the aggregation.
type Record struct {
    Metadata
    aggregation aggregation.Aggregation
    start       time.Time
    end         time.Time
}

// Descriptor describes the metric instrument being exported.
func (m Metadata) Descriptor() *metric.Descriptor {
    return m.descriptor
}

// Labels describes the labels associated with the instrument and the
// aggregated data.
func (m Metadata) Labels() *attribute.Set {
    return m.labels
}

// Resource contains common attributes that apply to this metric event.
func (m Metadata) Resource() *resource.Resource {
    return m.resource
}

// NewAccumulation allows Accumulator implementations to construct new
// Accumulations to send to Processors. The Descriptor, Labels, Resource,
// and Aggregator represent aggregate metric events received over a single
// collection period.
func NewAccumulation(descriptor *metric.Descriptor, labels *attribute.Set, resource *resource.Resource, aggregator Aggregator) Accumulation {
    return Accumulation{
        Metadata: Metadata{
            descriptor: descriptor,
            labels:     labels,
            resource:   resource,
        },
        aggregator: aggregator,
    }
}

// Aggregator returns the checkpointed aggregator. It is safe to
// access the checkpointed state without locking.
func (r Accumulation) Aggregator() Aggregator {
    return r.aggregator
}

// NewRecord allows Processor implementations to construct export
// records. The Descriptor, Labels, and Aggregator represent
// aggregate metric events received over a single collection period.
func NewRecord(descriptor *metric.Descriptor, labels *attribute.Set, resource *resource.Resource, aggregation aggregation.Aggregation, start, end time.Time) Record {
    return Record{
        Metadata: Metadata{
            descriptor: descriptor,
            labels:     labels,
            resource:   resource,
        },
        aggregation: aggregation,
        start:       start,
        end:         end,
    }
}

// Aggregation returns the aggregation, an interface to the record and
// its aggregator, dependent on the kind of both the input and exporter.
func (r Record) Aggregation() aggregation.Aggregation {
    return r.aggregation
}

// StartTime is the start time of the interval covered by this aggregation.
func (r Record) StartTime() time.Time {
    return r.start
}

// EndTime is the end time of the interval covered by this aggregation.
func (r Record) EndTime() time.Time {
    return r.end
}

// ExportKind indicates the kind of data exported by an exporter.
// These bits may be OR-d together when multiple exporters are in use.
type ExportKind int

const (
    // CumulativeExportKind indicates that an Exporter expects a
    // Cumulative Aggregation.
    CumulativeExportKind ExportKind = 1

    // DeltaExportKind indicates that an Exporter expects a
    // Delta Aggregation.
    DeltaExportKind ExportKind = 2
)

// Includes tests whether `kind` includes a specific kind of
// exporter.
func (kind ExportKind) Includes(has ExportKind) bool {
    return kind&has != 0
}

// MemoryRequired returns whether an exporter of this kind requires
// memory to export correctly.
func (kind ExportKind) MemoryRequired(mkind metric.InstrumentKind) bool {
    switch mkind {
    case metric.ValueRecorderInstrumentKind, metric.ValueObserverInstrumentKind,
        metric.CounterInstrumentKind, metric.UpDownCounterInstrumentKind:
        // Delta-oriented instruments:
        return kind.Includes(CumulativeExportKind)

    case metric.SumObserverInstrumentKind, metric.UpDownSumObserverInstrumentKind:
        // Cumulative-oriented instruments:
        return kind.Includes(DeltaExportKind)
    }
    // Something unexpected is happening--we could panic. This
    // will become an error when the exporter tries to access a
    // checkpoint, presumably, so let it be.
    return false
}

type (
    constantExportKindSelector  ExportKind
    statelessExportKindSelector struct{}
)

var (
    _ ExportKindSelector = constantExportKindSelector(0)
    _ ExportKindSelector = statelessExportKindSelector{}
)

// ConstantExportKindSelector returns an ExportKindSelector that returns
// a constant ExportKind, one that is either always cumulative or always delta.
func ConstantExportKindSelector(kind ExportKind) ExportKindSelector {
    return constantExportKindSelector(kind)
}

// CumulativeExportKindSelector returns an ExportKindSelector that
// always returns CumulativeExportKind.
func CumulativeExportKindSelector() ExportKindSelector {
    return ConstantExportKindSelector(CumulativeExportKind)
}

// DeltaExportKindSelector returns an ExportKindSelector that
// always returns DeltaExportKind.
func DeltaExportKindSelector() ExportKindSelector {
    return ConstantExportKindSelector(DeltaExportKind)
}

// StatelessExportKindSelector returns an ExportKindSelector that
// always returns the ExportKind that avoids long-term memory
// requirements.
func StatelessExportKindSelector() ExportKindSelector {
    return statelessExportKindSelector{}
}

// ExportKindFor implements ExportKindSelector.
func (c constantExportKindSelector) ExportKindFor(_ *metric.Descriptor, _ aggregation.Kind) ExportKind {
    return ExportKind(c)
}

// ExportKindFor implements ExportKindSelector.
func (s statelessExportKindSelector) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) ExportKind {
    if kind == aggregation.SumKind && desc.InstrumentKind().PrecomputedSum() {
        return CumulativeExportKind
    }
    return DeltaExportKind
}
24 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go (generated, vendored, new file)
@@ -0,0 +1,24 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package instrumentation provides types to represent the code libraries that
// provide OpenTelemetry instrumentation. These types are used in the
// OpenTelemetry signal pipelines to identify the source of telemetry.
//
// See
// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
// and
// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
// for more information.
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
20 vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go (generated, vendored)
@@ -12,24 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package instrumentation provides an instrumentation library structure to be
passed to both the OpenTelemetry Tracer and Meter components.

This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.

For more information see
[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md).
*/
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"

// Library represents the instrumentation library.
type Library struct {
    // Name is the name of the instrumentation library. This should be the
    // Go package name of that library.
    Name string
    // Version is the version of the instrumentation library.
    Version string
}
// Deprecated: please use Scope instead.
type Library = Scope

@@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"

import "unsafe"

func AtomicFieldOffsets() map[string]uintptr {
    return map[string]uintptr{
        "record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),
        "record.updateCount":     unsafe.Offsetof(record{}.updateCount),
    }
// Scope represents the instrumentation scope.
type Scope struct {
    // Name is the name of the instrumentation scope. This should be the
    // Go package name of that scope.
    Name string
    // Version is the version of the instrumentation scope.
    Version string
    // SchemaURL of the telemetry emitted by the scope.
    SchemaURL string
}
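The hunks above replace the old Library struct with a Scope type carrying Name, Version, and SchemaURL, and keep Library as a deprecated alias for Scope. A minimal sketch of how calling code might populate the new type; the package path, version, and schema URL values are illustrative and not taken from this commit:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/sdk/instrumentation"
)

func main() {
    // Scope identifies the library that produced a piece of telemetry.
    // All three field values here are placeholders for illustration.
    scope := instrumentation.Scope{
        Name:      "example.com/mylib",
        Version:   "v0.1.0",
        SchemaURL: "https://opentelemetry.io/schemas/1.17.0",
    }

    // Library is now an alias for Scope, so existing code keeps compiling.
    var lib instrumentation.Library = scope
    fmt.Println(lib.Name, lib.Version, lib.SchemaURL)
}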
177 vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go (generated, vendored, new file)
@@ -0,0 +1,177 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package env // import "go.opentelemetry.io/otel/sdk/internal/env"

import (
    "os"
    "strconv"

    "go.opentelemetry.io/otel/internal/global"
)

// Environment variable names.
const (
    // BatchSpanProcessorScheduleDelayKey is the delay interval between two
    // consecutive exports (i.e. 5000).
    BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
    // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
    // export data (i.e. 3000).
    BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
    // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048).
    BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
    // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e.
    // 512). Note: it must be less than or equal to
    // EnvBatchSpanProcessorMaxQueueSize.
    BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"

    // AttributeValueLengthKey is the maximum allowed attribute value size.
    AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"

    // AttributeCountKey is the maximum allowed span attribute count.
    AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"

    // SpanAttributeValueLengthKey is the maximum allowed attribute value size
    // for a span.
    SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"

    // SpanAttributeCountKey is the maximum allowed span attribute count for a
    // span.
    SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"

    // SpanEventCountKey is the maximum allowed span event count.
    SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"

    // SpanEventAttributeCountKey is the maximum allowed attribute per span
    // event count.
    SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"

    // SpanLinkCountKey is the maximum allowed span link count.
    SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"

    // SpanLinkAttributeCountKey is the maximum allowed attribute per span
    // link count.
    SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
)

// firstInt returns the value of the first matching environment variable from
// keys. If the value is not an integer or no match is found, defaultValue is
// returned.
func firstInt(defaultValue int, keys ...string) int {
    for _, key := range keys {
        value, ok := os.LookupEnv(key)
        if !ok {
            continue
        }

        intValue, err := strconv.Atoi(value)
        if err != nil {
            global.Info("Got invalid value, number value expected.", key, value)
            return defaultValue
        }

        return intValue
    }

    return defaultValue
}

// IntEnvOr returns the int value of the environment variable with name key if
// it exists and the value is an int. Otherwise, defaultValue is returned.
func IntEnvOr(key string, defaultValue int) int {
    value, ok := os.LookupEnv(key)
    if !ok {
        return defaultValue
    }

    intValue, err := strconv.Atoi(value)
    if err != nil {
        global.Info("Got invalid value, number value expected.", key, value)
        return defaultValue
    }

    return intValue
}

// BatchSpanProcessorScheduleDelay returns the environment variable value for
// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
// returned.
func BatchSpanProcessorScheduleDelay(defaultValue int) int {
    return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
}

// BatchSpanProcessorExportTimeout returns the environment variable value for
// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
// returned.
func BatchSpanProcessorExportTimeout(defaultValue int) int {
    return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
}

// BatchSpanProcessorMaxQueueSize returns the environment variable value for
// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
// returned.
func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
    return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
}

// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
// is returned.
func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
    return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
}

// SpanAttributeValueLength returns the environment variable value for the
// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
// returned or defaultValue if that is not set.
func SpanAttributeValueLength(defaultValue int) int {
    return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
}

// SpanAttributeCount returns the environment variable value for the
// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
// defaultValue if that is not set.
func SpanAttributeCount(defaultValue int) int {
    return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
}

// SpanEventCount returns the environment variable value for the
// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
// returned.
func SpanEventCount(defaultValue int) int {
    return IntEnvOr(SpanEventCountKey, defaultValue)
}

// SpanEventAttributeCount returns the environment variable value for the
// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
// is returned.
func SpanEventAttributeCount(defaultValue int) int {
    return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
}

// SpanLinkCount returns the environment variable value for the
// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
// returned.
func SpanLinkCount(defaultValue int) int {
    return IntEnvOr(SpanLinkCountKey, defaultValue)
}

// SpanLinkAttributeCount returns the environment variable value for the
// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
// returned.
func SpanLinkAttributeCount(defaultValue int) int {
    return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
}
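The exported helpers in env.go all reduce to an integer environment-variable lookup with a fallback default, and the package lives under internal/, so it is only importable from within the SDK module itself. A standalone sketch of the same lookup behaviour, assuming only the semantics shown in the added file above:

package main

import (
    "fmt"
    "os"
    "strconv"
)

// intEnvOr mirrors the behaviour of env.IntEnvOr: return the variable's value
// when it is set and parses as an integer, otherwise return defaultValue.
func intEnvOr(key string, defaultValue int) int {
    value, ok := os.LookupEnv(key)
    if !ok {
        return defaultValue
    }
    intValue, err := strconv.Atoi(value)
    if err != nil {
        return defaultValue
    }
    return intValue
}

func main() {
    _ = os.Setenv("OTEL_BSP_SCHEDULE_DELAY", "1000")
    fmt.Println(intEnvOr("OTEL_BSP_SCHEDULE_DELAY", 5000)) // 1000: set and valid
    fmt.Println(intEnvOr("OTEL_BSP_MAX_QUEUE_SIZE", 2048)) // 2048: unset, default used
}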
50 vendor/go.opentelemetry.io/otel/sdk/internal/sanitize.go (generated, vendored)
@@ -1,50 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
    "strings"
    "unicode"
)

const labelKeySizeLimit = 100

// Sanitize returns a string that is trunacated to 100 characters if it's too
// long, and replaces non-alphanumeric characters to underscores.
func Sanitize(s string) string {
    if len(s) == 0 {
        return s
    }
    if len(s) > labelKeySizeLimit {
        s = s[:labelKeySizeLimit]
    }
    s = strings.Map(sanitizeRune, s)
    if unicode.IsDigit(rune(s[0])) {
        s = "key_" + s
    }
    if s[0] == '_' {
        s = "key" + s
    }
    return s
}

// converts anything that is not a letter or digit to an underscore
func sanitizeRune(r rune) rune {
    if unicode.IsLetter(r) || unicode.IsDigit(r) {
        return r
    }
    // Everything else turns into an underscore
    return '_'
}
201 vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE (generated, vendored)
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
52 vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/aggregator.go generated vendored
@@ -1,52 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator"

import (
	"fmt"
	"math"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// NewInconsistentAggregatorError formats an error describing an attempt to
// Checkpoint or Merge different-type aggregators. The result can be unwrapped as
// an ErrInconsistentType.
func NewInconsistentAggregatorError(a1, a2 export.Aggregator) error {
	return fmt.Errorf("%w: %T and %T", aggregation.ErrInconsistentType, a1, a2)
}

// RangeTest is a common routine for testing for valid input values.
// This rejects NaN values. This rejects negative values when the
// metric instrument does not support negative values, including
// monotonic counter metrics and absolute ValueRecorder metrics.
func RangeTest(num number.Number, descriptor *metric.Descriptor) error {
	numberKind := descriptor.NumberKind()

	if numberKind == number.Float64Kind && math.IsNaN(num.AsFloat64()) {
		return aggregation.ErrNaNInput
	}

	switch descriptor.InstrumentKind() {
	case metric.CounterInstrumentKind, metric.SumObserverInstrumentKind:
		if num.IsNegative(numberKind) {
			return aggregation.ErrNegativeInput
		}
	}
	return nil
}
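For orientation, a minimal sketch of how an aggregator implementation was expected to guard its Update() path with the removed RangeTest helper. The constructors metric.NewDescriptor and number.NewFloat64Number are assumed from the same pre-1.26 vendored OpenTelemetry packages; this is not part of the diff.

	// A monotonic counter descriptor rejects negative input values.
	desc := metric.NewDescriptor("requests", metric.CounterInstrumentKind, number.Float64Kind)
	v := number.NewFloat64Number(-1)
	if err := aggregator.RangeTest(v, &desc); err != nil {
		// err is aggregation.ErrNegativeInput here;
		// NaN input would yield aggregation.ErrNaNInput.
	}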
130 vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/exact/exact.go generated vendored
@@ -1,130 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package exact // import "go.opentelemetry.io/otel/sdk/metric/aggregator/exact"

import (
	"context"
	"sync"
	"time"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
)

type (
	// Aggregator aggregates events that form a distribution, keeping
	// an array with the exact set of values.
	Aggregator struct {
		lock    sync.Mutex
		samples []aggregation.Point
	}
)

var _ export.Aggregator = &Aggregator{}
var _ aggregation.Points = &Aggregator{}
var _ aggregation.Count = &Aggregator{}

// New returns cnt many new exact aggregators, which aggregate recorded
// measurements by storing them in an array. This type uses a mutex
// for Update() and SynchronizedMove() concurrency.
func New(cnt int) []Aggregator {
	return make([]Aggregator, cnt)
}

// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
	return c
}

// Kind returns aggregation.ExactKind.
func (c *Aggregator) Kind() aggregation.Kind {
	return aggregation.ExactKind
}

// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
	return uint64(len(c.samples)), nil
}

// Points returns access to the raw data set.
func (c *Aggregator) Points() ([]aggregation.Point, error) {
	return c.samples, nil
}

// SynchronizedMove saves the current state to oa and resets the current state to
// the empty set, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)

	if oa != nil && o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	if o != nil {
		o.samples = c.samples
	}
	c.samples = nil

	return nil
}

// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and SynchronizedMove()
// calls.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
	now := time.Now()
	c.lock.Lock()
	defer c.lock.Unlock()
	c.samples = append(c.samples, aggregation.Point{
		Number: number,
		Time:   now,
	})

	return nil
}

// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}

	c.samples = combine(c.samples, o.samples)
	return nil
}

func combine(a, b []aggregation.Point) []aggregation.Point {
	result := make([]aggregation.Point, 0, len(a)+len(b))

	for len(a) != 0 && len(b) != 0 {
		if a[0].Time.Before(b[0].Time) {
			result = append(result, a[0])
			a = a[1:]
		} else {
			result = append(result, b[0])
			b = b[1:]
		}
	}
	result = append(result, a...)
	result = append(result, b...)
	return result
}
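A minimal usage sketch for the removed exact aggregator, showing the Update/SynchronizedMove pair that the SDK drove for synchronous instruments. The descriptor desc and number.NewInt64Number are assumed from the same removed API and are placeholders, not part of this diff.

	// Record two measurements, then checkpoint them into a second aggregator.
	aggs := exact.New(2)
	cur, ckpt := &aggs[0], &aggs[1]
	_ = cur.Update(context.Background(), number.NewInt64Number(3), &desc)
	_ = cur.Update(context.Background(), number.NewInt64Number(7), &desc)
	_ = cur.SynchronizedMove(ckpt, &desc)
	pts, _ := ckpt.Points() // the exact, timestamped values; cur is now empty
	_ = pts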
270 vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/histogram/histogram.go generated vendored
@@ -1,270 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram // import "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"

import (
	"context"
	"sort"
	"sync"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
)

// Note: This code uses a Mutex to govern access to the exclusive
// aggregator state. This is in contrast to a lock-free approach
// (as in the Go prometheus client) that was reverted here:
// https://github.com/open-telemetry/opentelemetry-go/pull/669

type (
	// Aggregator observe events and counts them in pre-determined buckets.
	// It also calculates the sum and count of all events.
	Aggregator struct {
		lock       sync.Mutex
		boundaries []float64
		kind       number.Kind
		state      *state
	}

	// config describes how the histogram is aggregated.
	config struct {
		// explicitBoundaries support arbitrary bucketing schemes. This
		// is the general case.
		explicitBoundaries []float64
	}

	// Option configures a histogram config.
	Option interface {
		// apply sets one or more config fields.
		apply(*config)
	}

	// state represents the state of a histogram, consisting of
	// the sum and counts for all observed values and
	// the less than equal bucket count for the pre-determined boundaries.
	state struct {
		bucketCounts []uint64
		sum          number.Number
		count        uint64
	}
)

// WithExplicitBoundaries sets the ExplicitBoundaries configuration option of a config.
func WithExplicitBoundaries(explicitBoundaries []float64) Option {
	return explicitBoundariesOption{explicitBoundaries}
}

type explicitBoundariesOption struct {
	boundaries []float64
}

func (o explicitBoundariesOption) apply(config *config) {
	config.explicitBoundaries = o.boundaries
}

// defaultExplicitBoundaries have been copied from prometheus.DefBuckets.
//
// Note we anticipate the use of a high-precision histogram sketch as
// the standard histogram aggregator for OTLP export.
// (https://github.com/open-telemetry/opentelemetry-specification/issues/982).
var defaultFloat64ExplicitBoundaries = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}

// defaultInt64ExplicitBoundaryMultiplier determines the default
// integer histogram boundaries.
const defaultInt64ExplicitBoundaryMultiplier = 1e6

// defaultInt64ExplicitBoundaries applies a multiplier to the default
// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ]
var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) {
	for _, f := range bounds {
		asint = append(asint, defaultInt64ExplicitBoundaryMultiplier*f)
	}
	return
}(defaultFloat64ExplicitBoundaries)

var _ export.Aggregator = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
var _ aggregation.Count = &Aggregator{}
var _ aggregation.Histogram = &Aggregator{}

// New returns a new aggregator for computing Histograms.
//
// A Histogram observe events and counts them in pre-defined buckets.
// And also provides the total sum and count of all observations.
//
// Note that this aggregator maintains each value using independent
// atomic operations, which introduces the possibility that
// checkpoints are inconsistent.
func New(cnt int, desc *metric.Descriptor, opts ...Option) []Aggregator {
	var cfg config

	if desc.NumberKind() == number.Int64Kind {
		cfg.explicitBoundaries = defaultInt64ExplicitBoundaries
	} else {
		cfg.explicitBoundaries = defaultFloat64ExplicitBoundaries
	}

	for _, opt := range opts {
		opt.apply(&cfg)
	}

	aggs := make([]Aggregator, cnt)

	// Boundaries MUST be ordered otherwise the histogram could not
	// be properly computed.
	sortedBoundaries := make([]float64, len(cfg.explicitBoundaries))

	copy(sortedBoundaries, cfg.explicitBoundaries)
	sort.Float64s(sortedBoundaries)

	for i := range aggs {
		aggs[i] = Aggregator{
			kind:       desc.NumberKind(),
			boundaries: sortedBoundaries,
		}
		aggs[i].state = aggs[i].newState()
	}
	return aggs
}

// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
	return c
}

// Kind returns aggregation.HistogramKind.
func (c *Aggregator) Kind() aggregation.Kind {
	return aggregation.HistogramKind
}

// Sum returns the sum of all values in the checkpoint.
func (c *Aggregator) Sum() (number.Number, error) {
	return c.state.sum, nil
}

// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
	return c.state.count, nil
}

// Histogram returns the count of events in pre-determined buckets.
func (c *Aggregator) Histogram() (aggregation.Buckets, error) {
	return aggregation.Buckets{
		Boundaries: c.boundaries,
		Counts:     c.state.bucketCounts,
	}, nil
}

// SynchronizedMove saves the current state into oa and resets the current state to
// the empty set. Since no locks are taken, there is a chance that
// the independent Sum, Count and Bucket Count are not consistent with each
// other.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)

	if oa != nil && o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}

	if o != nil {
		// Swap case: This is the ordinary case for a
		// synchronous instrument, where the SDK allocates two
		// Aggregators and lock contention is anticipated.
		// Reset the target state before swapping it under the
		// lock below.
		o.clearState()
	}

	c.lock.Lock()
	if o != nil {
		c.state, o.state = o.state, c.state
	} else {
		// No swap case: This is the ordinary case for an
		// asynchronous instrument, where the SDK allocates a
		// single Aggregator and there is no anticipated lock
		// contention.
		c.clearState()
	}
	c.lock.Unlock()

	return nil
}

func (c *Aggregator) newState() *state {
	return &state{
		bucketCounts: make([]uint64, len(c.boundaries)+1),
	}
}

func (c *Aggregator) clearState() {
	for i := range c.state.bucketCounts {
		c.state.bucketCounts[i] = 0
	}
	c.state.sum = 0
	c.state.count = 0
}

// Update adds the recorded measurement to the current data set.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
	kind := desc.NumberKind()
	asFloat := number.CoerceToFloat64(kind)

	bucketID := len(c.boundaries)
	for i, boundary := range c.boundaries {
		if asFloat < boundary {
			bucketID = i
			break
		}
	}
	// Note: Binary-search was compared using the benchmarks. The following
	// code is equivalent to the linear search above:
	//
	//     bucketID := sort.Search(len(c.boundaries), func(i int) bool {
	//         return asFloat < c.boundaries[i]
	//     })
	//
	// The binary search wins for very large boundary sets, but
	// the linear search performs better up through arrays between
	// 256 and 512 elements, which is a relatively large histogram, so we
	// continue to prefer linear search.

	c.lock.Lock()
	defer c.lock.Unlock()

	c.state.count++
	c.state.sum.AddNumber(kind, number)
	c.state.bucketCounts[bucketID]++

	return nil
}

// Merge combines two histograms that have the same buckets into a single one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}

	c.state.sum.AddNumber(desc.NumberKind(), o.state.sum)
	c.state.count += o.state.count

	for i := 0; i < len(c.state.bucketCounts); i++ {
		c.state.bucketCounts[i] += o.state.bucketCounts[i]
	}
	return nil
}
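A short sketch of how explicit bucket boundaries were supplied to this removed histogram aggregator. metric.NewDescriptor, metric.ValueRecorderInstrumentKind and number.NewFloat64Number are assumed from the same pre-1.26 API; the instrument name is a placeholder.

	desc := metric.NewDescriptor("latency_seconds", metric.ValueRecorderInstrumentKind, number.Float64Kind)
	aggs := histogram.New(2, &desc, histogram.WithExplicitBoundaries([]float64{0.1, 1, 10}))
	_ = aggs[0].Update(context.Background(), number.NewFloat64Number(0.42), &desc)
	// Swap the live state into the checkpoint aggregator, then read buckets.
	_ = aggs[0].SynchronizedMove(&aggs[1], &desc)
	buckets, _ := aggs[1].Histogram() // per-boundary counts; Sum() and Count() read the totals
	_ = buckets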
135 vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue/lastvalue.go generated vendored
@@ -1,135 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lastvalue // import "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"

import (
	"context"
	"sync/atomic"
	"time"
	"unsafe"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
)

type (

	// Aggregator aggregates lastValue events.
	Aggregator struct {
		// value is an atomic pointer to *lastValueData. It is never nil.
		value unsafe.Pointer
	}

	// lastValueData stores the current value of a lastValue along with
	// a sequence number to determine the winner of a race.
	lastValueData struct {
		// value is the int64- or float64-encoded Set() data
		//
		// value needs to be aligned for 64-bit atomic operations.
		value number.Number

		// timestamp indicates when this record was submitted.
		// this can be used to pick a winner when multiple
		// records contain lastValue data for the same labels due
		// to races.
		timestamp time.Time
	}
)

var _ export.Aggregator = &Aggregator{}
var _ aggregation.LastValue = &Aggregator{}

// An unset lastValue has zero timestamp and zero value.
var unsetLastValue = &lastValueData{}

// New returns a new lastValue aggregator. This aggregator retains the
// last value and timestamp that were recorded.
func New(cnt int) []Aggregator {
	aggs := make([]Aggregator, cnt)
	for i := range aggs {
		aggs[i] = Aggregator{
			value: unsafe.Pointer(unsetLastValue),
		}
	}
	return aggs
}

// Aggregation returns an interface for reading the state of this aggregator.
func (g *Aggregator) Aggregation() aggregation.Aggregation {
	return g
}

// Kind returns aggregation.LastValueKind.
func (g *Aggregator) Kind() aggregation.Kind {
	return aggregation.LastValueKind
}

// LastValue returns the last-recorded lastValue value and the
// corresponding timestamp. The error value aggregation.ErrNoData
// will be returned if (due to a race condition) the checkpoint was
// computed before the first value was set.
func (g *Aggregator) LastValue() (number.Number, time.Time, error) {
	gd := (*lastValueData)(g.value)
	if gd == unsetLastValue {
		return 0, time.Time{}, aggregation.ErrNoData
	}
	return gd.value.AsNumber(), gd.timestamp, nil
}

// SynchronizedMove atomically saves the current value.
func (g *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor) error {
	if oa == nil {
		atomic.StorePointer(&g.value, unsafe.Pointer(unsetLastValue))
		return nil
	}
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(g, oa)
	}
	o.value = atomic.SwapPointer(&g.value, unsafe.Pointer(unsetLastValue))
	return nil
}

// Update atomically sets the current "last" value.
func (g *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
	ngd := &lastValueData{
		value:     number,
		timestamp: time.Now(),
	}
	atomic.StorePointer(&g.value, unsafe.Pointer(ngd))
	return nil
}

// Merge combines state from two aggregators. The most-recently set
// value is chosen.
func (g *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(g, oa)
	}

	ggd := (*lastValueData)(atomic.LoadPointer(&g.value))
	ogd := (*lastValueData)(atomic.LoadPointer(&o.value))

	if ggd.timestamp.After(ogd.timestamp) {
		return nil
	}

	g.value = unsafe.Pointer(ogd)
	return nil
}
165 vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount/mmsc.go generated vendored
@@ -1,165 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package minmaxsumcount // import "go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
)

type (
	// Aggregator aggregates events that form a distribution,
	// keeping only the min, max, sum, and count.
	Aggregator struct {
		lock sync.Mutex
		kind number.Kind
		state
	}

	state struct {
		sum   number.Number
		min   number.Number
		max   number.Number
		count uint64
	}
)

var _ export.Aggregator = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}

// New returns a new aggregator for computing the min, max, sum, and
// count.
//
// This type uses a mutex for Update() and SynchronizedMove() concurrency.
func New(cnt int, desc *metric.Descriptor) []Aggregator {
	kind := desc.NumberKind()
	aggs := make([]Aggregator, cnt)
	for i := range aggs {
		aggs[i] = Aggregator{
			kind:  kind,
			state: emptyState(kind),
		}
	}
	return aggs
}

// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
	return c
}

// Kind returns aggregation.MinMaxSumCountKind.
func (c *Aggregator) Kind() aggregation.Kind {
	return aggregation.MinMaxSumCountKind
}

// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (number.Number, error) {
	return c.sum, nil
}

// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
	return c.count, nil
}

// Min returns the minimum value in the checkpoint.
// The error value aggregation.ErrNoData will be returned
// if there were no measurements recorded during the checkpoint.
func (c *Aggregator) Min() (number.Number, error) {
	if c.count == 0 {
		return 0, aggregation.ErrNoData
	}
	return c.min, nil
}

// Max returns the maximum value in the checkpoint.
// The error value aggregation.ErrNoData will be returned
// if there were no measurements recorded during the checkpoint.
func (c *Aggregator) Max() (number.Number, error) {
	if c.count == 0 {
		return 0, aggregation.ErrNoData
	}
	return c.max, nil
}

// SynchronizedMove saves the current state into oa and resets the current state to
// the empty set.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)

	if oa != nil && o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}
	c.lock.Lock()
	if o != nil {
		o.state = c.state
	}
	c.state = emptyState(c.kind)
	c.lock.Unlock()

	return nil
}

func emptyState(kind number.Kind) state {
	return state{
		count: 0,
		sum:   0,
		min:   kind.Maximum(),
		max:   kind.Minimum(),
	}
}

// Update adds the recorded measurement to the current data set.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
	kind := desc.NumberKind()

	c.lock.Lock()
	defer c.lock.Unlock()
	c.count++
	c.sum.AddNumber(kind, number)
	if number.CompareNumber(kind, c.min) < 0 {
		c.min = number
	}
	if number.CompareNumber(kind, c.max) > 0 {
		c.max = number
	}
	return nil
}

// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}

	c.count += o.count
	c.sum.AddNumber(desc.NumberKind(), o.sum)

	if c.min.CompareNumber(desc.NumberKind(), o.min) > 0 {
		c.min.SetNumber(o.min)
	}
	if c.max.CompareNumber(desc.NumberKind(), o.max) < 0 {
		c.max.SetNumber(o.max)
	}
	return nil
}
106 vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/sum/sum.go generated vendored
@@ -1,106 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sum // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
)

// Aggregator aggregates counter events.
type Aggregator struct {
	// current holds current increments to this counter record
	// current needs to be aligned for 64-bit atomic operations.
	value number.Number
}

var _ export.Aggregator = &Aggregator{}
var _ export.Subtractor = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}

// New returns a new counter aggregator implemented by atomic
// operations. This aggregator implements the aggregation.Sum
// export interface.
func New(cnt int) []Aggregator {
	return make([]Aggregator, cnt)
}

// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
	return c
}

// Kind returns aggregation.SumKind.
func (c *Aggregator) Kind() aggregation.Kind {
	return aggregation.SumKind
}

// Sum returns the last-checkpointed sum. This will never return an
// error.
func (c *Aggregator) Sum() (number.Number, error) {
	return c.value, nil
}

// SynchronizedMove atomically saves the current value into oa and resets the
// current sum to zero.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor) error {
	if oa == nil {
		c.value.SetRawAtomic(0)
		return nil
	}
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}
	o.value = c.value.SwapNumberAtomic(number.Number(0))
	return nil
}

// Update atomically adds to the current value.
func (c *Aggregator) Update(_ context.Context, num number.Number, desc *metric.Descriptor) error {
	c.value.AddNumberAtomic(desc.NumberKind(), num)
	return nil
}

// Merge combines two counters by adding their sums.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentAggregatorError(c, oa)
	}
	c.value.AddNumber(desc.NumberKind(), o.value)
	return nil
}

func (c *Aggregator) Subtract(opAgg, resAgg export.Aggregator, descriptor *metric.Descriptor) error {
	op, _ := opAgg.(*Aggregator)
	if op == nil {
		return aggregator.NewInconsistentAggregatorError(c, opAgg)
	}

	res, _ := resAgg.(*Aggregator)
	if res == nil {
		return aggregator.NewInconsistentAggregatorError(c, resAgg)
	}

	res.value = c.value
	res.value.AddNumber(descriptor.NumberKind(), number.NewNumberSignChange(descriptor.NumberKind(), op.value))
	return nil
}
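A minimal sketch of the counter aggregator's checkpoint cycle, as the SDK drove it for synchronous counter instruments. The descriptor desc and number.NewInt64Number are assumed from the same removed API.

	aggs := sum.New(2)
	cur, ckpt := &aggs[0], &aggs[1]
	_ = cur.Update(context.Background(), number.NewInt64Number(5), &desc)
	_ = cur.Update(context.Background(), number.NewInt64Number(2), &desc)
	_ = cur.SynchronizedMove(ckpt, &desc) // atomically swaps the running sum out
	total, _ := ckpt.Sum()                // 7, as a number.Number; cur restarts at zero
	_ = total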
122 vendor/go.opentelemetry.io/otel/sdk/metric/controller/basic/config.go generated vendored
@@ -1,122 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package basic // import "go.opentelemetry.io/otel/sdk/metric/controller/basic"

import (
	"time"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/resource"
)

// Config contains configuration for a basic Controller.
type Config struct {
	// Resource is the OpenTelemetry resource associated with all Meters
	// created by the Controller.
	Resource *resource.Resource

	// CollectPeriod is the interval between calls to Collect a
	// checkpoint.
	//
	// When pulling metrics and not exporting, this is the minimum
	// time between calls to Collect. In a pull-only
	// configuration, collection is performed on demand; set
	// CollectPeriod to 0 always recompute the export record set.
	//
	// When exporting metrics, this must be > 0.
	//
	// Default value is 10s.
	CollectPeriod time.Duration

	// CollectTimeout is the timeout of the Context passed to
	// Collect() and subsequently to Observer instrument callbacks.
	//
	// Default value is 10s. If zero, no Collect timeout is applied.
	CollectTimeout time.Duration

	// Exporter is used for exporting metric data.
	//
	// Note: Exporters such as Prometheus that pull data do not implement
	// export.Exporter. These will directly call Collect() and ForEach().
	Exporter export.Exporter

	// PushTimeout is the timeout of the Context when a exporter is configured.
	//
	// Default value is 10s. If zero, no Export timeout is applied.
	PushTimeout time.Duration
}

// Option is the interface that applies the value to a configuration option.
type Option interface {
	// Apply sets the Option value of a Config.
	Apply(*Config)
}

// WithResource sets the Resource configuration option of a Config by merging it
// with the Resource configuration in the environment.
func WithResource(r *resource.Resource) Option {
	res := resource.Merge(resource.Environment(), r)
	return resourceOption{res}
}

type resourceOption struct{ *resource.Resource }

func (o resourceOption) Apply(config *Config) {
	config.Resource = o.Resource
}

// WithCollectPeriod sets the CollectPeriod configuration option of a Config.
func WithCollectPeriod(period time.Duration) Option {
	return collectPeriodOption(period)
}

type collectPeriodOption time.Duration

func (o collectPeriodOption) Apply(config *Config) {
	config.CollectPeriod = time.Duration(o)
}

// WithCollectTimeout sets the CollectTimeout configuration option of a Config.
func WithCollectTimeout(timeout time.Duration) Option {
	return collectTimeoutOption(timeout)
}

type collectTimeoutOption time.Duration

func (o collectTimeoutOption) Apply(config *Config) {
	config.CollectTimeout = time.Duration(o)
}

// WithExporter sets the exporter configuration option of a Config.
func WithExporter(exporter export.Exporter) Option {
	return exporterOption{exporter}
}

type exporterOption struct{ exporter export.Exporter }

func (o exporterOption) Apply(config *Config) {
	config.Exporter = o.exporter
}

// WithPushTimeout sets the PushTimeout configuration option of a Config.
func WithPushTimeout(timeout time.Duration) Option {
	return pushTimeoutOption(timeout)
}

type pushTimeoutOption time.Duration

func (o pushTimeoutOption) Apply(config *Config) {
	config.PushTimeout = time.Duration(o)
}
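For context, a sketch of how these functional options were typically combined when constructing the basic controller defined in the next file. checkpointer and exp are placeholders for an export.Checkpointer and export.Exporter from the same removed packages.

	cont := basic.New(
		checkpointer, // export.Checkpointer (assumed to come from the processor/basic package)
		basic.WithExporter(exp),
		basic.WithCollectPeriod(30*time.Second),
		basic.WithPushTimeout(5*time.Second),
	)
	_ = cont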
312 vendor/go.opentelemetry.io/otel/sdk/metric/controller/basic/controller.go generated vendored
@@ -1,312 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package basic // import "go.opentelemetry.io/otel/sdk/metric/controller/basic"

import (
	"context"
	"fmt"
	"sync"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/registry"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	sdk "go.opentelemetry.io/otel/sdk/metric"
	controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
	"go.opentelemetry.io/otel/sdk/resource"
)

// DefaultPeriod is used for:
//
// - the minimum time between calls to Collect()
// - the timeout for Export()
// - the timeout for Collect().
const DefaultPeriod = 10 * time.Second

// ErrControllerStarted indicates that a controller was started more
// than once.
var ErrControllerStarted = fmt.Errorf("controller already started")

// Controller organizes and synchronizes collection of metric data in
// both "pull" and "push" configurations. This supports two distinct
// modes:
//
// - Push and Pull: Start() must be called to begin calling the exporter;
//   Collect() is called periodically by a background thread after starting
//   the controller.
// - Pull-Only: Start() is optional in this case, to call Collect periodically.
//   If Start() is not called, Collect() can be called manually to initiate
//   collection
//
// The controller supports mixing push and pull access to metric data
// using the export.CheckpointSet RWLock interface. Collection will
// be blocked by a pull request in the basic controller.
type Controller struct {
	lock         sync.Mutex
	accumulator  *sdk.Accumulator
	provider     *registry.MeterProvider
	checkpointer export.Checkpointer
	exporter     export.Exporter
	wg           sync.WaitGroup
	stopCh       chan struct{}
	clock        controllerTime.Clock
	ticker       controllerTime.Ticker

	collectPeriod  time.Duration
	collectTimeout time.Duration
	pushTimeout    time.Duration

	// collectedTime is used only in configurations with no
	// exporter, when ticker != nil.
	collectedTime time.Time
}

// New constructs a Controller using the provided checkpointer and
// options (including optional exporter) to configure a metric
// export pipeline.
func New(checkpointer export.Checkpointer, opts ...Option) *Controller {
	c := &Config{
		CollectPeriod:  DefaultPeriod,
		CollectTimeout: DefaultPeriod,
		PushTimeout:    DefaultPeriod,
	}
	for _, opt := range opts {
		opt.Apply(c)
	}
	if c.Resource == nil {
		c.Resource = resource.Default()
	}

	impl := sdk.NewAccumulator(
		checkpointer,
		c.Resource,
	)
	return &Controller{
		provider:     registry.NewMeterProvider(impl),
		accumulator:  impl,
		checkpointer: checkpointer,
		exporter:     c.Exporter,
		stopCh:       nil,
		clock:        controllerTime.RealClock{},

		collectPeriod:  c.CollectPeriod,
		collectTimeout: c.CollectTimeout,
		pushTimeout:    c.PushTimeout,
	}
}

// SetClock supports setting a mock clock for testing. This must be
// called before Start().
func (c *Controller) SetClock(clock controllerTime.Clock) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.clock = clock
}

// MeterProvider returns a MeterProvider instance for this controller.
func (c *Controller) MeterProvider() metric.MeterProvider {
	return c.provider
}

// Start begins a ticker that periodically collects and exports
// metrics with the configured interval. This is required for calling
// a configured Exporter (see WithExporter) and is otherwise optional
// when only pulling metric data.
//
// The passed context is passed to Collect() and subsequently to
// asynchronous instrument callbacks. Returns an error when the
// controller was already started.
//
// Note that it is not necessary to Start a controller when only
// pulling data; use the Collect() and ForEach() methods directly in
// this case.
func (c *Controller) Start(ctx context.Context) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.stopCh != nil {
		return ErrControllerStarted
	}

	c.wg.Add(1)
	c.stopCh = make(chan struct{})
	c.ticker = c.clock.Ticker(c.collectPeriod)
	go c.runTicker(ctx, c.stopCh)
	return nil
}

// Stop waits for the background goroutine to return and then collects
// and exports metrics one last time before returning. The passed
// context is passed to the final Collect() and subsequently to the
// final asynchronous instruments.
//
// Note that Stop() will not cancel an ongoing collection or export.
func (c *Controller) Stop(ctx context.Context) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.stopCh == nil {
		return nil
	}

	close(c.stopCh)
	c.stopCh = nil
	c.wg.Wait()
	c.ticker.Stop()
	c.ticker = nil

	return c.collect(ctx)
}

// runTicker collection on ticker events until the stop channel is closed.
func (c *Controller) runTicker(ctx context.Context, stopCh chan struct{}) {
	defer c.wg.Done()
	for {
		select {
		case <-stopCh:
			return
		case <-c.ticker.C():
			if err := c.collect(ctx); err != nil {
				otel.Handle(err)
			}
		}
	}
}

// collect computes a checkpoint and optionally exports it.
func (c *Controller) collect(ctx context.Context) error {
	if err := c.checkpoint(ctx, func() bool {
		return true
	}); err != nil {
		return err
	}
	if c.exporter == nil {
		return nil
	}
	// Note: this is not subject to collectTimeout. This blocks the next
	// collection despite collectTimeout because it holds a lock.
	if err := c.export(ctx); err != nil {
		return err
	}
	return nil
}

// checkpoint calls the Accumulator and Checkpointer interfaces to
// compute the CheckpointSet. This applies the configured collection
// timeout. Note that this does not try to cancel a Collect or Export
// when Stop() is called.
func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
	ckpt := c.checkpointer.CheckpointSet()
	ckpt.Lock()
	defer ckpt.Unlock()

	if !cond() {
		return nil
	}
	c.checkpointer.StartCollection()

	if c.collectTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, c.collectTimeout)
		defer cancel()
	}

	_ = c.accumulator.Collect(ctx)

	var err error
	select {
	case <-ctx.Done():
		err = ctx.Err()
	default:
		// The context wasn't done, ok.
	}

	// Finish the checkpoint whether the accumulator timed out or not.
	if cerr := c.checkpointer.FinishCollection(); cerr != nil {
		if err == nil {
			err = cerr
		} else {
			err = fmt.Errorf("%s: %w", cerr.Error(), err)
		}
	}

	return err
}

// export calls the exporter with a read lock on the CheckpointSet,
// applying the configured export timeout.
func (c *Controller) export(ctx context.Context) error {
	ckpt := c.checkpointer.CheckpointSet()
	ckpt.RLock()
	defer ckpt.RUnlock()

	if c.pushTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, c.pushTimeout)
		defer cancel()
	}

	return c.exporter.Export(ctx, ckpt)
}

// Foreach gives the caller read-locked access to the current
// export.CheckpointSet.
func (c *Controller) ForEach(ks export.ExportKindSelector, f func(export.Record) error) error {
	ckpt := c.checkpointer.CheckpointSet()
	ckpt.RLock()
	defer ckpt.RUnlock()

	return ckpt.ForEach(ks, f)
}

// IsRunning returns true if the controller was started via Start(),
// indicating that the current export.CheckpointSet is being kept
// up-to-date.
func (c *Controller) IsRunning() bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.ticker != nil
}

// Collect requests a collection. The collection will be skipped if
// the last collection is aged less than the configured collection
// period.
func (c *Controller) Collect(ctx context.Context) error {
	if c.IsRunning() {
		// When there's a non-nil ticker, there's a goroutine
		// computing checkpoints with the collection period.
		return ErrControllerStarted
	}

	return c.checkpoint(ctx, c.shouldCollect)
}

// shouldCollect returns true if the collector should collect now,
// based on the timestamp, the last collection time, and the
// configured period.
func (c *Controller) shouldCollect() bool {
	// This is called with the CheckpointSet exclusive
	// lock held.
	if c.collectPeriod == 0 {
		return true
	}
	now := c.clock.Now()
	if now.Sub(c.collectedTime) < c.collectPeriod {
		return false
	}
	c.collectedTime = now
	return true
}
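A minimal lifecycle sketch for this removed push-mode controller, using only the methods shown above; checkpointer and exp remain placeholders for the removed Checkpointer and Exporter types.

	ctrl := basic.New(checkpointer, basic.WithExporter(exp)) // push mode: exporter configured
	if err := ctrl.Start(context.Background()); err != nil {
		// ErrControllerStarted if Start() was already called
	}
	meter := ctrl.MeterProvider().Meter("ceph-csi")
	_ = meter // instruments created from this meter feed the accumulator
	// Stop drains the ticker goroutine and performs one final collect/export.
	defer func() { _ = ctrl.Stop(context.Background()) }()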
59 vendor/go.opentelemetry.io/otel/sdk/metric/controller/time/time.go generated vendored
@@ -1,59 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package time // import "go.opentelemetry.io/otel/sdk/metric/controller/time"

import (
	"time"
	lib "time"
)

// Several types below are created to match "github.com/benbjohnson/clock"
// so that it remains a test-only dependency.

type Clock interface {
	Now() lib.Time
	Ticker(duration lib.Duration) Ticker
}

type Ticker interface {
	Stop()
	C() <-chan lib.Time
}

type RealClock struct {
}

type RealTicker struct {
	ticker *lib.Ticker
}

var _ Clock = RealClock{}
var _ Ticker = RealTicker{}

func (RealClock) Now() time.Time {
	return time.Now()
}

func (RealClock) Ticker(period time.Duration) Ticker {
	return RealTicker{time.NewTicker(period)}
}

func (t RealTicker) Stop() {
	t.ticker.Stop()
}

func (t RealTicker) C() <-chan time.Time {
	return t.ticker.C
}
141 vendor/go.opentelemetry.io/otel/sdk/metric/doc.go generated vendored
@@ -1,141 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package metric implements the OpenTelemetry metric API.

This package is currently in a pre-GA phase. Backwards incompatible changes
may be introduced in subsequent minor version releases as we work to track the
evolving OpenTelemetry specification and user feedback.

The Accumulator type supports configurable metrics export behavior through a
collection of export interfaces that support various export strategies,
described below.

The OpenTelemetry metric API consists of methods for constructing synchronous
and asynchronous instruments. There are two constructors per instrument for
the two kinds of number (int64, float64).

Synchronous instruments are managed by a sync.Map containing a *record
with the current state for each synchronous instrument. A bound
instrument encapsulates a direct pointer to the record, allowing
bound metric events to bypass a sync.Map lookup. A lock-free
algorithm is used to protect against races when adding and removing
items from the sync.Map.

Asynchronous instruments are managed by an internal
AsyncInstrumentState, which coordinates calling batch and single
instrument callbacks.

Internal Structure

Each observer also has its own kind of record stored in the SDK. This
record contains a set of recorders for every specific label set used in the
callback.

A sync.Map maintains the mapping of current instruments and label sets to
internal records. To create a new bound instrument, the SDK consults the Map to
locate an existing record, otherwise it constructs a new record. The SDK
maintains a count of the number of references to each record, ensuring
that records are not reclaimed from the Map while they are still active
from the user's perspective.

Metric collection is performed via a single-threaded call to Collect that
sweeps through all records in the SDK, checkpointing their state. When a
record is discovered that has no references and has not been updated since
the prior collection pass, it is removed from the Map.

Both synchronous and asynchronous instruments have an associated
aggregator, which maintains the current state resulting from all metric
events since its last checkpoint. Aggregators may be lock-free or they may
use locking, but they should expect to be called concurrently. Aggregators
must be capable of merging with another aggregator of the same type.

Export Pipeline

While the SDK serves to maintain a current set of records and
coordinate collection, the behavior of a metrics export pipeline is
configured through the export types in
go.opentelemetry.io/otel/sdk/export/metric. It is important to keep
in mind the context these interfaces are called from. There are two
contexts, instrumentation context, where a user-level goroutine that
enters the SDK resulting in a new record, and collection context,
where a system-level thread performs a collection pass through the
SDK.

Descriptor is a struct that describes the metric instrument to the
|
||||
export pipeline, containing the name, units, description, metric kind,
|
||||
number kind (int64 or float64). A Descriptor accompanies metric data
|
||||
as it passes through the export pipeline.
|
||||
|
||||
The AggregatorSelector interface supports choosing the method of
|
||||
aggregation to apply to a particular instrument, by delegating the
|
||||
construction of an Aggregator to this interface. Given the Descriptor,
|
||||
the AggregatorFor method returns an implementation of Aggregator. If this
|
||||
interface returns nil, the metric will be disabled. The aggregator should
|
||||
be matched to the capabilities of the exporter. Selecting the aggregator
|
||||
for Adding instruments is relatively straightforward, but many options
|
||||
are available for aggregating distributions from Grouping instruments.
|
||||
|
||||
Aggregator is an interface which implements a concrete strategy for
|
||||
aggregating metric updates. Several Aggregator implementations are
|
||||
provided by the SDK. Aggregators may be lock-free or use locking,
|
||||
depending on their structure and semantics. Aggregators implement an
|
||||
Update method, called in instrumentation context, to receive a single
|
||||
metric event. Aggregators implement a Checkpoint method, called in
|
||||
collection context, to save a checkpoint of the current state.
|
||||
Aggregators implement a Merge method, also called in collection
|
||||
context, that combines state from two aggregators into one. Each SDK
|
||||
record has an associated aggregator.
|
||||
|
||||
Processor is an interface which sits between the SDK and an exporter.
|
||||
The Processor embeds an AggregatorSelector, used by the SDK to assign
|
||||
new Aggregators. The Processor supports a Process() API for submitting
|
||||
checkpointed aggregators to the processor, and a CheckpointSet() API
|
||||
for producing a complete checkpoint for the exporter. Two default
|
||||
Processor implementations are provided, the "defaultkeys" Processor groups
|
||||
aggregate metrics by their recommended Descriptor.Keys(), the
|
||||
"simple" Processor aggregates metrics at full dimensionality.
|
||||
|
||||
LabelEncoder is an optional optimization that allows an exporter to
|
||||
provide the serialization logic for labels. This allows avoiding
|
||||
duplicate serialization of labels, once as a unique key in the SDK (or
|
||||
Processor) and once in the exporter.
|
||||
|
||||
CheckpointSet is an interface between the Processor and the Exporter.
|
||||
After completing a collection pass, the Processor.CheckpointSet() method
|
||||
returns a CheckpointSet, which the Exporter uses to iterate over all
|
||||
the updated metrics.
|
||||
|
||||
Record is a struct containing the state of an individual exported
|
||||
metric. This is the result of one collection interface for one
|
||||
instrument and one label set.
|
||||
|
||||
Labels is a struct containing an ordered set of labels, the
|
||||
corresponding unique encoding, and the encoder that produced it.
|
||||
|
||||
Exporter is the final stage of an export pipeline. It is called with
|
||||
a CheckpointSet capable of enumerating all the updated metrics.
|
||||
|
||||
Controller is not an export interface per se, but it orchestrates the
|
||||
export pipeline. For example, a "push" controller will establish a
|
||||
periodic timer to regularly collect and export metrics. A "pull"
|
||||
controller will await a pull request before initiating metric
|
||||
collection. Either way, the job of the controller is to call the SDK
|
||||
Collect() method, then read the checkpoint, then invoke the exporter.
|
||||
Controllers are expected to implement the public metric.MeterProvider
|
||||
API, meaning they can be installed as the global Meter provider.
|
||||
|
||||
*/
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
377
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/basic.go
generated
vendored
377
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/basic.go
generated
vendored
@ -1,377 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
type (
|
||||
Processor struct {
|
||||
export.ExportKindSelector
|
||||
export.AggregatorSelector
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
stateKey struct {
|
||||
// TODO: This code is organized to support multiple
|
||||
// accumulators which could theoretically produce the
|
||||
// data for the same instrument with the same
|
||||
// resources, and this code has logic to combine data
|
||||
// properly from multiple accumulators. However, the
|
||||
// use of *metric.Descriptor in the stateKey makes
|
||||
// such combination impossible, because each
|
||||
// accumulator allocates its own instruments. This
|
||||
// can be fixed by using the instrument name and kind
|
||||
// instead of the descriptor pointer. See
|
||||
// https://github.com/open-telemetry/opentelemetry-go/issues/862.
|
||||
descriptor *metric.Descriptor
|
||||
distinct attribute.Distinct
|
||||
resource attribute.Distinct
|
||||
}
|
||||
|
||||
stateValue struct {
|
||||
// labels corresponds to the stateKey.distinct field.
|
||||
labels *attribute.Set
|
||||
|
||||
// resource corresponds to the stateKey.resource field.
|
||||
resource *resource.Resource
|
||||
|
||||
// updated indicates the last sequence number when this value had
|
||||
// Process() called by an accumulator.
|
||||
updated int64
|
||||
|
||||
// stateful indicates that a cumulative aggregation is
|
||||
// being maintained, taken from the process start time.
|
||||
stateful bool
|
||||
|
||||
// currentOwned indicates that "current" was allocated
|
||||
// by the processor in order to merge results from
|
||||
// multiple Accumulators during a single collection
|
||||
// round, which may happen either because:
|
||||
// (1) multiple Accumulators output the same Accumulation.
|
||||
// (2) one Accumulator is configured with dimensionality reduction.
|
||||
currentOwned bool
|
||||
|
||||
// current refers to the output from a single Accumulator
|
||||
// (if !currentOwned) or it refers to an Aggregator
|
||||
// owned by the processor used to accumulate multiple
|
||||
// values in a single collection round.
|
||||
current export.Aggregator
|
||||
|
||||
// delta, if non-nil, refers to an Aggregator owned by
|
||||
// the processor used to compute deltas between
|
||||
// precomputed sums.
|
||||
delta export.Aggregator
|
||||
|
||||
// cumulative, if non-nil, refers to an Aggregator owned
|
||||
// by the processor used to store the last cumulative
|
||||
// value.
|
||||
cumulative export.Aggregator
|
||||
}
|
||||
|
||||
state struct {
|
||||
config Config
|
||||
|
||||
// RWMutex implements locking for the `CheckpointSet` interface.
|
||||
sync.RWMutex
|
||||
values map[stateKey]*stateValue
|
||||
|
||||
// Note: the timestamp logic currently assumes all
|
||||
// exports are deltas.
|
||||
|
||||
processStart time.Time
|
||||
intervalStart time.Time
|
||||
intervalEnd time.Time
|
||||
|
||||
// startedCollection and finishedCollection are the
|
||||
// number of StartCollection() and FinishCollection()
|
||||
// calls, used to ensure that the sequence of starts
|
||||
// and finishes are correctly balanced.
|
||||
|
||||
startedCollection int64
|
||||
finishedCollection int64
|
||||
}
|
||||
)
|
||||
|
||||
var _ export.Processor = &Processor{}
|
||||
var _ export.Checkpointer = &Processor{}
|
||||
var _ export.CheckpointSet = &state{}
|
||||
var ErrInconsistentState = fmt.Errorf("inconsistent processor state")
|
||||
var ErrInvalidExportKind = fmt.Errorf("invalid export kind")
|
||||
|
||||
// New returns a basic Processor that is also a Checkpointer using the provided
|
||||
// AggregatorSelector to select Aggregators. The ExportKindSelector
|
||||
// is consulted to determine the kind(s) of exporter that will consume
|
||||
// data, so that this Processor can prepare to compute Delta or
|
||||
// Cumulative Aggregations as needed.
|
||||
func New(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) *Processor {
|
||||
now := time.Now()
|
||||
p := &Processor{
|
||||
AggregatorSelector: aselector,
|
||||
ExportKindSelector: eselector,
|
||||
state: state{
|
||||
values: map[stateKey]*stateValue{},
|
||||
processStart: now,
|
||||
intervalStart: now,
|
||||
},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.ApplyProcessor(&p.config)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Process implements export.Processor.
|
||||
func (b *Processor) Process(accum export.Accumulation) error {
|
||||
if b.startedCollection != b.finishedCollection+1 {
|
||||
return ErrInconsistentState
|
||||
}
|
||||
desc := accum.Descriptor()
|
||||
key := stateKey{
|
||||
descriptor: desc,
|
||||
distinct: accum.Labels().Equivalent(),
|
||||
resource: accum.Resource().Equivalent(),
|
||||
}
|
||||
agg := accum.Aggregator()
|
||||
|
||||
// Check if there is an existing value.
|
||||
value, ok := b.state.values[key]
|
||||
if !ok {
|
||||
stateful := b.ExportKindFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind())
|
||||
|
||||
newValue := &stateValue{
|
||||
labels: accum.Labels(),
|
||||
resource: accum.Resource(),
|
||||
updated: b.state.finishedCollection,
|
||||
stateful: stateful,
|
||||
current: agg,
|
||||
}
|
||||
if stateful {
|
||||
if desc.InstrumentKind().PrecomputedSum() {
|
||||
// If we know we need to compute deltas, allocate two aggregators.
|
||||
b.AggregatorFor(desc, &newValue.cumulative, &newValue.delta)
|
||||
} else {
|
||||
// In this case we are certain not to need a delta, only allocate
|
||||
// a cumulative aggregator.
|
||||
b.AggregatorFor(desc, &newValue.cumulative)
|
||||
}
|
||||
}
|
||||
b.state.values[key] = newValue
|
||||
return nil
|
||||
}
|
||||
|
||||
// Advance the update sequence number.
|
||||
sameCollection := b.state.finishedCollection == value.updated
|
||||
value.updated = b.state.finishedCollection
|
||||
|
||||
// At this point in the code, we have located an existing
|
||||
// value for some stateKey. This can be because:
|
||||
//
|
||||
// (a) stateful aggregation is being used, the entry was
|
||||
// entered during a prior collection, and this is the first
|
||||
// time processing an accumulation for this stateKey in the
|
||||
// current collection. Since this is the first time
|
||||
// processing an accumulation for this stateKey during this
|
||||
// collection, we don't know yet whether there are multiple
|
||||
// accumulators at work. If there are multiple accumulators,
|
||||
// they'll hit case (b) the second time through.
|
||||
//
|
||||
// (b) multiple accumulators are being used, whether stateful
|
||||
// or not.
|
||||
//
|
||||
// Case (a) occurs when the instrument and the exporter
|
||||
// require memory to work correctly, either because the
|
||||
// instrument reports a PrecomputedSum to a DeltaExporter or
|
||||
// the reverse, a non-PrecomputedSum instrument with a
|
||||
// CumulativeExporter. This logic is encapsulated in
|
||||
// ExportKind.MemoryRequired(InstrumentKind).
|
||||
//
|
||||
// Case (b) occurs when the variable `sameCollection` is true,
|
||||
// indicating that the stateKey for Accumulation has already
|
||||
// been seen in the same collection. When this happens, it
|
||||
// implies that multiple Accumulators are being used, or that
|
||||
// a single Accumulator has been configured with a label key
|
||||
// filter.
|
||||
|
||||
if !sameCollection {
|
||||
if !value.currentOwned {
|
||||
// This is the first Accumulation we've seen
|
||||
// for this stateKey during this collection.
|
||||
// Just keep a reference to the Accumulator's
|
||||
// Aggregator. All the other cases copy
|
||||
// Aggregator state.
|
||||
value.current = agg
|
||||
return nil
|
||||
}
|
||||
return agg.SynchronizedMove(value.current, desc)
|
||||
}
|
||||
|
||||
// If the current is not owned, take ownership of a copy
|
||||
// before merging below.
|
||||
if !value.currentOwned {
|
||||
tmp := value.current
|
||||
b.AggregatorSelector.AggregatorFor(desc, &value.current)
|
||||
value.currentOwned = true
|
||||
if err := tmp.SynchronizedMove(value.current, desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Combine this Accumulation with the prior Accumulation.
|
||||
return value.current.Merge(agg, desc)
|
||||
}
|
||||
|
||||
// CheckpointSet returns the associated CheckpointSet. Use the
|
||||
// CheckpointSet Locker interface to synchronize access to this
|
||||
// object. The CheckpointSet.ForEach() method cannot be called
|
||||
// concurrently with Process().
|
||||
func (b *Processor) CheckpointSet() export.CheckpointSet {
|
||||
return &b.state
|
||||
}
|
||||
|
||||
// StartCollection signals to the Processor one or more Accumulators
|
||||
// will begin calling Process() calls during collection.
|
||||
func (b *Processor) StartCollection() {
|
||||
if b.startedCollection != 0 {
|
||||
b.intervalStart = b.intervalEnd
|
||||
}
|
||||
b.startedCollection++
|
||||
}
|
||||
|
||||
// FinishCollection signals to the Processor that a complete
|
||||
// collection has finished and that ForEach will be called to access
|
||||
// the CheckpointSet.
|
||||
func (b *Processor) FinishCollection() error {
|
||||
b.intervalEnd = time.Now()
|
||||
if b.startedCollection != b.finishedCollection+1 {
|
||||
return ErrInconsistentState
|
||||
}
|
||||
defer func() { b.finishedCollection++ }()
|
||||
|
||||
for key, value := range b.values {
|
||||
mkind := key.descriptor.InstrumentKind()
|
||||
stale := value.updated != b.finishedCollection
|
||||
stateless := !value.stateful
|
||||
|
||||
// The following branch updates stateful aggregators. Skip
|
||||
// these updates if the aggregator is not stateful or if the
|
||||
// aggregator is stale.
|
||||
if stale || stateless {
|
||||
// If this processor does not require memeory,
|
||||
// stale, stateless entries can be removed.
|
||||
// This implies that they were not updated
|
||||
// over the previous full collection interval.
|
||||
if stale && stateless && !b.config.Memory {
|
||||
delete(b.values, key)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Update Aggregator state to support exporting either a
|
||||
// delta or a cumulative aggregation.
|
||||
var err error
|
||||
if mkind.PrecomputedSum() {
|
||||
if currentSubtractor, ok := value.current.(export.Subtractor); ok {
|
||||
// This line is equivalent to:
|
||||
// value.delta = currentSubtractor - value.cumulative
|
||||
err = currentSubtractor.Subtract(value.cumulative, value.delta, key.descriptor)
|
||||
|
||||
if err == nil {
|
||||
err = value.current.SynchronizedMove(value.cumulative, key.descriptor)
|
||||
}
|
||||
} else {
|
||||
err = aggregation.ErrNoSubtraction
|
||||
}
|
||||
} else {
|
||||
// This line is equivalent to:
|
||||
// value.cumulative = value.cumulative + value.delta
|
||||
err = value.cumulative.Merge(value.current, key.descriptor)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForEach iterates through the CheckpointSet, passing an
|
||||
// export.Record with the appropriate Cumulative or Delta aggregation
|
||||
// to an exporter.
|
||||
func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
|
||||
if b.startedCollection != b.finishedCollection {
|
||||
return ErrInconsistentState
|
||||
}
|
||||
for key, value := range b.values {
|
||||
mkind := key.descriptor.InstrumentKind()
|
||||
|
||||
var agg aggregation.Aggregation
|
||||
var start time.Time
|
||||
|
||||
// If the processor does not have Config.Memory and it was not updated
|
||||
// in the prior round, do not visit this value.
|
||||
if !b.config.Memory && value.updated != (b.finishedCollection-1) {
|
||||
continue
|
||||
}
|
||||
|
||||
ekind := exporter.ExportKindFor(key.descriptor, value.current.Aggregation().Kind())
|
||||
switch ekind {
|
||||
case export.CumulativeExportKind:
|
||||
// If stateful, the sum has been computed. If stateless, the
|
||||
// input was already cumulative. Either way, use the checkpointed
|
||||
// value:
|
||||
if value.stateful {
|
||||
agg = value.cumulative.Aggregation()
|
||||
} else {
|
||||
agg = value.current.Aggregation()
|
||||
}
|
||||
start = b.processStart
|
||||
|
||||
case export.DeltaExportKind:
|
||||
// Precomputed sums are a special case.
|
||||
if mkind.PrecomputedSum() {
|
||||
agg = value.delta.Aggregation()
|
||||
} else {
|
||||
agg = value.current.Aggregation()
|
||||
}
|
||||
start = b.intervalStart
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%v: %w", ekind, ErrInvalidExportKind)
|
||||
}
|
||||
|
||||
if err := f(export.NewRecord(
|
||||
key.descriptor,
|
||||
value.labels,
|
||||
value.resource,
|
||||
agg,
|
||||
start,
|
||||
b.intervalEnd,
|
||||
)); err != nil && !errors.Is(err, aggregation.ErrNoData) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
42
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/config.go
generated
vendored
42
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/config.go
generated
vendored
@ -1,42 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
|
||||
// Config contains the options for configuring a basic metric processor.
|
||||
type Config struct {
|
||||
// Memory controls whether the processor remembers metric
|
||||
// instruments and label sets that were previously reported.
|
||||
// When Memory is true, CheckpointSet.ForEach() will visit
|
||||
// metrics that were not updated in the most recent interval.
|
||||
Memory bool
|
||||
}
|
||||
|
||||
type Option interface {
|
||||
ApplyProcessor(*Config)
|
||||
}
|
||||
|
||||
// WithMemory sets the memory behavior of a Processor. If this is
|
||||
// true, the processor will report metric instruments and label sets
|
||||
// that were previously reported but not updated in the most recent
|
||||
// interval.
|
||||
func WithMemory(memory bool) Option {
|
||||
return memoryOption(memory)
|
||||
}
|
||||
|
||||
type memoryOption bool
|
||||
|
||||
func (m memoryOption) ApplyProcessor(config *Config) {
|
||||
config.Memory = bool(m)
|
||||
}
|
59
vendor/go.opentelemetry.io/otel/sdk/metric/refcount_mapped.go
generated
vendored
59
vendor/go.opentelemetry.io/otel/sdk/metric/refcount_mapped.go
generated
vendored
@ -1,59 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// refcountMapped atomically counts the number of references (usages) of an entry
|
||||
// while also keeping a state of mapped/unmapped into a different data structure
|
||||
// (an external map or list for example).
|
||||
//
|
||||
// refcountMapped uses an atomic value where the least significant bit is used to
|
||||
// keep the state of mapping ('1' is used for unmapped and '0' is for mapped) and
|
||||
// the rest of the bits are used for refcounting.
|
||||
type refcountMapped struct {
|
||||
// refcount has to be aligned for 64-bit atomic operations.
|
||||
value int64
|
||||
}
|
||||
|
||||
// ref returns true if the entry is still mapped and increases the
|
||||
// reference usages, if unmapped returns false.
|
||||
func (rm *refcountMapped) ref() bool {
|
||||
// Check if this entry was marked as unmapped between the moment
|
||||
// we got a reference to it (or will be removed very soon) and here.
|
||||
return atomic.AddInt64(&rm.value, 2)&1 == 0
|
||||
}
|
||||
|
||||
func (rm *refcountMapped) unref() {
|
||||
atomic.AddInt64(&rm.value, -2)
|
||||
}
|
||||
|
||||
// tryUnmap flips the mapped bit to "unmapped" state and returns true if both of the
|
||||
// following conditions are true upon entry to this function:
|
||||
// * There are no active references;
|
||||
// * The mapped bit is in "mapped" state.
|
||||
// Otherwise no changes are done to mapped bit and false is returned.
|
||||
func (rm *refcountMapped) tryUnmap() bool {
|
||||
if atomic.LoadInt64(&rm.value) != 0 {
|
||||
return false
|
||||
}
|
||||
return atomic.CompareAndSwapInt64(
|
||||
&rm.value,
|
||||
0,
|
||||
1,
|
||||
)
|
||||
}
|
555
vendor/go.opentelemetry.io/otel/sdk/metric/sdk.go
generated
vendored
555
vendor/go.opentelemetry.io/otel/sdk/metric/sdk.go
generated
vendored
@ -1,555 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
internal "go.opentelemetry.io/otel/internal/metric"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
type (
|
||||
// Accumulator implements the OpenTelemetry Meter API. The
|
||||
// Accumulator is bound to a single export.Processor in
|
||||
// `NewAccumulator()`.
|
||||
//
|
||||
// The Accumulator supports a Collect() API to gather and export
|
||||
// current data. Collect() should be arranged according to
|
||||
// the processor model. Push-based processors will setup a
|
||||
// timer to call Collect() periodically. Pull-based processors
|
||||
// will call Collect() when a pull request arrives.
|
||||
Accumulator struct {
|
||||
// current maps `mapkey` to *record.
|
||||
current sync.Map
|
||||
|
||||
// asyncInstruments is a set of
|
||||
// `*asyncInstrument` instances
|
||||
asyncLock sync.Mutex
|
||||
asyncInstruments *internal.AsyncInstrumentState
|
||||
|
||||
// currentEpoch is the current epoch number. It is
|
||||
// incremented in `Collect()`.
|
||||
currentEpoch int64
|
||||
|
||||
// processor is the configured processor+configuration.
|
||||
processor export.Processor
|
||||
|
||||
// collectLock prevents simultaneous calls to Collect().
|
||||
collectLock sync.Mutex
|
||||
|
||||
// asyncSortSlice has a single purpose - as a temporary
|
||||
// place for sorting during labels creation to avoid
|
||||
// allocation. It is cleared after use.
|
||||
asyncSortSlice attribute.Sortable
|
||||
|
||||
// resource is applied to all records in this Accumulator.
|
||||
resource *resource.Resource
|
||||
}
|
||||
|
||||
syncInstrument struct {
|
||||
instrument
|
||||
}
|
||||
|
||||
// mapkey uniquely describes a metric instrument in terms of
|
||||
// its InstrumentID and the encoded form of its labels.
|
||||
mapkey struct {
|
||||
descriptor *metric.Descriptor
|
||||
ordered attribute.Distinct
|
||||
}
|
||||
|
||||
// record maintains the state of one metric instrument. Due
|
||||
// the use of lock-free algorithms, there may be more than one
|
||||
// `record` in existence at a time, although at most one can
|
||||
// be referenced from the `Accumulator.current` map.
|
||||
record struct {
|
||||
// refMapped keeps track of refcounts and the mapping state to the
|
||||
// Accumulator.current map.
|
||||
refMapped refcountMapped
|
||||
|
||||
// updateCount is incremented on every Update.
|
||||
updateCount int64
|
||||
|
||||
// collectedCount is set to updateCount on collection,
|
||||
// supports checking for no updates during a round.
|
||||
collectedCount int64
|
||||
|
||||
// storage is the stored label set for this record,
|
||||
// except in cases where a label set is shared due to
|
||||
// batch recording.
|
||||
storage attribute.Set
|
||||
|
||||
// labels is the processed label set for this record.
|
||||
// this may refer to the `storage` field in another
|
||||
// record if this label set is shared resulting from
|
||||
// `RecordBatch`.
|
||||
labels *attribute.Set
|
||||
|
||||
// sortSlice has a single purpose - as a temporary
|
||||
// place for sorting during labels creation to avoid
|
||||
// allocation.
|
||||
sortSlice attribute.Sortable
|
||||
|
||||
// inst is a pointer to the corresponding instrument.
|
||||
inst *syncInstrument
|
||||
|
||||
// current implements the actual RecordOne() API,
|
||||
// depending on the type of aggregation. If nil, the
|
||||
// metric was disabled by the exporter.
|
||||
current export.Aggregator
|
||||
checkpoint export.Aggregator
|
||||
}
|
||||
|
||||
instrument struct {
|
||||
meter *Accumulator
|
||||
descriptor metric.Descriptor
|
||||
}
|
||||
|
||||
asyncInstrument struct {
|
||||
instrument
|
||||
// recorders maps ordered labels to the pair of
|
||||
// labelset and recorder
|
||||
recorders map[attribute.Distinct]*labeledRecorder
|
||||
}
|
||||
|
||||
labeledRecorder struct {
|
||||
observedEpoch int64
|
||||
labels *attribute.Set
|
||||
observed export.Aggregator
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
_ metric.MeterImpl = &Accumulator{}
|
||||
_ metric.AsyncImpl = &asyncInstrument{}
|
||||
_ metric.SyncImpl = &syncInstrument{}
|
||||
_ metric.BoundSyncImpl = &record{}
|
||||
|
||||
ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument")
|
||||
)
|
||||
|
||||
func (inst *instrument) Descriptor() metric.Descriptor {
|
||||
return inst.descriptor
|
||||
}
|
||||
|
||||
func (a *asyncInstrument) Implementation() interface{} {
|
||||
return a
|
||||
}
|
||||
|
||||
func (s *syncInstrument) Implementation() interface{} {
|
||||
return s
|
||||
}
|
||||
|
||||
func (a *asyncInstrument) observe(num number.Number, labels *attribute.Set) {
|
||||
if err := aggregator.RangeTest(num, &a.descriptor); err != nil {
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
recorder := a.getRecorder(labels)
|
||||
if recorder == nil {
|
||||
// The instrument is disabled according to the
|
||||
// AggregatorSelector.
|
||||
return
|
||||
}
|
||||
if err := recorder.Update(context.Background(), num, &a.descriptor); err != nil {
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (a *asyncInstrument) getRecorder(labels *attribute.Set) export.Aggregator {
|
||||
lrec, ok := a.recorders[labels.Equivalent()]
|
||||
if ok {
|
||||
// Note: SynchronizedMove(nil) can't return an error
|
||||
_ = lrec.observed.SynchronizedMove(nil, &a.descriptor)
|
||||
lrec.observedEpoch = a.meter.currentEpoch
|
||||
a.recorders[labels.Equivalent()] = lrec
|
||||
return lrec.observed
|
||||
}
|
||||
var rec export.Aggregator
|
||||
a.meter.processor.AggregatorFor(&a.descriptor, &rec)
|
||||
if a.recorders == nil {
|
||||
a.recorders = make(map[attribute.Distinct]*labeledRecorder)
|
||||
}
|
||||
// This may store nil recorder in the map, thus disabling the
|
||||
// asyncInstrument for the labelset for good. This is intentional,
|
||||
// but will be revisited later.
|
||||
a.recorders[labels.Equivalent()] = &labeledRecorder{
|
||||
observed: rec,
|
||||
labels: labels,
|
||||
observedEpoch: a.meter.currentEpoch,
|
||||
}
|
||||
return rec
|
||||
}
|
||||
|
||||
// acquireHandle gets or creates a `*record` corresponding to `kvs`,
|
||||
// the input labels. The second argument `labels` is passed in to
|
||||
// support re-use of the orderedLabels computed by a previous
|
||||
// measurement in the same batch. This performs two allocations
|
||||
// in the common case.
|
||||
func (s *syncInstrument) acquireHandle(kvs []attribute.KeyValue, labelPtr *attribute.Set) *record {
|
||||
var rec *record
|
||||
var equiv attribute.Distinct
|
||||
|
||||
if labelPtr == nil {
|
||||
// This memory allocation may not be used, but it's
|
||||
// needed for the `sortSlice` field, to avoid an
|
||||
// allocation while sorting.
|
||||
rec = &record{}
|
||||
rec.storage = attribute.NewSetWithSortable(kvs, &rec.sortSlice)
|
||||
rec.labels = &rec.storage
|
||||
equiv = rec.storage.Equivalent()
|
||||
} else {
|
||||
equiv = labelPtr.Equivalent()
|
||||
}
|
||||
|
||||
// Create lookup key for sync.Map (one allocation, as this
|
||||
// passes through an interface{})
|
||||
mk := mapkey{
|
||||
descriptor: &s.descriptor,
|
||||
ordered: equiv,
|
||||
}
|
||||
|
||||
if actual, ok := s.meter.current.Load(mk); ok {
|
||||
// Existing record case.
|
||||
existingRec := actual.(*record)
|
||||
if existingRec.refMapped.ref() {
|
||||
// At this moment it is guaranteed that the entry is in
|
||||
// the map and will not be removed.
|
||||
return existingRec
|
||||
}
|
||||
// This entry is no longer mapped, try to add a new entry.
|
||||
}
|
||||
|
||||
if rec == nil {
|
||||
rec = &record{}
|
||||
rec.labels = labelPtr
|
||||
}
|
||||
rec.refMapped = refcountMapped{value: 2}
|
||||
rec.inst = s
|
||||
|
||||
s.meter.processor.AggregatorFor(&s.descriptor, &rec.current, &rec.checkpoint)
|
||||
|
||||
for {
|
||||
// Load/Store: there's a memory allocation to place `mk` into
|
||||
// an interface here.
|
||||
if actual, loaded := s.meter.current.LoadOrStore(mk, rec); loaded {
|
||||
// Existing record case. Cannot change rec here because if fail
|
||||
// will try to add rec again to avoid new allocations.
|
||||
oldRec := actual.(*record)
|
||||
if oldRec.refMapped.ref() {
|
||||
// At this moment it is guaranteed that the entry is in
|
||||
// the map and will not be removed.
|
||||
return oldRec
|
||||
}
|
||||
// This loaded entry is marked as unmapped (so Collect will remove
|
||||
// it from the map immediately), try again - this is a busy waiting
|
||||
// strategy to wait until Collect() removes this entry from the map.
|
||||
//
|
||||
// This can be improved by having a list of "Unmapped" entries for
|
||||
// one time only usages, OR we can make this a blocking path and use
|
||||
// a Mutex that protects the delete operation (delete only if the old
|
||||
// record is associated with the key).
|
||||
|
||||
// Let collector get work done to remove the entry from the map.
|
||||
runtime.Gosched()
|
||||
continue
|
||||
}
|
||||
// The new entry was added to the map, good to go.
|
||||
return rec
|
||||
}
|
||||
}
|
||||
|
||||
// The order of the input array `kvs` may be sorted after the function is called.
|
||||
func (s *syncInstrument) Bind(kvs []attribute.KeyValue) metric.BoundSyncImpl {
|
||||
return s.acquireHandle(kvs, nil)
|
||||
}
|
||||
|
||||
// The order of the input array `kvs` may be sorted after the function is called.
|
||||
func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs []attribute.KeyValue) {
|
||||
h := s.acquireHandle(kvs, nil)
|
||||
defer h.Unbind()
|
||||
h.RecordOne(ctx, num)
|
||||
}
|
||||
|
||||
// NewAccumulator constructs a new Accumulator for the given
|
||||
// processor. This Accumulator supports only a single processor.
|
||||
//
|
||||
// The Accumulator does not start any background process to collect itself
|
||||
// periodically, this responsibility lies with the processor, typically,
|
||||
// depending on the type of export. For example, a pull-based
|
||||
// processor will call Collect() when it receives a request to scrape
|
||||
// current metric values. A push-based processor should configure its
|
||||
// own periodic collection.
|
||||
func NewAccumulator(processor export.Processor, resource *resource.Resource) *Accumulator {
|
||||
return &Accumulator{
|
||||
processor: processor,
|
||||
asyncInstruments: internal.NewAsyncInstrumentState(),
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// NewSyncInstrument implements metric.MetricImpl.
|
||||
func (m *Accumulator) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
|
||||
return &syncInstrument{
|
||||
instrument: instrument{
|
||||
descriptor: descriptor,
|
||||
meter: m,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewAsyncInstrument implements metric.MetricImpl.
|
||||
func (m *Accumulator) NewAsyncInstrument(descriptor metric.Descriptor, runner metric.AsyncRunner) (metric.AsyncImpl, error) {
|
||||
a := &asyncInstrument{
|
||||
instrument: instrument{
|
||||
descriptor: descriptor,
|
||||
meter: m,
|
||||
},
|
||||
}
|
||||
m.asyncLock.Lock()
|
||||
defer m.asyncLock.Unlock()
|
||||
m.asyncInstruments.Register(a, runner)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Collect traverses the list of active records and observers and
|
||||
// exports data for each active instrument. Collect() may not be
|
||||
// called concurrently.
|
||||
//
|
||||
// During the collection pass, the export.Processor will receive
|
||||
// one Export() call per current aggregation.
|
||||
//
|
||||
// Returns the number of records that were checkpointed.
|
||||
func (m *Accumulator) Collect(ctx context.Context) int {
|
||||
m.collectLock.Lock()
|
||||
defer m.collectLock.Unlock()
|
||||
|
||||
checkpointed := m.observeAsyncInstruments(ctx)
|
||||
checkpointed += m.collectSyncInstruments()
|
||||
m.currentEpoch++
|
||||
|
||||
return checkpointed
|
||||
}
|
||||
|
||||
func (m *Accumulator) collectSyncInstruments() int {
|
||||
checkpointed := 0
|
||||
|
||||
m.current.Range(func(key interface{}, value interface{}) bool {
|
||||
// Note: always continue to iterate over the entire
|
||||
// map by returning `true` in this function.
|
||||
inuse := value.(*record)
|
||||
|
||||
mods := atomic.LoadInt64(&inuse.updateCount)
|
||||
coll := inuse.collectedCount
|
||||
|
||||
if mods != coll {
|
||||
// Updates happened in this interval,
|
||||
// checkpoint and continue.
|
||||
checkpointed += m.checkpointRecord(inuse)
|
||||
inuse.collectedCount = mods
|
||||
return true
|
||||
}
|
||||
|
||||
// Having no updates since last collection, try to unmap:
|
||||
if unmapped := inuse.refMapped.tryUnmap(); !unmapped {
|
||||
// The record is referenced by a binding, continue.
|
||||
return true
|
||||
}
|
||||
|
||||
// If any other goroutines are now trying to re-insert this
|
||||
// entry in the map, they are busy calling Gosched() awaiting
|
||||
// this deletion:
|
||||
m.current.Delete(inuse.mapkey())
|
||||
|
||||
// There's a potential race between `LoadInt64` and
|
||||
// `tryUnmap` in this function. Since this is the
|
||||
// last we'll see of this record, checkpoint
|
||||
mods = atomic.LoadInt64(&inuse.updateCount)
|
||||
if mods != coll {
|
||||
checkpointed += m.checkpointRecord(inuse)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return checkpointed
|
||||
}
|
||||
|
||||
// CollectAsync implements internal.AsyncCollector.
|
||||
// The order of the input array `kvs` may be sorted after the function is called.
|
||||
func (m *Accumulator) CollectAsync(kv []attribute.KeyValue, obs ...metric.Observation) {
|
||||
labels := attribute.NewSetWithSortable(kv, &m.asyncSortSlice)
|
||||
|
||||
for _, ob := range obs {
|
||||
if a := m.fromAsync(ob.AsyncImpl()); a != nil {
|
||||
a.observe(ob.Number(), &labels)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Accumulator) observeAsyncInstruments(ctx context.Context) int {
|
||||
m.asyncLock.Lock()
|
||||
defer m.asyncLock.Unlock()
|
||||
|
||||
asyncCollected := 0
|
||||
|
||||
m.asyncInstruments.Run(ctx, m)
|
||||
|
||||
for _, inst := range m.asyncInstruments.Instruments() {
|
||||
if a := m.fromAsync(inst); a != nil {
|
||||
asyncCollected += m.checkpointAsync(a)
|
||||
}
|
||||
}
|
||||
|
||||
return asyncCollected
|
||||
}
|
||||
|
||||
func (m *Accumulator) checkpointRecord(r *record) int {
|
||||
if r.current == nil {
|
||||
return 0
|
||||
}
|
||||
err := r.current.SynchronizedMove(r.checkpoint, &r.inst.descriptor)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
return 0
|
||||
}
|
||||
|
||||
a := export.NewAccumulation(&r.inst.descriptor, r.labels, m.resource, r.checkpoint)
|
||||
err = m.processor.Process(a)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (m *Accumulator) checkpointAsync(a *asyncInstrument) int {
|
||||
if len(a.recorders) == 0 {
|
||||
return 0
|
||||
}
|
||||
checkpointed := 0
|
||||
for encodedLabels, lrec := range a.recorders {
|
||||
lrec := lrec
|
||||
epochDiff := m.currentEpoch - lrec.observedEpoch
|
||||
if epochDiff == 0 {
|
||||
if lrec.observed != nil {
|
||||
a := export.NewAccumulation(&a.descriptor, lrec.labels, m.resource, lrec.observed)
|
||||
err := m.processor.Process(a)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
checkpointed++
|
||||
}
|
||||
} else if epochDiff > 1 {
|
||||
// This is second collection cycle with no
|
||||
// observations for this labelset. Remove the
|
||||
// recorder.
|
||||
delete(a.recorders, encodedLabels)
|
||||
}
|
||||
}
|
||||
if len(a.recorders) == 0 {
|
||||
a.recorders = nil
|
||||
}
|
||||
return checkpointed
|
||||
}
|
||||
|
||||
// RecordBatch enters a batch of metric events.
|
||||
// The order of the input array `kvs` may be sorted after the function is called.
|
||||
func (m *Accumulator) RecordBatch(ctx context.Context, kvs []attribute.KeyValue, measurements ...metric.Measurement) {
|
||||
// Labels will be computed the first time acquireHandle is
|
||||
// called. Subsequent calls to acquireHandle will re-use the
|
||||
// previously computed value instead of recomputing the
|
||||
// ordered labels.
|
||||
var labelsPtr *attribute.Set
|
||||
for i, meas := range measurements {
|
||||
s := m.fromSync(meas.SyncImpl())
|
||||
if s == nil {
|
||||
continue
|
||||
}
|
||||
h := s.acquireHandle(kvs, labelsPtr)
|
||||
|
||||
// Re-use labels for the next measurement.
|
||||
if i == 0 {
|
||||
labelsPtr = h.labels
|
||||
}
|
||||
|
||||
defer h.Unbind()
|
||||
h.RecordOne(ctx, meas.Number())
|
||||
}
|
||||
}
|
||||
|
||||
// RecordOne implements metric.SyncImpl.
|
||||
func (r *record) RecordOne(ctx context.Context, num number.Number) {
|
||||
if r.current == nil {
|
||||
// The instrument is disabled according to the AggregatorSelector.
|
||||
return
|
||||
}
|
||||
if err := aggregator.RangeTest(num, &r.inst.descriptor); err != nil {
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
if err := r.current.Update(ctx, num, &r.inst.descriptor); err != nil {
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
// Record was modified, inform the Collect() that things need
|
||||
// to be collected while the record is still mapped.
|
||||
atomic.AddInt64(&r.updateCount, 1)
|
||||
}
|
||||
|
||||
// Unbind implements metric.SyncImpl.
|
||||
func (r *record) Unbind() {
|
||||
r.refMapped.unref()
|
||||
}
|
||||
|
||||
func (r *record) mapkey() mapkey {
|
||||
return mapkey{
|
||||
descriptor: &r.inst.descriptor,
|
||||
ordered: r.labels.Equivalent(),
|
||||
}
|
||||
}
|
||||
|
||||
// fromSync gets a sync implementation object, checking for
|
||||
// uninitialized instruments and instruments created by another SDK.
|
||||
func (m *Accumulator) fromSync(sync metric.SyncImpl) *syncInstrument {
|
||||
if sync != nil {
|
||||
if inst, ok := sync.Implementation().(*syncInstrument); ok {
|
||||
return inst
|
||||
}
|
||||
}
|
||||
otel.Handle(ErrUninitializedInstrument)
|
||||
return nil
|
||||
}
|
||||
|
||||
// fromSync gets an async implementation object, checking for
|
||||
// uninitialized instruments and instruments created by another SDK.
|
||||
func (m *Accumulator) fromAsync(async metric.AsyncImpl) *asyncInstrument {
|
||||
if async != nil {
|
||||
if inst, ok := async.Implementation().(*asyncInstrument); ok {
|
||||
return inst
|
||||
}
|
||||
}
|
||||
otel.Handle(ErrUninitializedInstrument)
|
||||
return nil
|
||||
}
|
120
vendor/go.opentelemetry.io/otel/sdk/metric/selector/simple/simple.go
generated
vendored
120
vendor/go.opentelemetry.io/otel/sdk/metric/selector/simple/simple.go
generated
vendored
@ -1,120 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
)
|
||||
|
||||
type (
|
||||
selectorInexpensive struct{}
|
||||
selectorExact struct{}
|
||||
selectorHistogram struct {
|
||||
options []histogram.Option
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
_ export.AggregatorSelector = selectorInexpensive{}
|
||||
_ export.AggregatorSelector = selectorExact{}
|
||||
_ export.AggregatorSelector = selectorHistogram{}
|
||||
)
|
||||
|
||||
// NewWithInexpensiveDistribution returns a simple aggregator selector
|
||||
// that uses minmaxsumcount aggregators for `ValueRecorder`
|
||||
// instruments. This selector is faster and uses less memory than the
|
||||
// others in this package because minmaxsumcount aggregators maintain
|
||||
// the least information about the distribution among these choices.
|
||||
func NewWithInexpensiveDistribution() export.AggregatorSelector {
|
||||
return selectorInexpensive{}
|
||||
}
|
||||
|
||||
// NewWithExactDistribution returns a simple aggregator selector that
|
||||
// uses exact aggregators for `ValueRecorder` instruments. This
|
||||
// selector uses more memory than the others in this package because
|
||||
// exact aggregators maintain the most information about the
|
||||
// distribution among these choices.
|
||||
func NewWithExactDistribution() export.AggregatorSelector {
|
||||
return selectorExact{}
|
||||
}
|
||||
|
||||
// NewWithHistogramDistribution returns a simple aggregator selector
|
||||
// that uses histogram aggregators for `ValueRecorder` instruments.
|
||||
// This selector is a good default choice for most metric exporters.
|
||||
func NewWithHistogramDistribution(options ...histogram.Option) export.AggregatorSelector {
|
||||
return selectorHistogram{options: options}
|
||||
}
|
||||
|
||||
func sumAggs(aggPtrs []*export.Aggregator) {
|
||||
aggs := sum.New(len(aggPtrs))
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
}
|
||||
|
||||
func lastValueAggs(aggPtrs []*export.Aggregator) {
|
||||
aggs := lastvalue.New(len(aggPtrs))
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
|
||||
switch descriptor.InstrumentKind() {
|
||||
case metric.ValueObserverInstrumentKind:
|
||||
lastValueAggs(aggPtrs)
|
||||
case metric.ValueRecorderInstrumentKind:
|
||||
aggs := minmaxsumcount.New(len(aggPtrs), descriptor)
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
default:
|
||||
sumAggs(aggPtrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (selectorExact) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
|
||||
switch descriptor.InstrumentKind() {
|
||||
case metric.ValueObserverInstrumentKind:
|
||||
lastValueAggs(aggPtrs)
|
||||
case metric.ValueRecorderInstrumentKind:
|
||||
aggs := exact.New(len(aggPtrs))
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
default:
|
||||
sumAggs(aggPtrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (s selectorHistogram) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
|
||||
switch descriptor.InstrumentKind() {
|
||||
case metric.ValueObserverInstrumentKind:
|
||||
lastValueAggs(aggPtrs)
|
||||
case metric.ValueRecorderInstrumentKind:
|
||||
aggs := histogram.New(len(aggPtrs), descriptor, s.options...)
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
default:
|
||||
sumAggs(aggPtrs)
|
||||
}
|
||||
}
|
12
vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
generated
vendored
12
vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
generated
vendored
@ -27,14 +27,19 @@ var (
|
||||
ErrPartialResource = errors.New("partial resource")
|
||||
)
|
||||
|
||||
// Detector detects OpenTelemetry resource information
|
||||
// Detector detects OpenTelemetry resource information.
|
||||
type Detector interface {
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// Detect returns an initialized Resource based on gathered information.
|
||||
// If the source information to construct a Resource contains invalid
|
||||
// values, a Resource is returned with the valid parts of the source
|
||||
// information used for initialization along with an appropriately
|
||||
// wrapped ErrPartialResource error.
|
||||
Detect(ctx context.Context) (*Resource, error)
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
// Detect calls all input detectors sequentially and merges each result with the previous one.
|
||||
@ -53,7 +58,10 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
autoDetectedRes = Merge(autoDetectedRes, res)
|
||||
autoDetectedRes, err = Merge(autoDetectedRes, res)
|
||||
if err != nil {
|
||||
errInfo = append(errInfo, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
var aggregatedError error
|
||||
|
41
vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
generated
vendored
41
vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
generated
vendored
@ -22,42 +22,44 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/semconv"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
)
|
||||
|
||||
type (
|
||||
// TelemetrySDK is a Detector that provides information about
|
||||
// telemetrySDK is a Detector that provides information about
|
||||
// the OpenTelemetry SDK used. This Detector is included as a
|
||||
// builtin. If these resource attributes are not wanted, use
|
||||
// the WithTelemetrySDK(nil) or WithoutBuiltin() options to
|
||||
// explicitly disable them.
|
||||
TelemetrySDK struct{}
|
||||
telemetrySDK struct{}
|
||||
|
||||
// Host is a Detector that provides information about the host
|
||||
// host is a Detector that provides information about the host
|
||||
// being run on. This Detector is included as a builtin. If
|
||||
// these resource attributes are not wanted, use the
|
||||
// WithHost(nil) or WithoutBuiltin() options to explicitly
|
||||
// disable them.
|
||||
Host struct{}
|
||||
host struct{}
|
||||
|
||||
stringDetector struct {
|
||||
K attribute.Key
|
||||
F func() (string, error)
|
||||
schemaURL string
|
||||
K attribute.Key
|
||||
F func() (string, error)
|
||||
}
|
||||
|
||||
defaultServiceNameDetector struct{}
|
||||
)
|
||||
|
||||
var (
|
||||
_ Detector = TelemetrySDK{}
|
||||
_ Detector = Host{}
|
||||
_ Detector = telemetrySDK{}
|
||||
_ Detector = host{}
|
||||
_ Detector = stringDetector{}
|
||||
_ Detector = defaultServiceNameDetector{}
|
||||
)
|
||||
|
||||
// Detect returns a *Resource that describes the OpenTelemetry SDK used.
|
||||
func (TelemetrySDK) Detect(context.Context) (*Resource, error) {
|
||||
func (telemetrySDK) Detect(context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.TelemetrySDKNameKey.String("opentelemetry"),
|
||||
semconv.TelemetrySDKLanguageKey.String("go"),
|
||||
semconv.TelemetrySDKVersionKey.String(otel.Version()),
|
||||
@ -65,17 +67,19 @@ func (TelemetrySDK) Detect(context.Context) (*Resource, error) {
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the host being run on.
|
||||
func (Host) Detect(ctx context.Context) (*Resource, error) {
|
||||
return StringDetector(semconv.HostNameKey, os.Hostname).Detect(ctx)
|
||||
func (host) Detect(ctx context.Context) (*Resource, error) {
|
||||
return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
|
||||
}
|
||||
|
||||
// StringDetector returns a Detector that will produce a *Resource
|
||||
// containing the string as a value corresponding to k.
|
||||
func StringDetector(k attribute.Key, f func() (string, error)) Detector {
|
||||
return stringDetector{K: k, F: f}
|
||||
// containing the string as a value corresponding to k. The resulting Resource
|
||||
// will have the specified schemaURL.
|
||||
func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
|
||||
return stringDetector{schemaURL: schemaURL, K: k, F: f}
|
||||
}
|
||||
|
||||
// Detect implements Detector.
|
||||
// Detect returns a *Resource that describes the string as a value
|
||||
// corresponding to attribute.Key as well as the specific schemaURL.
|
||||
func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
value, err := sd.F()
|
||||
if err != nil {
|
||||
@ -85,12 +89,13 @@ func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
if !a.Valid() {
|
||||
return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
|
||||
}
|
||||
return NewWithAttributes(sd.K.String(value)), nil
|
||||
return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
|
||||
}
|
||||
|
||||
// Detect implements Detector
|
||||
// Detect implements Detector.
|
||||
func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
return StringDetector(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey,
|
||||
func() (string, error) {
|
||||
executable, err := os.Executable()
|
||||
|
228
vendor/go.opentelemetry.io/otel/sdk/resource/config.go
generated
vendored
@ -24,35 +24,16 @@ import (
|
||||
type config struct {
|
||||
// detectors that will be evaluated.
|
||||
detectors []Detector
|
||||
|
||||
// telemetrySDK is used to specify non-default
|
||||
// `telemetry.sdk.*` attributes.
|
||||
telemetrySDK Detector
|
||||
|
||||
// HostResource is used to specify non-default `host.*`
|
||||
// attributes.
|
||||
host Detector
|
||||
|
||||
// FromEnv is used to specify non-default OTEL_RESOURCE_ATTRIBUTES
|
||||
// attributes.
|
||||
fromEnv Detector
|
||||
// SchemaURL to associate with the Resource.
|
||||
schemaURL string
|
||||
}
|
||||
|
||||
// Option is the interface that applies a configuration option.
|
||||
type Option interface {
|
||||
// Apply sets the Option value of a config.
|
||||
Apply(*config)
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
// apply sets the Option value of a config.
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
type option struct{}
|
||||
|
||||
func (option) private() {}
|
||||
|
||||
// WithAttributes adds attributes to the configured Resource.
|
||||
func WithAttributes(attributes ...attribute.KeyValue) Option {
|
||||
return WithDetectors(detectAttributes{attributes})
|
||||
@ -63,7 +44,7 @@ type detectAttributes struct {
|
||||
}
|
||||
|
||||
func (d detectAttributes) Detect(context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(d.attributes...), nil
|
||||
return NewSchemaless(d.attributes...), nil
|
||||
}
|
||||
|
||||
// WithDetectors adds detectors to be evaluated for the configured resource.
|
||||
@ -72,94 +53,147 @@ func WithDetectors(detectors ...Detector) Option {
|
||||
}
|
||||
|
||||
type detectorsOption struct {
|
||||
option
|
||||
detectors []Detector
|
||||
}
|
||||
|
||||
// Apply implements Option.
|
||||
func (o detectorsOption) Apply(cfg *config) {
|
||||
func (o detectorsOption) apply(cfg config) config {
|
||||
cfg.detectors = append(cfg.detectors, o.detectors...)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithTelemetrySDK overrides the builtin `telemetry.sdk.*`
|
||||
// attributes. Use nil to disable these attributes entirely.
|
||||
func WithTelemetrySDK(d Detector) Option {
|
||||
return telemetrySDKOption{Detector: d}
|
||||
// WithFromEnv adds attributes from environment variables to the configured resource.
|
||||
func WithFromEnv() Option {
|
||||
return WithDetectors(fromEnv{})
|
||||
}
|
||||
|
||||
type telemetrySDKOption struct {
|
||||
option
|
||||
Detector
|
||||
// WithHost adds attributes from the host to the configured resource.
|
||||
func WithHost() Option {
|
||||
return WithDetectors(host{})
|
||||
}
|
||||
|
||||
// Apply implements Option.
|
||||
func (o telemetrySDKOption) Apply(cfg *config) {
|
||||
cfg.telemetrySDK = o.Detector
|
||||
// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
|
||||
func WithTelemetrySDK() Option {
|
||||
return WithDetectors(telemetrySDK{})
|
||||
}
|
||||
|
||||
// WithHost overrides the builtin `host.*` attributes. Use nil to
|
||||
// disable these attributes entirely.
|
||||
func WithHost(d Detector) Option {
|
||||
return hostOption{Detector: d}
|
||||
// WithSchemaURL sets the schema URL for the configured resource.
|
||||
func WithSchemaURL(schemaURL string) Option {
|
||||
return schemaURLOption(schemaURL)
|
||||
}
|
||||
|
||||
type hostOption struct {
|
||||
option
|
||||
Detector
|
||||
type schemaURLOption string
|
||||
|
||||
func (o schemaURLOption) apply(cfg config) config {
|
||||
cfg.schemaURL = string(o)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Apply implements Option.
|
||||
func (o hostOption) Apply(cfg *config) {
|
||||
cfg.host = o.Detector
|
||||
}
|
||||
|
||||
// WithFromEnv overrides the builtin detector for
|
||||
// OTEL_RESOURCE_ATTRIBUTES. Use nil to disable environment checking.
|
||||
func WithFromEnv(d Detector) Option {
|
||||
return fromEnvOption{Detector: d}
|
||||
}
|
||||
|
||||
type fromEnvOption struct {
|
||||
option
|
||||
Detector
|
||||
}
|
||||
|
||||
// Apply implements Option.
|
||||
func (o fromEnvOption) Apply(cfg *config) {
|
||||
cfg.fromEnv = o.Detector
|
||||
}
|
||||
|
||||
// WithoutBuiltin disables all the builtin detectors, including the
|
||||
// telemetry.sdk.*, host.*, and the environment detector.
|
||||
func WithoutBuiltin() Option {
|
||||
return noBuiltinOption{}
|
||||
}
|
||||
|
||||
type noBuiltinOption struct {
|
||||
option
|
||||
}
|
||||
|
||||
// Apply implements Option.
|
||||
func (o noBuiltinOption) Apply(cfg *config) {
|
||||
cfg.host = nil
|
||||
cfg.telemetrySDK = nil
|
||||
cfg.fromEnv = nil
|
||||
}
|
||||
|
||||
// New returns a Resource combined from the provided attributes,
|
||||
// user-provided detectors and builtin detectors.
|
||||
func New(ctx context.Context, opts ...Option) (*Resource, error) {
|
||||
cfg := config{
|
||||
telemetrySDK: TelemetrySDK{},
|
||||
host: Host{},
|
||||
fromEnv: FromEnv{},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.Apply(&cfg)
|
||||
}
|
||||
detectors := append(
|
||||
[]Detector{cfg.telemetrySDK, cfg.host, cfg.fromEnv},
|
||||
cfg.detectors...,
|
||||
// WithOS adds all the OS attributes to the configured Resource.
|
||||
// See individual WithOS* functions to configure specific attributes.
|
||||
func WithOS() Option {
|
||||
return WithDetectors(
|
||||
osTypeDetector{},
|
||||
osDescriptionDetector{},
|
||||
)
|
||||
return Detect(ctx, detectors...)
|
||||
}
|
||||
|
||||
// WithOSType adds an attribute with the operating system type to the configured Resource.
|
||||
func WithOSType() Option {
|
||||
return WithDetectors(osTypeDetector{})
|
||||
}
|
||||
|
||||
// WithOSDescription adds an attribute with the operating system description to the
|
||||
// configured Resource. The formatted string is equivalent to the output of the
|
||||
// `uname -snrvm` command.
|
||||
func WithOSDescription() Option {
|
||||
return WithDetectors(osDescriptionDetector{})
|
||||
}
|
||||
|
||||
// WithProcess adds all the Process attributes to the configured Resource.
|
||||
//
|
||||
// Warning! This option will include process command line arguments. If these
|
||||
// contain sensitive information it will be included in the exported resource.
|
||||
//
|
||||
// This option is equivalent to calling WithProcessPID,
|
||||
// WithProcessExecutableName, WithProcessExecutablePath,
|
||||
// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
|
||||
// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
|
||||
// option function for information about what resource attributes each
|
||||
// includes.
|
||||
func WithProcess() Option {
|
||||
return WithDetectors(
|
||||
processPIDDetector{},
|
||||
processExecutableNameDetector{},
|
||||
processExecutablePathDetector{},
|
||||
processCommandArgsDetector{},
|
||||
processOwnerDetector{},
|
||||
processRuntimeNameDetector{},
|
||||
processRuntimeVersionDetector{},
|
||||
processRuntimeDescriptionDetector{},
|
||||
)
|
||||
}
|
||||
|
||||
// WithProcessPID adds an attribute with the process identifier (PID) to the
|
||||
// configured Resource.
|
||||
func WithProcessPID() Option {
|
||||
return WithDetectors(processPIDDetector{})
|
||||
}
|
||||
|
||||
// WithProcessExecutableName adds an attribute with the name of the process
|
||||
// executable to the configured Resource.
|
||||
func WithProcessExecutableName() Option {
|
||||
return WithDetectors(processExecutableNameDetector{})
|
||||
}
|
||||
|
||||
// WithProcessExecutablePath adds an attribute with the full path to the process
|
||||
// executable to the configured Resource.
|
||||
func WithProcessExecutablePath() Option {
|
||||
return WithDetectors(processExecutablePathDetector{})
|
||||
}
|
||||
|
||||
// WithProcessCommandArgs adds an attribute with all the command arguments (including
|
||||
// the command/executable itself) as received by the process to the configured
|
||||
// Resource.
|
||||
//
|
||||
// Warning! This option will include process command line arguments. If these
|
||||
// contain sensitive information it will be included in the exported resource.
|
||||
func WithProcessCommandArgs() Option {
|
||||
return WithDetectors(processCommandArgsDetector{})
|
||||
}
|
||||
|
||||
// WithProcessOwner adds an attribute with the username of the user that owns the process
|
||||
// to the configured Resource.
|
||||
func WithProcessOwner() Option {
|
||||
return WithDetectors(processOwnerDetector{})
|
||||
}
|
||||
|
||||
// WithProcessRuntimeName adds an attribute with the name of the runtime of this
|
||||
// process to the configured Resource.
|
||||
func WithProcessRuntimeName() Option {
|
||||
return WithDetectors(processRuntimeNameDetector{})
|
||||
}
|
||||
|
||||
// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
|
||||
// this process to the configured Resource.
|
||||
func WithProcessRuntimeVersion() Option {
|
||||
return WithDetectors(processRuntimeVersionDetector{})
|
||||
}
|
||||
|
||||
// WithProcessRuntimeDescription adds an attribute with an additional description
|
||||
// about the runtime of the process to the configured Resource.
|
||||
func WithProcessRuntimeDescription() Option {
|
||||
return WithDetectors(processRuntimeDescriptionDetector{})
|
||||
}
|
||||
|
||||
// WithContainer adds all the Container attributes to the configured Resource.
|
||||
// See individual WithContainer* functions to configure specific attributes.
|
||||
func WithContainer() Option {
|
||||
return WithDetectors(
|
||||
cgroupContainerIDDetector{},
|
||||
)
|
||||
}
|
||||
|
||||
// WithContainerID adds an attribute with the id of the container to the configured Resource.
|
||||
func WithContainerID() Option {
|
||||
return WithDetectors(cgroupContainerIDDetector{})
|
||||
}
|
||||
|
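The reworked config.go turns every builtin detector into an opt-in option instead of the old WithTelemetrySDK(d)/WithHost(d)/WithoutBuiltin() overrides. A short sketch (not part of the diff) of how the options shown above combine; the service name attribute is illustrative only:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_RESOURCE_ATTRIBUTES / OTEL_SERVICE_NAME
		resource.WithTelemetrySDK(), // telemetry.sdk.* attributes
		resource.WithHost(),         // host.name
		resource.WithProcess(),      // process.* (includes command-line arguments)
		resource.WithOS(),           // os.type and os.description
		resource.WithContainer(),    // container.id from /proc/self/cgroup
		resource.WithSchemaURL(semconv.SchemaURL),
		resource.WithAttributes(semconv.ServiceNameKey.String("example-service")),
	)
	if err != nil {
		log.Println("resource detection:", err)
	}
	log.Println(res)
}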
100
vendor/go.opentelemetry.io/otel/sdk/resource/container.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
)
|
||||
|
||||
type containerIDProvider func() (string, error)
|
||||
|
||||
var (
|
||||
containerID containerIDProvider = getContainerIDFromCGroup
|
||||
cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)
|
||||
)
|
||||
|
||||
type cgroupContainerIDDetector struct{}
|
||||
|
||||
const cgroupPath = "/proc/self/cgroup"
|
||||
|
||||
// Detect returns a *Resource that describes the id of the container.
|
||||
// If no container id found, an empty resource will be returned.
|
||||
func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
containerID, err := containerID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if containerID == "" {
|
||||
return Empty(), nil
|
||||
}
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil
|
||||
}
|
||||
|
||||
var (
|
||||
defaultOSStat = os.Stat
|
||||
osStat = defaultOSStat
|
||||
|
||||
defaultOSOpen = func(name string) (io.ReadCloser, error) {
|
||||
return os.Open(name)
|
||||
}
|
||||
osOpen = defaultOSOpen
|
||||
)
|
||||
|
||||
// getContainerIDFromCGroup returns the id of the container from the cgroup file.
|
||||
// If no container id found, an empty string will be returned.
|
||||
func getContainerIDFromCGroup() (string, error) {
|
||||
if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
|
||||
// File does not exist, skip
|
||||
return "", nil
|
||||
}
|
||||
|
||||
file, err := osOpen(cgroupPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return getContainerIDFromReader(file), nil
|
||||
}
|
||||
|
||||
// getContainerIDFromReader returns the id of the container from reader.
|
||||
func getContainerIDFromReader(reader io.Reader) string {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
if id := getContainerIDFromLine(line); id != "" {
|
||||
return id
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// getContainerIDFromLine returns the id of the container from one string line.
|
||||
func getContainerIDFromLine(line string) string {
|
||||
matches := cgroupContainerIDRe.FindStringSubmatch(line)
|
||||
if len(matches) <= 1 {
|
||||
return ""
|
||||
}
|
||||
return matches[1]
|
||||
}
|
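The new container.go extracts the container id by applying one regular expression to each line of /proc/self/cgroup. A standalone sketch of that extraction, using the same regexp from the diff; the sample cgroup line is made up for illustration:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as cgroupContainerIDRe in container.go.
var cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)

// containerIDFromLine mirrors getContainerIDFromLine: it returns the first
// capture group, or "" when the line does not look like a container cgroup.
func containerIDFromLine(line string) string {
	matches := cgroupContainerIDRe.FindStringSubmatch(line)
	if len(matches) <= 1 {
		return ""
	}
	return matches[1]
}

func main() {
	line := "13:name=systemd:/docker/2f2c39d5e38c5c6344e2f8e4c3f6d1a2b3c4d5e6f708192a3b4c5d6e7f809102"
	fmt.Println(containerIDFromLine(line)) // prints the hex container id
}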
4
vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
generated
vendored
@ -14,10 +14,6 @@

// Package resource provides detecting and representing resources.
//
// This package is currently in a pre-GA phase. Backwards incompatible changes
// may be introduced in subsequent minor version releases as we work to track
// the evolving OpenTelemetry specification and user feedback.
//
// The fundamental struct is a Resource which holds identifying information
// about the entities for which telemetry is exported.
//
57
vendor/go.opentelemetry.io/otel/sdk/resource/env.go
generated
vendored
@ -21,37 +21,64 @@ import (
"strings"

"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

// envVar is the environment variable name OpenTelemetry Resource information can be assigned to.
const envVar = "OTEL_RESOURCE_ATTRIBUTES"
const (
// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES"

// svcNameKey is the environment variable name that Service Name information will be read from.
svcNameKey = "OTEL_SERVICE_NAME"
)

var (
// errMissingValue is returned when a resource value is missing.
errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
)

// FromEnv is a Detector that implements the Detector and collects
// fromEnv is a Detector that implements the Detector and collects
// resources from environment. This Detector is included as a
// builtin. If these resource attributes are not wanted, use the
// WithFromEnv(nil) or WithoutBuiltin() options to explicitly disable
// them.
type FromEnv struct{}
// builtin.
type fromEnv struct{}

// compile time assertion that FromEnv implements Detector interface
var _ Detector = FromEnv{}
// compile time assertion that FromEnv implements Detector interface.
var _ Detector = fromEnv{}

// Detect collects resources from environment
func (FromEnv) Detect(context.Context) (*Resource, error) {
attrs := strings.TrimSpace(os.Getenv(envVar))
// Detect collects resources from environment.
func (fromEnv) Detect(context.Context) (*Resource, error) {
attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
svcName := strings.TrimSpace(os.Getenv(svcNameKey))

if attrs == "" {
if attrs == "" && svcName == "" {
return Empty(), nil
}
return constructOTResources(attrs)

var res *Resource

if svcName != "" {
res = NewSchemaless(semconv.ServiceNameKey.String(svcName))
}

r2, err := constructOTResources(attrs)

// Ensure that the resource with the service name from OTEL_SERVICE_NAME
// takes precedence, if it was defined.
res, err2 := Merge(r2, res)

if err == nil {
err = err2
} else if err2 != nil {
err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
}

return res, err
}

func constructOTResources(s string) (*Resource, error) {
if s == "" {
return Empty(), nil
}
pairs := strings.Split(s, ",")
attrs := []attribute.KeyValue{}
var invalid []string
@ -68,5 +95,5 @@ func constructOTResources(s string) (*Resource, error) {
if len(invalid) > 0 {
err = fmt.Errorf("%w: %v", errMissingValue, invalid)
}
return NewWithAttributes(attrs...), err
return NewSchemaless(attrs...), err
}
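A small sketch (not part of the diff) of the behaviour the updated env detector describes: OTEL_SERVICE_NAME now overrides any service.name found in OTEL_RESOURCE_ATTRIBUTES. The environment values are set here only for the demonstration:

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=from-attrs,deployment.environment=dev")
	os.Setenv("OTEL_SERVICE_NAME", "from-service-name") // takes precedence

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	if err != nil {
		fmt.Println("detect:", err)
	}
	// Expect service.name=from-service-name and deployment.environment=dev.
	fmt.Println(res)
}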
68
vendor/go.opentelemetry.io/otel/sdk/resource/os.go
generated
vendored
@ -18,22 +18,80 @@ import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/semconv"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
)
|
||||
|
||||
type osDescriptionProvider func() (string, error)
|
||||
|
||||
var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
|
||||
|
||||
var osDescription = defaultOSDescriptionProvider
|
||||
|
||||
func setDefaultOSDescriptionProvider() {
|
||||
setOSDescriptionProvider(defaultOSDescriptionProvider)
|
||||
}
|
||||
|
||||
func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
|
||||
osDescription = osDescriptionProvider
|
||||
}
|
||||
|
||||
type osTypeDetector struct{}
|
||||
type osDescriptionDetector struct{}
|
||||
|
||||
// Detect returns a *Resource that describes the operating system type the
|
||||
// service is running on.
|
||||
func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
osType := runtimeOS()
|
||||
|
||||
osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
|
||||
|
||||
return NewWithAttributes(
|
||||
semconv.OSTypeKey.String(strings.ToLower(osType)),
|
||||
semconv.SchemaURL,
|
||||
osTypeAttribute,
|
||||
), nil
|
||||
}
|
||||
|
||||
// WithOSType adds an attribute with the operating system type to the configured Resource.
|
||||
func WithOSType() Option {
|
||||
return WithDetectors(osTypeDetector{})
|
||||
// Detect returns a *Resource that describes the operating system the
|
||||
// service is running on.
|
||||
func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
description, err := osDescription()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.OSDescriptionKey.String(description),
|
||||
), nil
|
||||
}
|
||||
|
||||
// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
|
||||
// into an OS type attribute with the corresponding value defined by the semantic
|
||||
// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
|
||||
// and used as the value for the returned OS type attribute.
|
||||
func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
|
||||
// the elements in this map are the intersection between
|
||||
// available GOOS values and defined semconv OS types
|
||||
osTypeAttributeMap := map[string]attribute.KeyValue{
|
||||
"darwin": semconv.OSTypeDarwin,
|
||||
"dragonfly": semconv.OSTypeDragonflyBSD,
|
||||
"freebsd": semconv.OSTypeFreeBSD,
|
||||
"linux": semconv.OSTypeLinux,
|
||||
"netbsd": semconv.OSTypeNetBSD,
|
||||
"openbsd": semconv.OSTypeOpenBSD,
|
||||
"solaris": semconv.OSTypeSolaris,
|
||||
"windows": semconv.OSTypeWindows,
|
||||
}
|
||||
|
||||
var osTypeAttribute attribute.KeyValue
|
||||
|
||||
if attr, ok := osTypeAttributeMap[osType]; ok {
|
||||
osTypeAttribute = attr
|
||||
} else {
|
||||
osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
|
||||
}
|
||||
|
||||
return osTypeAttribute
|
||||
}
|
||||
|
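The os.go change maps runtime.GOOS onto the predefined semconv OS type attributes, falling back to a lower-cased os.type value for unmapped platforms. A condensed sketch of that mapping (not part of the diff, and listing only a subset of the table for brevity):

package main

import (
	"fmt"
	"runtime"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

// osTypeAttribute mirrors mapRuntimeOSToSemconvOSType for a few GOOS values.
func osTypeAttribute(goos string) attribute.KeyValue {
	known := map[string]attribute.KeyValue{
		"darwin":  semconv.OSTypeDarwin,
		"linux":   semconv.OSTypeLinux,
		"windows": semconv.OSTypeWindows,
	}
	if attr, ok := known[goos]; ok {
		return attr
	}
	// Unknown GOOS values become a plain lower-cased os.type string.
	return semconv.OSTypeKey.String(strings.ToLower(goos))
}

func main() {
	fmt.Println(osTypeAttribute(runtime.GOOS))
	fmt.Println(osTypeAttribute("plan9")) // falls back to os.type="plan9"
}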
102
vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type plist struct {
|
||||
XMLName xml.Name `xml:"plist"`
|
||||
Dict dict `xml:"dict"`
|
||||
}
|
||||
|
||||
type dict struct {
|
||||
Key []string `xml:"key"`
|
||||
String []string `xml:"string"`
|
||||
}
|
||||
|
||||
// osRelease builds a string describing the operating system release based on the
|
||||
// contents of the property list (.plist) system files. If no .plist files are found,
|
||||
// or if the required properties to build the release description string are missing,
|
||||
// an empty string is returned instead. The generated string resembles the output of
|
||||
// the `sw_vers` commandline program, but in a single-line string. For more information
|
||||
// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
|
||||
func osRelease() string {
|
||||
file, err := getPlistFile()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
values, err := parsePlistFile(file)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return buildOSRelease(values)
|
||||
}
|
||||
|
||||
// getPlistFile returns a *os.File pointing to one of the well-known .plist files
|
||||
// available on macOS. If no file can be opened, it returns an error.
|
||||
func getPlistFile() (*os.File, error) {
|
||||
return getFirstAvailableFile([]string{
|
||||
"/System/Library/CoreServices/SystemVersion.plist",
|
||||
"/System/Library/CoreServices/ServerVersion.plist",
|
||||
})
|
||||
}
|
||||
|
||||
// parsePlistFile process the file pointed by `file` as a .plist file and returns
|
||||
// a map with the key-values for each pair of correlated <key> and <string> elements
|
||||
// contained in it.
|
||||
func parsePlistFile(file io.Reader) (map[string]string, error) {
|
||||
var v plist
|
||||
|
||||
err := xml.NewDecoder(file).Decode(&v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(v.Dict.Key) != len(v.Dict.String) {
|
||||
return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
|
||||
}
|
||||
|
||||
properties := make(map[string]string, len(v.Dict.Key))
|
||||
for i, key := range v.Dict.Key {
|
||||
properties[key] = v.Dict.String[i]
|
||||
}
|
||||
|
||||
return properties, nil
|
||||
}
|
||||
|
||||
// buildOSRelease builds a string describing the OS release based on the properties
|
||||
// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
|
||||
// and `ProductBuildVersion` properties. If some of these properties are not found,
|
||||
// it returns an empty string.
|
||||
func buildOSRelease(properties map[string]string) string {
|
||||
productName := properties["ProductName"]
|
||||
productVersion := properties["ProductVersion"]
|
||||
productBuildVersion := properties["ProductBuildVersion"]
|
||||
|
||||
if productName == "" || productVersion == "" || productBuildVersion == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
|
||||
}
|
154
vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
|
||||
// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
|
||||
|
||||
package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// osRelease builds a string describing the operating system release based on the
|
||||
// properties of the os-release file. If no os-release file is found, or if the
|
||||
// required properties to build the release description string are missing, an empty
|
||||
// string is returned instead. For more information about os-release files, see:
|
||||
// https://www.freedesktop.org/software/systemd/man/os-release.html
|
||||
func osRelease() string {
|
||||
file, err := getOSReleaseFile()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
values := parseOSReleaseFile(file)
|
||||
|
||||
return buildOSRelease(values)
|
||||
}
|
||||
|
||||
// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
|
||||
// files, according to their order of preference. If no file can be opened, it
|
||||
// returns an error.
|
||||
func getOSReleaseFile() (*os.File, error) {
|
||||
return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
|
||||
}
|
||||
|
||||
// parseOSReleaseFile process the file pointed by `file` as an os-release file and
|
||||
// returns a map with the key-values contained in it. Empty lines or lines starting
|
||||
// with a '#' character are ignored, as well as lines with the missing key=value
|
||||
// separator. Values are unquoted and unescaped.
|
||||
func parseOSReleaseFile(file io.Reader) map[string]string {
|
||||
values := make(map[string]string)
|
||||
scanner := bufio.NewScanner(file)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
if skip(line) {
|
||||
continue
|
||||
}
|
||||
|
||||
key, value, ok := parse(line)
|
||||
if ok {
|
||||
values[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
return values
|
||||
}
|
||||
|
||||
// skip returns true if the line is blank or starts with a '#' character, and
|
||||
// therefore should be skipped from processing.
|
||||
func skip(line string) bool {
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
return len(line) == 0 || strings.HasPrefix(line, "#")
|
||||
}
|
||||
|
||||
// parse attempts to split the provided line on the first '=' character, and then
|
||||
// sanitize each side of the split before returning them as a key-value pair.
|
||||
func parse(line string) (string, string, bool) {
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
|
||||
if len(parts) != 2 || len(parts[0]) == 0 {
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(parts[0])
|
||||
value := unescape(unquote(strings.TrimSpace(parts[1])))
|
||||
|
||||
return key, value, true
|
||||
}
|
||||
|
||||
// unquote checks whether the string `s` is quoted with double or single quotes
|
||||
// and, if so, returns a version of the string without them. Otherwise it returns
|
||||
// the provided string unchanged.
|
||||
func unquote(s string) string {
|
||||
if len(s) < 2 {
|
||||
return s
|
||||
}
|
||||
|
||||
if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
|
||||
return s[1 : len(s)-1]
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// unescape removes the `\` prefix from some characters that are expected
|
||||
// to have it added in front of them for escaping purposes.
|
||||
func unescape(s string) string {
|
||||
return strings.NewReplacer(
|
||||
`\$`, `$`,
|
||||
`\"`, `"`,
|
||||
`\'`, `'`,
|
||||
`\\`, `\`,
|
||||
"\\`", "`",
|
||||
).Replace(s)
|
||||
}
|
||||
|
||||
// buildOSRelease builds a string describing the OS release based on the properties
|
||||
// available on the provided map. It favors a combination of the `NAME` and `VERSION`
|
||||
// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
|
||||
// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
|
||||
// none of these properties are found, it returns an empty string.
|
||||
//
|
||||
// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
|
||||
// Linux distributions, it doesn't include the same detail that can be found on the
|
||||
// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
|
||||
// other properties can produce "pretty" redundant strings in some cases.
|
||||
func buildOSRelease(values map[string]string) string {
|
||||
var osRelease string
|
||||
|
||||
name := values["NAME"]
|
||||
version := values["VERSION"]
|
||||
|
||||
if version == "" {
|
||||
version = values["VERSION_ID"]
|
||||
}
|
||||
|
||||
if name != "" && version != "" {
|
||||
osRelease = fmt.Sprintf("%s %s", name, version)
|
||||
} else {
|
||||
osRelease = values["PRETTY_NAME"]
|
||||
}
|
||||
|
||||
return osRelease
|
||||
}
|
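The os-release parsing above splits each line on the first '=', trims both sides, and strips matching quotes. A condensed, standalone sketch of that line handling (not part of the diff; the sample lines mimic a typical /etc/os-release file):

package main

import (
	"fmt"
	"strings"
)

// parseLine is a simplified version of parse/unquote from os_release_unix.go.
func parseLine(line string) (string, string, bool) {
	parts := strings.SplitN(line, "=", 2)
	if len(parts) != 2 || len(parts[0]) == 0 {
		return "", "", false
	}
	key := strings.TrimSpace(parts[0])
	value := strings.TrimSpace(parts[1])
	// Strip matching single or double quotes around the value.
	if len(value) >= 2 && (value[0] == '"' || value[0] == '\'') && value[0] == value[len(value)-1] {
		value = value[1 : len(value)-1]
	}
	return key, value, true
}

func main() {
	lines := []string{`NAME="Ubuntu"`, `VERSION="22.04.3 LTS (Jammy Jellyfish)"`, `# a comment`}
	for _, line := range lines {
		if k, v, ok := parseLine(line); ok {
			fmt.Printf("%s => %s\n", k, v)
		}
	}
}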
90
vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
|
||||
|
||||
package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type unameProvider func(buf *unix.Utsname) (err error)
|
||||
|
||||
var defaultUnameProvider unameProvider = unix.Uname
|
||||
|
||||
var currentUnameProvider = defaultUnameProvider
|
||||
|
||||
func setDefaultUnameProvider() {
|
||||
setUnameProvider(defaultUnameProvider)
|
||||
}
|
||||
|
||||
func setUnameProvider(unameProvider unameProvider) {
|
||||
currentUnameProvider = unameProvider
|
||||
}
|
||||
|
||||
// platformOSDescription returns a human readable OS version information string.
|
||||
// The final string combines OS release information (where available) and the
|
||||
// result of the `uname` system call.
|
||||
func platformOSDescription() (string, error) {
|
||||
uname, err := uname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
osRelease := osRelease()
|
||||
if osRelease != "" {
|
||||
return fmt.Sprintf("%s (%s)", osRelease, uname), nil
|
||||
}
|
||||
|
||||
return uname, nil
|
||||
}
|
||||
|
||||
// uname issues a uname(2) system call (or equivalent on systems which doesn't
|
||||
// have one) and formats the output in a single string, similar to the output
|
||||
// of the `uname` commandline program. The final string resembles the one
|
||||
// obtained with a call to `uname -snrvm`.
|
||||
func uname() (string, error) {
|
||||
var utsName unix.Utsname
|
||||
|
||||
err := currentUnameProvider(&utsName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s %s %s %s %s",
|
||||
unix.ByteSliceToString(utsName.Sysname[:]),
|
||||
unix.ByteSliceToString(utsName.Nodename[:]),
|
||||
unix.ByteSliceToString(utsName.Release[:]),
|
||||
unix.ByteSliceToString(utsName.Version[:]),
|
||||
unix.ByteSliceToString(utsName.Machine[:]),
|
||||
), nil
|
||||
}
|
||||
|
||||
// getFirstAvailableFile returns an *os.File of the first available
|
||||
// file from a list of candidate file paths.
|
||||
func getFirstAvailableFile(candidates []string) (*os.File, error) {
|
||||
for _, c := range candidates {
|
||||
file, err := os.Open(c)
|
||||
if err == nil {
|
||||
return file, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no candidate file available: %v", candidates)
|
||||
}
|
34
vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !aix
// +build !darwin
// +build !dragonfly
// +build !freebsd
// +build !linux
// +build !netbsd
// +build !openbsd
// +build !solaris
// +build !windows
// +build !zos

package resource // import "go.opentelemetry.io/otel/sdk/resource"

// platformOSDescription is a placeholder implementation for OSes
// for which this project currently doesn't support os.description
// attribute detection. See build tags declaration early on this file
// for a list of unsupported OSes.
func platformOSDescription() (string, error) {
return "<unknown>", nil
}
101
vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sys/windows/registry"
|
||||
)
|
||||
|
||||
// platformOSDescription returns a human readable OS version information string.
|
||||
// It does so by querying registry values under the
|
||||
// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
|
||||
// resembles the one displayed by the Version Reporter Applet (winver.exe).
|
||||
func platformOSDescription() (string, error) {
|
||||
k, err := registry.OpenKey(
|
||||
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
defer k.Close()
|
||||
|
||||
var (
|
||||
productName = readProductName(k)
|
||||
displayVersion = readDisplayVersion(k)
|
||||
releaseID = readReleaseID(k)
|
||||
currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
|
||||
currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
|
||||
currentBuildNumber = readCurrentBuildNumber(k)
|
||||
ubr = readUBR(k)
|
||||
)
|
||||
|
||||
if displayVersion != "" {
|
||||
displayVersion += " "
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
|
||||
productName,
|
||||
displayVersion,
|
||||
releaseID,
|
||||
currentMajorVersionNumber,
|
||||
currentMinorVersionNumber,
|
||||
currentBuildNumber,
|
||||
ubr,
|
||||
), nil
|
||||
}
|
||||
|
||||
func getStringValue(name string, k registry.Key) string {
|
||||
value, _, _ := k.GetStringValue(name)
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func getIntegerValue(name string, k registry.Key) uint64 {
|
||||
value, _, _ := k.GetIntegerValue(name)
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func readProductName(k registry.Key) string {
|
||||
return getStringValue("ProductName", k)
|
||||
}
|
||||
|
||||
func readDisplayVersion(k registry.Key) string {
|
||||
return getStringValue("DisplayVersion", k)
|
||||
}
|
||||
|
||||
func readReleaseID(k registry.Key) string {
|
||||
return getStringValue("ReleaseID", k)
|
||||
}
|
||||
|
||||
func readCurrentMajorVersionNumber(k registry.Key) string {
|
||||
return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
|
||||
}
|
||||
|
||||
func readCurrentMinorVersionNumber(k registry.Key) string {
|
||||
return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
|
||||
}
|
||||
|
||||
func readCurrentBuildNumber(k registry.Key) string {
|
||||
return getStringValue("CurrentBuildNumber", k)
|
||||
}
|
||||
|
||||
func readUBR(k registry.Key) string {
|
||||
return strconv.FormatUint(getIntegerValue("UBR", k), 10)
|
||||
}
|
87
vendor/go.opentelemetry.io/otel/sdk/resource/process.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"go.opentelemetry.io/otel/semconv"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
)
|
||||
|
||||
type pidProvider func() int
|
||||
@ -39,7 +39,12 @@ var (
|
||||
defaultExecutablePathProvider executablePathProvider = os.Executable
|
||||
defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
|
||||
defaultOwnerProvider ownerProvider = user.Current
|
||||
defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler }
|
||||
defaultRuntimeNameProvider runtimeNameProvider = func() string {
|
||||
if runtime.Compiler == "gc" {
|
||||
return "go"
|
||||
}
|
||||
return runtime.Compiler
|
||||
}
|
||||
defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
|
||||
defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
|
||||
defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
|
||||
@ -115,14 +120,14 @@ type processRuntimeDescriptionDetector struct{}
|
||||
// Detect returns a *Resource that describes the process identifier (PID) of the
|
||||
// executing process.
|
||||
func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.ProcessPIDKey.Int(pid())), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the name of the process executable.
|
||||
func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
executableName := filepath.Base(commandArgs()[0])
|
||||
|
||||
return NewWithAttributes(semconv.ProcessExecutableNameKey.String(executableName)), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the full path of the process executable.
|
||||
@ -132,13 +137,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewWithAttributes(semconv.ProcessExecutablePathKey.String(executablePath)), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes all the command arguments as received
|
||||
// by the process.
|
||||
func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.ProcessCommandArgsKey.Array(commandArgs())), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the username of the user that owns the
|
||||
@ -149,18 +154,18 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewWithAttributes(semconv.ProcessOwnerKey.String(owner.Username)), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the name of the compiler used to compile
|
||||
// this process image.
|
||||
func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.ProcessRuntimeNameKey.String(runtimeName())), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the version of the runtime of this process.
|
||||
func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
|
||||
return NewWithAttributes(semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil
|
||||
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil
|
||||
}
|
||||
|
||||
// Detect returns a *Resource that describes the runtime of this process.
|
||||
@ -169,69 +174,7 @@ func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource,
|
||||
"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
|
||||
|
||||
return NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription),
|
||||
), nil
|
||||
}
|
||||
|
||||
// WithProcessPID adds an attribute with the process identifier (PID) to the
|
||||
// configured Resource.
|
||||
func WithProcessPID() Option {
|
||||
return WithDetectors(processPIDDetector{})
|
||||
}
|
||||
|
||||
// WithProcessExecutableName adds an attribute with the name of the process
|
||||
// executable to the configured Resource.
|
||||
func WithProcessExecutableName() Option {
|
||||
return WithDetectors(processExecutableNameDetector{})
|
||||
}
|
||||
|
||||
// WithProcessExecutablePath adds an attribute with the full path to the process
|
||||
// executable to the configured Resource.
|
||||
func WithProcessExecutablePath() Option {
|
||||
return WithDetectors(processExecutablePathDetector{})
|
||||
}
|
||||
|
||||
// WithProcessCommandArgs adds an attribute with all the command arguments (including
|
||||
// the command/executable itself) as received by the process the configured Resource.
|
||||
func WithProcessCommandArgs() Option {
|
||||
return WithDetectors(processCommandArgsDetector{})
|
||||
}
|
||||
|
||||
// WithProcessOwner adds an attribute with the username of the user that owns the process
|
||||
// to the configured Resource.
|
||||
func WithProcessOwner() Option {
|
||||
return WithDetectors(processOwnerDetector{})
|
||||
}
|
||||
|
||||
// WithProcessRuntimeName adds an attribute with the name of the runtime of this
|
||||
// process to the configured Resource.
|
||||
func WithProcessRuntimeName() Option {
|
||||
return WithDetectors(processRuntimeNameDetector{})
|
||||
}
|
||||
|
||||
// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
|
||||
// this process to the configured Resource.
|
||||
func WithProcessRuntimeVersion() Option {
|
||||
return WithDetectors(processRuntimeVersionDetector{})
|
||||
}
|
||||
|
||||
// WithProcessRuntimeDescription adds an attribute with an additional description
|
||||
// about the runtime of the process to the configured Resource.
|
||||
func WithProcessRuntimeDescription() Option {
|
||||
return WithDetectors(processRuntimeDescriptionDetector{})
|
||||
}
|
||||
|
||||
// WithProcess adds all the Process attributes to the configured Resource.
|
||||
// See individual WithProcess* functions to configure specific attributes.
|
||||
func WithProcess() Option {
|
||||
return WithDetectors(
|
||||
processPIDDetector{},
|
||||
processExecutableNameDetector{},
|
||||
processExecutablePathDetector{},
|
||||
processCommandArgsDetector{},
|
||||
processOwnerDetector{},
|
||||
processRuntimeNameDetector{},
|
||||
processRuntimeVersionDetector{},
|
||||
processRuntimeDescriptionDetector{},
|
||||
)
|
||||
}
|
||||
|
140
vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
generated
vendored
@ -16,6 +16,9 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
@ -29,24 +32,53 @@ import (
|
||||
// (`*resource.Resource`). The `nil` value is equivalent to an empty
|
||||
// Resource.
|
||||
type Resource struct {
|
||||
attrs attribute.Set
|
||||
attrs attribute.Set
|
||||
schemaURL string
|
||||
}
|
||||
|
||||
var (
|
||||
emptyResource Resource
|
||||
|
||||
defaultResource *Resource = func(r *Resource, err error) *Resource {
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
return r
|
||||
}(Detect(context.Background(), defaultServiceNameDetector{}, FromEnv{}, TelemetrySDK{}))
|
||||
emptyResource Resource
|
||||
defaultResource *Resource
|
||||
defaultResourceOnce sync.Once
|
||||
)
|
||||
|
||||
// NewWithAttributes creates a resource from attrs. If attrs contains
|
||||
// duplicate keys, the last value will be used. If attrs contains any invalid
|
||||
// items those items will be dropped.
|
||||
func NewWithAttributes(attrs ...attribute.KeyValue) *Resource {
|
||||
var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL")
|
||||
|
||||
// New returns a Resource combined from the user-provided detectors.
|
||||
func New(ctx context.Context, opts ...Option) (*Resource, error) {
|
||||
cfg := config{}
|
||||
for _, opt := range opts {
|
||||
cfg = opt.apply(cfg)
|
||||
}
|
||||
|
||||
resource, err := Detect(ctx, cfg.detectors...)
|
||||
|
||||
var err2 error
|
||||
resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL})
|
||||
if err == nil {
|
||||
err = err2
|
||||
} else if err2 != nil {
|
||||
err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
|
||||
}
|
||||
|
||||
return resource, err
|
||||
}
|
||||
|
||||
// NewWithAttributes creates a resource from attrs and associates the resource with a
|
||||
// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
|
||||
// contains any invalid items those items will be dropped. The attrs are assumed to be
|
||||
// in a schema identified by schemaURL.
|
||||
func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
|
||||
resource := NewSchemaless(attrs...)
|
||||
resource.schemaURL = schemaURL
|
||||
return resource
|
||||
}
|
||||
|
||||
// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
|
||||
// the last value will be used. If attrs contains any invalid items those items will
|
||||
// be dropped. The resource will not be associated with a schema URL. If the schema
|
||||
// of the attrs is known use NewWithAttributes instead.
|
||||
func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
|
||||
if len(attrs) == 0 {
|
||||
return &emptyResource
|
||||
}
|
||||
@ -62,7 +94,7 @@ func NewWithAttributes(attrs ...attribute.KeyValue) *Resource {
|
||||
return &emptyResource
|
||||
}
|
||||
|
||||
return &Resource{s} //nolint
|
||||
return &Resource{attrs: s} //nolint
|
||||
}
|
||||
|
||||
// String implements the Stringer interface and provides a
|
||||
@ -77,6 +109,17 @@ func (r *Resource) String() string {
|
||||
return r.attrs.Encoded(attribute.DefaultEncoder())
|
||||
}
|
||||
|
||||
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
|
||||
func (r *Resource) MarshalLog() interface{} {
|
||||
return struct {
|
||||
Attributes attribute.Set
|
||||
SchemaURL string
|
||||
}{
|
||||
Attributes: r.attrs,
|
||||
SchemaURL: r.schemaURL,
|
||||
}
|
||||
}
|
||||
|
||||
// Attributes returns a copy of attributes from the resource in a sorted order.
|
||||
// To avoid allocating a new slice, use an iterator.
|
||||
func (r *Resource) Attributes() []attribute.KeyValue {
|
||||
@ -86,7 +129,15 @@ func (r *Resource) Attributes() []attribute.KeyValue {
|
||||
return r.attrs.ToSlice()
|
||||
}
|
||||
|
||||
// Iter returns an interator of the Resource attributes.
|
||||
// SchemaURL returns the schema URL associated with Resource r.
|
||||
func (r *Resource) SchemaURL() string {
|
||||
if r == nil {
|
||||
return ""
|
||||
}
|
||||
return r.schemaURL
|
||||
}
|
||||
|
||||
// Iter returns an iterator of the Resource attributes.
|
||||
// This is ideal to use if you do not want a copy of the attributes.
|
||||
func (r *Resource) Iter() attribute.Iterator {
|
||||
if r == nil {
|
||||
@ -111,15 +162,33 @@ func (r *Resource) Equal(eq *Resource) bool {
|
||||
// If there are common keys between resource a and b, then the value
|
||||
// from resource b will overwrite the value from resource a, even
|
||||
// if resource b's value is empty.
|
||||
func Merge(a, b *Resource) *Resource {
|
||||
//
|
||||
// The SchemaURL of the resources will be merged according to the spec rules:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge
|
||||
// If the resources have different non-empty schemaURL an empty resource and an error
|
||||
// will be returned.
|
||||
func Merge(a, b *Resource) (*Resource, error) {
|
||||
if a == nil && b == nil {
|
||||
return Empty()
|
||||
return Empty(), nil
|
||||
}
|
||||
if a == nil {
|
||||
return b
|
||||
return b, nil
|
||||
}
|
||||
if b == nil {
|
||||
return a
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Merge the schema URL.
|
||||
var schemaURL string
|
||||
switch true {
|
||||
case a.schemaURL == "":
|
||||
schemaURL = b.schemaURL
|
||||
case b.schemaURL == "":
|
||||
schemaURL = a.schemaURL
|
||||
case a.schemaURL == b.schemaURL:
|
||||
schemaURL = a.schemaURL
|
||||
default:
|
||||
return Empty(), errMergeConflictSchemaURL
|
||||
}
|
||||
|
||||
// Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
|
||||
@ -127,42 +196,59 @@ func Merge(a, b *Resource) *Resource {
|
||||
mi := attribute.NewMergeIterator(b.Set(), a.Set())
|
||||
combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
|
||||
for mi.Next() {
|
||||
combine = append(combine, mi.Label())
|
||||
combine = append(combine, mi.Attribute())
|
||||
}
|
||||
return NewWithAttributes(combine...)
|
||||
merged := NewWithAttributes(schemaURL, combine...)
|
||||
return merged, nil
|
||||
}
|
||||
|
||||
// Empty returns an instance of Resource with no attributes. It is
|
||||
// Empty returns an instance of Resource with no attributes. It is
|
||||
// equivalent to a `nil` Resource.
|
||||
func Empty() *Resource {
|
||||
return &emptyResource
|
||||
}
|
||||
|
||||
// Default returns an instance of Resource with a default
|
||||
// "service.name" and OpenTelemetrySDK attributes
|
||||
// "service.name" and OpenTelemetrySDK attributes.
|
||||
func Default() *Resource {
|
||||
defaultResourceOnce.Do(func() {
|
||||
var err error
|
||||
defaultResource, err = Detect(
|
||||
context.Background(),
|
||||
defaultServiceNameDetector{},
|
||||
fromEnv{},
|
||||
telemetrySDK{},
|
||||
)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
// If Detect did not return a valid resource, fall back to emptyResource.
|
||||
if defaultResource == nil {
|
||||
defaultResource = &emptyResource
|
||||
}
|
||||
})
|
||||
return defaultResource
|
||||
}
|
||||
|
||||
// Environment returns an instance of Resource with attributes
|
||||
// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
|
||||
func Environment() *Resource {
|
||||
detector := &FromEnv{}
|
||||
detector := &fromEnv{}
|
||||
resource, err := detector.Detect(context.Background())
|
||||
if err == nil {
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
return resource
|
||||
}
|
||||
|
||||
// Equivalent returns an object that can be compared for equality
|
||||
// between two resources. This value is suitable for use as a key in
|
||||
// between two resources. This value is suitable for use as a key in
|
||||
// a map.
|
||||
func (r *Resource) Equivalent() attribute.Distinct {
|
||||
return r.Set().Equivalent()
|
||||
}
|
||||
|
||||
// Set returns the equivalent *attribute.Set of this resources attributes.
|
||||
// Set returns the equivalent *attribute.Set of this resource's attributes.
|
||||
func (r *Resource) Set() *attribute.Set {
|
||||
if r == nil {
|
||||
r = Empty()
|
||||
|
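Merge now returns (*Resource, error) and refuses to combine resources that carry different non-empty schema URLs. A short sketch (not part of the diff) of the new signature; the attribute values are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	a := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceNameKey.String("svc-a"),
		attribute.String("team", "storage"))
	b := resource.NewSchemaless(semconv.ServiceNameKey.String("svc-b"))

	merged, err := resource.Merge(a, b)
	if err != nil {
		// Happens when a and b declare different non-empty schema URLs.
		fmt.Println("merge failed:", err)
		return
	}
	// service.name comes from b (last value wins); the schema URL comes from a.
	fmt.Println(merged.SchemaURL(), merged)
}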
91
vendor/go.opentelemetry.io/otel/sdk/trace/attributesmap.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// attributesMap is a capped map of attributes, holding the most recent attributes.
// Eviction is done via an LRU method; the oldest entry is removed to create room for a new entry.
// Updates are allowed and they refresh the usage of the key.
//
// This is based on https://github.com/hashicorp/golang-lru/blob/master/simplelru/lru.go
// with a subset of its operations, and is specific to holding attribute.KeyValue.
|
||||
type attributesMap struct {
|
||||
attributes map[attribute.Key]*list.Element
|
||||
evictList *list.List
|
||||
droppedCount int
|
||||
capacity int
|
||||
}
|
||||
|
||||
func newAttributesMap(capacity int) *attributesMap {
|
||||
lm := &attributesMap{
|
||||
attributes: make(map[attribute.Key]*list.Element),
|
||||
evictList: list.New(),
|
||||
capacity: capacity,
|
||||
}
|
||||
return lm
|
||||
}
|
||||
|
||||
func (am *attributesMap) add(kv attribute.KeyValue) {
|
||||
// Check for existing item
|
||||
if ent, ok := am.attributes[kv.Key]; ok {
|
||||
am.evictList.MoveToFront(ent)
|
||||
ent.Value = &kv
|
||||
return
|
||||
}
|
||||
|
||||
// Add new item
|
||||
entry := am.evictList.PushFront(&kv)
|
||||
am.attributes[kv.Key] = entry
|
||||
|
||||
// Verify size not exceeded
|
||||
if am.evictList.Len() > am.capacity {
|
||||
am.removeOldest()
|
||||
am.droppedCount++
|
||||
}
|
||||
}
|
||||
|
||||
// toKeyValue copies the attributesMap into a slice of attribute.KeyValue and
|
||||
// returns it. If the map is empty, a nil is returned.
|
||||
// TODO: Is it more efficient to return a pointer to the slice?
|
||||
func (am *attributesMap) toKeyValue() []attribute.KeyValue {
|
||||
len := am.evictList.Len()
|
||||
if len == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
attributes := make([]attribute.KeyValue, 0, len)
|
||||
for ent := am.evictList.Back(); ent != nil; ent = ent.Prev() {
|
||||
if value, ok := ent.Value.(*attribute.KeyValue); ok {
|
||||
attributes = append(attributes, *value)
|
||||
}
|
||||
}
|
||||
|
||||
return attributes
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (am *attributesMap) removeOldest() {
|
||||
ent := am.evictList.Back()
|
||||
if ent != nil {
|
||||
am.evictList.Remove(ent)
|
||||
kv := ent.Value.(*attribute.KeyValue)
|
||||
delete(am.attributes, kv.Key)
|
||||
}
|
||||
}
|
186
vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
generated
vendored
186
vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
generated
vendored
@ -22,17 +22,24 @@ import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/internal/env"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Defaults for BatchSpanProcessorOptions.
|
||||
const (
|
||||
DefaultMaxQueueSize = 2048
|
||||
DefaultBatchTimeout = 5000 * time.Millisecond
|
||||
DefaultExportTimeout = 30000 * time.Millisecond
|
||||
DefaultScheduleDelay = 5000
|
||||
DefaultExportTimeout = 30000
|
||||
DefaultMaxExportBatchSize = 512
|
||||
)
|
||||
|
||||
// BatchSpanProcessorOption configures a BatchSpanProcessor.
|
||||
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
|
||||
|
||||
// BatchSpanProcessorOptions is configuration settings for a
|
||||
// BatchSpanProcessor.
|
||||
type BatchSpanProcessorOptions struct {
|
||||
// MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
|
||||
// queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
|
||||
@ -63,15 +70,15 @@ type BatchSpanProcessorOptions struct {
|
||||
}
|
||||
|
||||
// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
|
||||
// SpanSnapshots and sends them to a trace.Exporter when complete.
|
||||
// spans and sends them to a trace.Exporter when complete.
|
||||
type batchSpanProcessor struct {
|
||||
e SpanExporter
|
||||
o BatchSpanProcessorOptions
|
||||
|
||||
queue chan *SpanSnapshot
|
||||
queue chan ReadOnlySpan
|
||||
dropped uint32
|
||||
|
||||
batch []*SpanSnapshot
|
||||
batch []ReadOnlySpan
|
||||
batchMutex sync.Mutex
|
||||
timer *time.Timer
|
||||
stopWait sync.WaitGroup
|
||||
@ -86,11 +93,22 @@ var _ SpanProcessor = (*batchSpanProcessor)(nil)
|
||||
//
|
||||
// If the exporter is nil, the span processor will perform no action.
|
||||
func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
|
||||
maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
|
||||
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
|
||||
|
||||
if maxExportBatchSize > maxQueueSize {
|
||||
if DefaultMaxExportBatchSize > maxQueueSize {
|
||||
maxExportBatchSize = maxQueueSize
|
||||
} else {
|
||||
maxExportBatchSize = DefaultMaxExportBatchSize
|
||||
}
|
||||
}
|
||||
|
||||
o := BatchSpanProcessorOptions{
|
||||
BatchTimeout: DefaultBatchTimeout,
|
||||
ExportTimeout: DefaultExportTimeout,
|
||||
MaxQueueSize: DefaultMaxQueueSize,
|
||||
MaxExportBatchSize: DefaultMaxExportBatchSize,
|
||||
BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
|
||||
ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
|
||||
MaxQueueSize: maxQueueSize,
|
||||
MaxExportBatchSize: maxExportBatchSize,
|
||||
}
|
||||
for _, opt := range options {
|
||||
opt(&o)
|
||||
@ -98,9 +116,9 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
|
||||
bsp := &batchSpanProcessor{
|
||||
e: exporter,
|
||||
o: o,
|
||||
batch: make([]*SpanSnapshot, 0, o.MaxExportBatchSize),
|
||||
batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
|
||||
timer: time.NewTimer(o.BatchTimeout),
|
||||
queue: make(chan *SpanSnapshot, o.MaxQueueSize),
|
||||
queue: make(chan ReadOnlySpan, o.MaxQueueSize),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
@ -123,7 +141,7 @@ func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
|
||||
if bsp.e == nil {
|
||||
return
|
||||
}
|
||||
bsp.enqueue(s.Snapshot())
|
||||
bsp.enqueue(s)
|
||||
}
|
||||
|
||||
// Shutdown flushes the queue and waits until all spans are processed.
|
||||
@ -152,20 +170,37 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
type forceFlushSpan struct {
|
||||
ReadOnlySpan
|
||||
flushed chan struct{}
|
||||
}
|
||||
|
||||
func (f forceFlushSpan) SpanContext() trace.SpanContext {
|
||||
return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
|
||||
}
|
||||
|
||||
// ForceFlush exports all ended spans that have not yet been exported.
|
||||
func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
|
||||
var err error
|
||||
if bsp.e != nil {
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
if err := bsp.exportSpans(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
flushCh := make(chan struct{})
|
||||
if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
|
||||
select {
|
||||
case <-flushCh:
|
||||
// Processed any items in queue prior to ForceFlush being called
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
wait := make(chan error)
|
||||
go func() {
|
||||
wait <- bsp.exportSpans(ctx)
|
||||
close(wait)
|
||||
}()
|
||||
// Wait until the export is finished or the context is cancelled/timed out
|
||||
select {
|
||||
case <-wait:
|
||||
case err = <-wait:
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
}
|
||||
@ -173,30 +208,43 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
|
||||
// maximum queue size allowed for a BatchSpanProcessor.
|
||||
func WithMaxQueueSize(size int) BatchSpanProcessorOption {
|
||||
return func(o *BatchSpanProcessorOptions) {
|
||||
o.MaxQueueSize = size
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
|
||||
// the maximum export batch size allowed for a BatchSpanProcessor.
|
||||
func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
|
||||
return func(o *BatchSpanProcessorOptions) {
|
||||
o.MaxExportBatchSize = size
|
||||
}
|
||||
}
|
||||
|
||||
// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
|
||||
// maximum delay allowed for a BatchSpanProcessor before it will export any
|
||||
// held span (whether the queue is full or not).
|
||||
func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
|
||||
return func(o *BatchSpanProcessorOptions) {
|
||||
o.BatchTimeout = delay
|
||||
}
|
||||
}
|
||||
|
||||
// WithExportTimeout returns a BatchSpanProcessorOption that configures the
|
||||
// amount of time a BatchSpanProcessor waits for an exporter to export before
|
||||
// abandoning the export.
|
||||
func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
|
||||
return func(o *BatchSpanProcessorOptions) {
|
||||
o.ExportTimeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlocking returns a BatchSpanProcessorOption that configures a
|
||||
// BatchSpanProcessor to wait for enqueue operations to succeed instead of
|
||||
// dropping data when the queue is full.
|
||||
func WithBlocking() BatchSpanProcessorOption {
|
||||
return func(o *BatchSpanProcessorOptions) {
|
||||
o.BlockOnQueueFull = true
|
||||
@ -216,11 +264,19 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
if len(bsp.batch) > 0 {
|
||||
if err := bsp.e.ExportSpans(ctx, bsp.batch); err != nil {
|
||||
if l := len(bsp.batch); l > 0 {
|
||||
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
|
||||
err := bsp.e.ExportSpans(ctx, bsp.batch)
|
||||
|
||||
// A new batch is always created after exporting, even if the batch failed to be exported.
|
||||
//
|
||||
// It is up to the exporter to implement any type of retry logic if a batch is failing
|
||||
// to be exported, since it is specific to the protocol and backend being sent to.
|
||||
bsp.batch = bsp.batch[:0]
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bsp.batch = bsp.batch[:0]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -242,9 +298,13 @@ func (bsp *batchSpanProcessor) processQueue() {
|
||||
otel.Handle(err)
|
||||
}
|
||||
case sd := <-bsp.queue:
|
||||
if ffs, ok := sd.(forceFlushSpan); ok {
|
||||
close(ffs.flushed)
|
||||
continue
|
||||
}
|
||||
bsp.batchMutex.Lock()
|
||||
bsp.batch = append(bsp.batch, sd)
|
||||
shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
|
||||
shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
|
||||
bsp.batchMutex.Unlock()
|
||||
if shouldExport {
|
||||
if !bsp.timer.Stop() {
|
||||
@ -289,40 +349,84 @@ func (bsp *batchSpanProcessor) drainQueue() {
|
||||
}
|
||||
}
|
||||
|
||||
func (bsp *batchSpanProcessor) enqueue(sd *SpanSnapshot) {
|
||||
if !sd.SpanContext.IsSampled() {
|
||||
func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
|
||||
ctx := context.TODO()
|
||||
if bsp.o.BlockOnQueueFull {
|
||||
bsp.enqueueBlockOnQueueFull(ctx, sd)
|
||||
} else {
|
||||
bsp.enqueueDrop(ctx, sd)
|
||||
}
|
||||
}
|
||||
|
||||
func recoverSendOnClosedChan() {
|
||||
x := recover()
|
||||
switch err := x.(type) {
|
||||
case nil:
|
||||
return
|
||||
case runtime.Error:
|
||||
if err.Error() == "send on closed channel" {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic(x)
|
||||
}
|
||||
|
||||
func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
|
||||
if !sd.SpanContext().IsSampled() {
|
||||
return false
|
||||
}
|
||||
|
||||
// This ensures the bsp.queue<- below does not panic as the
|
||||
// processor shuts down.
|
||||
defer func() {
|
||||
x := recover()
|
||||
switch err := x.(type) {
|
||||
case nil:
|
||||
return
|
||||
case runtime.Error:
|
||||
if err.Error() == "send on closed channel" {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic(x)
|
||||
}()
|
||||
defer recoverSendOnClosedChan()
|
||||
|
||||
select {
|
||||
case <-bsp.stopCh:
|
||||
return
|
||||
return false
|
||||
default:
|
||||
}
|
||||
|
||||
if bsp.o.BlockOnQueueFull {
|
||||
bsp.queue <- sd
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case bsp.queue <- sd:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
|
||||
if !sd.SpanContext().IsSampled() {
|
||||
return false
|
||||
}
|
||||
|
||||
// This ensures the bsp.queue<- below does not panic as the
|
||||
// processor shuts down.
|
||||
defer recoverSendOnClosedChan()
|
||||
|
||||
select {
|
||||
case <-bsp.stopCh:
|
||||
return false
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case bsp.queue <- sd:
|
||||
return true
|
||||
default:
|
||||
atomic.AddUint32(&bsp.dropped, 1)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
|
||||
func (bsp *batchSpanProcessor) MarshalLog() interface{} {
|
||||
return struct {
|
||||
Type string
|
||||
SpanExporter SpanExporter
|
||||
Config BatchSpanProcessorOptions
|
||||
}{
|
||||
Type: "BatchSpanProcessor",
|
||||
SpanExporter: bsp.e,
|
||||
Config: bsp.o,
|
||||
}
|
||||
}
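The defaults and With* options above control the queue size, batch size, and timeouts of the batch processor, and can also be seeded from the environment via the env helpers shown in the constructor. As an illustration only, a minimal sketch wiring a batch processor into a TracerProvider might look like this; noopExporter is a placeholder, not an exporter from this diff:

package main

import (
    "context"
    "time"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// noopExporter is a placeholder SpanExporter used only for illustration.
type noopExporter struct{}

func (noopExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { return nil }
func (noopExporter) Shutdown(ctx context.Context) error { return nil }

func main() {
    bsp := sdktrace.NewBatchSpanProcessor(
        noopExporter{},
        sdktrace.WithMaxQueueSize(4096),
        sdktrace.WithMaxExportBatchSize(256),
        sdktrace.WithBatchTimeout(2*time.Second),
        sdktrace.WithBlocking(), // wait instead of dropping when the queue is full
    )

    tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
    defer tp.Shutdown(context.Background())

    _ = tp.Tracer("example")
}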
|
||||
|
68
vendor/go.opentelemetry.io/otel/sdk/trace/config.go
generated
vendored
68
vendor/go.opentelemetry.io/otel/sdk/trace/config.go
generated
vendored
@ -1,68 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
// SpanLimits represents the limits of a span.
|
||||
type SpanLimits struct {
|
||||
// AttributeCountLimit is the maximum allowed span attribute count.
|
||||
AttributeCountLimit int
|
||||
|
||||
// EventCountLimit is the maximum allowed span event count.
|
||||
EventCountLimit int
|
||||
|
||||
// LinkCountLimit is the maximum allowed span link count.
|
||||
LinkCountLimit int
|
||||
|
||||
// AttributePerEventCountLimit is the maximum allowed attribute per span event count.
|
||||
AttributePerEventCountLimit int
|
||||
|
||||
// AttributePerLinkCountLimit is the maximum allowed attribute per span link count.
|
||||
AttributePerLinkCountLimit int
|
||||
}
|
||||
|
||||
func (sl *SpanLimits) ensureDefault() {
|
||||
if sl.EventCountLimit <= 0 {
|
||||
sl.EventCountLimit = DefaultEventCountLimit
|
||||
}
|
||||
if sl.AttributeCountLimit <= 0 {
|
||||
sl.AttributeCountLimit = DefaultAttributeCountLimit
|
||||
}
|
||||
if sl.LinkCountLimit <= 0 {
|
||||
sl.LinkCountLimit = DefaultLinkCountLimit
|
||||
}
|
||||
if sl.AttributePerEventCountLimit <= 0 {
|
||||
sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
|
||||
}
|
||||
if sl.AttributePerLinkCountLimit <= 0 {
|
||||
sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultAttributeCountLimit is the default maximum allowed span attribute count.
|
||||
DefaultAttributeCountLimit = 128
|
||||
|
||||
// DefaultEventCountLimit is the default maximum allowed span event count.
|
||||
DefaultEventCountLimit = 128
|
||||
|
||||
// DefaultLinkCountLimit is the default maximum allowed span link count.
|
||||
DefaultLinkCountLimit = 128
|
||||
|
||||
// DefaultAttributePerEventCountLimit is the default maximum allowed attribute per span event count.
|
||||
DefaultAttributePerEventCountLimit = 128
|
||||
|
||||
// DefaultAttributePerLinkCountLimit is the default maximum allowed attribute per span link count.
|
||||
DefaultAttributePerLinkCountLimit = 128
|
||||
)
|
4
vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
generated
vendored
4
vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
generated
vendored
@ -15,10 +15,6 @@
|
||||
/*
|
||||
Package trace contains support for OpenTelemetry distributed tracing.
|
||||
|
||||
This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
|
||||
The following assumes a basic familiarity with OpenTelemetry concepts.
|
||||
See https://opentelemetry.io.
|
||||
*/
|
||||
|
37
vendor/go.opentelemetry.io/otel/sdk/trace/event.go
generated
vendored
Normal file
37
vendor/go.opentelemetry.io/otel/sdk/trace/event.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// Event is a thing that happened during a Span's lifetime.
|
||||
type Event struct {
|
||||
// Name is the name of this event
|
||||
Name string
|
||||
|
||||
// Attributes describe the aspects of the event.
|
||||
Attributes []attribute.KeyValue
|
||||
|
||||
// DroppedAttributeCount is the number of attributes that were not
|
||||
// recorded due to configured limits being reached.
|
||||
DroppedAttributeCount int
|
||||
|
||||
// Time at which this event was recorded.
|
||||
Time time.Time
|
||||
}
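Event records like the one above are what the API-level span.AddEvent call produces on a recording span. A small usage sketch (illustrative only; the tracer name and attributes are hypothetical):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"
)

func main() {
    _, span := otel.Tracer("example").Start(context.Background(), "checkout")
    defer span.End()

    // Each AddEvent call becomes an Event on the span, subject to the
    // configured event and per-event attribute limits.
    span.AddEvent("cache miss", trace.WithAttributes(attribute.String("key", "user:42")))
}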
|
24
vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
generated
vendored
24
vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
generated
vendored
@ -14,24 +14,30 @@
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
// evictedQueue is a FIFO queue with a configurable capacity.
|
||||
type evictedQueue struct {
|
||||
queue []interface{}
|
||||
capacity int
|
||||
droppedCount int
|
||||
}
|
||||
|
||||
func newEvictedQueue(capacity int) *evictedQueue {
|
||||
eq := &evictedQueue{
|
||||
capacity: capacity,
|
||||
queue: make([]interface{}, 0),
|
||||
}
|
||||
|
||||
return eq
|
||||
func newEvictedQueue(capacity int) evictedQueue {
|
||||
// Do not pre-allocate queue, do this lazily.
|
||||
return evictedQueue{capacity: capacity}
|
||||
}
|
||||
|
||||
// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
|
||||
// queued value will be discarded and the drop count incremented.
|
||||
func (eq *evictedQueue) add(value interface{}) {
|
||||
if len(eq.queue) == eq.capacity {
|
||||
eq.queue = eq.queue[1:]
|
||||
if eq.capacity == 0 {
|
||||
eq.droppedCount++
|
||||
return
|
||||
}
|
||||
|
||||
if eq.capacity > 0 && len(eq.queue) == eq.capacity {
|
||||
// Drop first-in while avoiding allocating more capacity to eq.queue.
|
||||
copy(eq.queue[:eq.capacity-1], eq.queue[1:])
|
||||
eq.queue = eq.queue[:eq.capacity-1]
|
||||
eq.droppedCount++
|
||||
}
|
||||
eq.queue = append(eq.queue, value)
|
||||
|
16
vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
generated
vendored
16
vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
generated
vendored
@ -26,8 +26,18 @@ import (
|
||||
|
||||
// IDGenerator allows custom generators for TraceID and SpanID.
|
||||
type IDGenerator interface {
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// NewIDs returns a new trace and span ID.
|
||||
NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// NewSpanID returns an ID for a new span in the trace with traceID.
|
||||
NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
type randomIDGenerator struct {
|
||||
@ -42,7 +52,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
|
||||
gen.Lock()
|
||||
defer gen.Unlock()
|
||||
sid := trace.SpanID{}
|
||||
gen.randSource.Read(sid[:])
|
||||
_, _ = gen.randSource.Read(sid[:])
|
||||
return sid
|
||||
}
|
||||
|
||||
@ -52,9 +62,9 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.
|
||||
gen.Lock()
|
||||
defer gen.Unlock()
|
||||
tid := trace.TraceID{}
|
||||
gen.randSource.Read(tid[:])
|
||||
_, _ = gen.randSource.Read(tid[:])
|
||||
sid := trace.SpanID{}
|
||||
gen.randSource.Read(sid[:])
|
||||
_, _ = gen.randSource.Read(sid[:])
|
||||
return tid, sid
|
||||
}
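IDGenerator is a public extension point; the random generator above can be swapped out via WithIDGenerator. A hedged sketch of a custom generator backed by crypto/rand (illustrative only; a production generator should also guard against the vanishingly unlikely all-zero ID):

package main

import (
    "context"
    crand "crypto/rand"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/trace"
)

// cryptoIDGenerator draws trace and span IDs from crypto/rand.
type cryptoIDGenerator struct{}

func (cryptoIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
    var tid trace.TraceID
    var sid trace.SpanID
    _, _ = crand.Read(tid[:])
    _, _ = crand.Read(sid[:])
    return tid, sid
}

func (cryptoIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
    var sid trace.SpanID
    _, _ = crand.Read(sid[:])
    return sid
}

func main() {
    tp := sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(cryptoIDGenerator{}))
    defer tp.Shutdown(context.Background())
}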
|
||||
|
||||
|
34
vendor/go.opentelemetry.io/otel/sdk/trace/link.go
generated
vendored
Normal file
34
vendor/go.opentelemetry.io/otel/sdk/trace/link.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Link is the relationship between two Spans. The relationship can be within
|
||||
// the same Trace or across different Traces.
|
||||
type Link struct {
|
||||
// SpanContext of the linked Span.
|
||||
SpanContext trace.SpanContext
|
||||
|
||||
// Attributes describe the aspects of the link.
|
||||
Attributes []attribute.KeyValue
|
||||
|
||||
// DroppedAttributeCount is the number of attributes that were not
|
||||
// recorded due to configured limits being reached.
|
||||
DroppedAttributeCount int
|
||||
}
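SDK Link values like the struct above are recorded when a span is started with links supplied through the API package. A short sketch (illustrative only; in practice the linked SpanContext would come from another trace, e.g. a message being processed):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"
)

func main() {
    // Pretend this context carries the span we want to link to.
    otherCtx, otherSpan := otel.Tracer("example").Start(context.Background(), "producer")
    otherSpan.End()

    link := trace.Link{
        SpanContext: trace.SpanContextFromContext(otherCtx),
        Attributes:  []attribute.KeyValue{attribute.String("messaging.operation", "process")},
    }

    _, span := otel.Tracer("example").Start(context.Background(), "consumer", trace.WithLinks(link))
    defer span.End()
}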
|
257
vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
generated
vendored
257
vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
generated
vendored
@ -21,21 +21,23 @@ import (
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
|
||||
)
|
||||
|
||||
// TODO (MrAlias): unify this API option design:
|
||||
// https://github.com/open-telemetry/opentelemetry-go/issues/536
|
||||
|
||||
// TracerProviderConfig
|
||||
type TracerProviderConfig struct {
|
||||
// tracerProviderConfig.
|
||||
type tracerProviderConfig struct {
|
||||
// processors contains collection of SpanProcessors that are processing pipeline
|
||||
// for spans in the trace signal.
|
||||
// SpanProcessors registered with a TracerProvider and are called at the start
|
||||
// and end of a Span's lifecycle, and are called in the order they are
|
||||
// registered.
|
||||
processors []SpanProcessor
|
||||
|
||||
// sampler is the default sampler used when creating new spans.
|
||||
@ -51,16 +53,36 @@ type TracerProviderConfig struct {
|
||||
resource *resource.Resource
|
||||
}
|
||||
|
||||
type TracerProviderOption func(*TracerProviderConfig)
|
||||
// MarshalLog is the marshaling function used by the logging system to represent this configuration.
|
||||
func (cfg tracerProviderConfig) MarshalLog() interface{} {
|
||||
return struct {
|
||||
SpanProcessors []SpanProcessor
|
||||
SamplerType string
|
||||
IDGeneratorType string
|
||||
SpanLimits SpanLimits
|
||||
Resource *resource.Resource
|
||||
}{
|
||||
SpanProcessors: cfg.processors,
|
||||
SamplerType: fmt.Sprintf("%T", cfg.sampler),
|
||||
IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
|
||||
SpanLimits: cfg.spanLimits,
|
||||
Resource: cfg.resource,
|
||||
}
|
||||
}
|
||||
|
||||
// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
|
||||
// instrumentation so it can trace operational flow through a system.
|
||||
type TracerProvider struct {
|
||||
mu sync.Mutex
|
||||
namedTracer map[instrumentation.Library]*tracer
|
||||
namedTracer map[instrumentation.Scope]*tracer
|
||||
spanProcessors atomic.Value
|
||||
sampler Sampler
|
||||
idGenerator IDGenerator
|
||||
spanLimits SpanLimits
|
||||
resource *resource.Resource
|
||||
|
||||
// These fields are not protected by the lock mu. They are assumed to be
|
||||
// immutable after creation of the TracerProvider.
|
||||
sampler Sampler
|
||||
idGenerator IDGenerator
|
||||
spanLimits SpanLimits
|
||||
resource *resource.Resource
|
||||
}
|
||||
|
||||
var _ trace.TracerProvider = &TracerProvider{}
|
||||
@ -68,30 +90,35 @@ var _ trace.TracerProvider = &TracerProvider{}
|
||||
// NewTracerProvider returns a new and configured TracerProvider.
|
||||
//
|
||||
// By default the returned TracerProvider is configured with:
|
||||
// - a ParentBased(AlwaysSample) Sampler
|
||||
// - a random number IDGenerator
|
||||
// - the resource.Default() Resource
|
||||
// - the default SpanLimits.
|
||||
// - a ParentBased(AlwaysSample) Sampler
|
||||
// - a random number IDGenerator
|
||||
// - the resource.Default() Resource
|
||||
// - the default SpanLimits.
|
||||
//
|
||||
// The passed opts are used to override these default values and configure the
|
||||
// returned TracerProvider appropriately.
|
||||
func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
|
||||
o := &TracerProviderConfig{}
|
||||
o := tracerProviderConfig{
|
||||
spanLimits: NewSpanLimits(),
|
||||
}
|
||||
o = applyTracerProviderEnvConfigs(o)
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
o = opt.apply(o)
|
||||
}
|
||||
|
||||
ensureValidTracerProviderConfig(o)
|
||||
o = ensureValidTracerProviderConfig(o)
|
||||
|
||||
tp := &TracerProvider{
|
||||
namedTracer: make(map[instrumentation.Library]*tracer),
|
||||
namedTracer: make(map[instrumentation.Scope]*tracer),
|
||||
sampler: o.sampler,
|
||||
idGenerator: o.idGenerator,
|
||||
spanLimits: o.spanLimits,
|
||||
resource: o.resource,
|
||||
}
|
||||
|
||||
global.Info("TracerProvider created", "config", o)
|
||||
|
||||
for _, sp := range o.processors {
|
||||
tp.RegisterSpanProcessor(sp)
|
||||
}
|
||||
@ -114,38 +141,40 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
|
||||
if name == "" {
|
||||
name = defaultTracerName
|
||||
}
|
||||
il := instrumentation.Library{
|
||||
Name: name,
|
||||
Version: c.InstrumentationVersion,
|
||||
is := instrumentation.Scope{
|
||||
Name: name,
|
||||
Version: c.InstrumentationVersion(),
|
||||
SchemaURL: c.SchemaURL(),
|
||||
}
|
||||
t, ok := p.namedTracer[il]
|
||||
t, ok := p.namedTracer[is]
|
||||
if !ok {
|
||||
t = &tracer{
|
||||
provider: p,
|
||||
instrumentationLibrary: il,
|
||||
provider: p,
|
||||
instrumentationScope: is,
|
||||
}
|
||||
p.namedTracer[il] = t
|
||||
p.namedTracer[is] = t
|
||||
global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL())
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors
|
||||
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
|
||||
func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
new := spanProcessorStates{}
|
||||
newSPS := spanProcessorStates{}
|
||||
if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok {
|
||||
new = append(new, old...)
|
||||
newSPS = append(newSPS, old...)
|
||||
}
|
||||
newSpanSync := &spanProcessorState{
|
||||
sp: s,
|
||||
state: &sync.Once{},
|
||||
}
|
||||
new = append(new, newSpanSync)
|
||||
p.spanProcessors.Store(new)
|
||||
newSPS = append(newSPS, newSpanSync)
|
||||
p.spanProcessors.Store(newSPS)
|
||||
}
|
||||
|
||||
// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors
|
||||
// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
|
||||
func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
@ -212,10 +241,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to load span processors")
|
||||
}
|
||||
if len(spss) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var retErr error
|
||||
for _, sps := range spss {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@ -228,14 +254,36 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
|
||||
err = sps.sp.Shutdown(ctx)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
if retErr == nil {
|
||||
retErr = err
|
||||
} else {
|
||||
// Poor man's list of errors
|
||||
retErr = fmt.Errorf("%v; %v", retErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return retErr
|
||||
}
|
||||
|
||||
// TracerProviderOption configures a TracerProvider.
|
||||
type TracerProviderOption interface {
|
||||
apply(tracerProviderConfig) tracerProviderConfig
|
||||
}
|
||||
|
||||
type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
|
||||
|
||||
func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
return fn(cfg)
|
||||
}
|
||||
|
||||
// WithSyncer registers the exporter with the TracerProvider using a
|
||||
// SimpleSpanProcessor.
|
||||
//
|
||||
// This is not recommended for production use. The synchronous nature of the
// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
// debugging, or showing examples of other features, but it will be slow and
// have a high computation resource usage overhead. The WithBatcher option is
// recommended for production use instead.
|
||||
func WithSyncer(e SpanExporter) TracerProviderOption {
|
||||
return WithSpanProcessor(NewSimpleSpanProcessor(e))
|
||||
}
|
||||
@ -248,9 +296,10 @@ func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProvide
|
||||
|
||||
// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
|
||||
func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
|
||||
return func(opts *TracerProviderConfig) {
|
||||
opts.processors = append(opts.processors, sp)
|
||||
}
|
||||
return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
cfg.processors = append(cfg.processors, sp)
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithResource returns a TracerProviderOption that will configure the
|
||||
@ -261,9 +310,14 @@ func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
|
||||
// If this option is not used, the TracerProvider will use the
|
||||
// resource.Default() Resource by default.
|
||||
func WithResource(r *resource.Resource) TracerProviderOption {
|
||||
return func(opts *TracerProviderConfig) {
|
||||
opts.resource = resource.Merge(resource.Environment(), r)
|
||||
}
|
||||
return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
var err error
|
||||
cfg.resource, err = resource.Merge(resource.Environment(), r)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithIDGenerator returns a TracerProviderOption that will configure the
|
||||
@ -274,11 +328,12 @@ func WithResource(r *resource.Resource) TracerProviderOption {
|
||||
// If this option is not used, the TracerProvider will use a random number
|
||||
// IDGenerator by default.
|
||||
func WithIDGenerator(g IDGenerator) TracerProviderOption {
|
||||
return func(opts *TracerProviderConfig) {
|
||||
return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
if g != nil {
|
||||
opts.idGenerator = g
|
||||
cfg.idGenerator = g
|
||||
}
|
||||
}
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithSampler returns a TracerProviderOption that will configure the Sampler
|
||||
@ -286,39 +341,115 @@ func WithIDGenerator(g IDGenerator) TracerProviderOption {
|
||||
// Tracers the TracerProvider creates to make their sampling decisions for the
|
||||
// Spans they create.
|
||||
//
|
||||
// If this option is not used, the TracerProvider will use a
|
||||
// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
|
||||
// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
|
||||
// and the sampler is not configured through environment variables or the environment
|
||||
// contains invalid/unsupported configuration, the TracerProvider will use a
|
||||
// ParentBased(AlwaysSample) Sampler by default.
|
||||
func WithSampler(s Sampler) TracerProviderOption {
|
||||
return func(opts *TracerProviderConfig) {
|
||||
return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
if s != nil {
|
||||
opts.sampler = s
|
||||
cfg.sampler = s
|
||||
}
|
||||
}
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithSpanLimits returns a TracerProviderOption that will configure the
|
||||
// SpanLimits sl as a TracerProvider's SpanLimits. The configured SpanLimits
|
||||
// are used by the Tracers the TracerProvider creates and the Spans they create
|
||||
// to limit tracing resources used.
|
||||
// WithSpanLimits returns a TracerProviderOption that configures a
|
||||
// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
|
||||
// created by a Tracer from the TracerProvider.
|
||||
//
|
||||
// If this option is not used, the TracerProvider will use the default
|
||||
// SpanLimits.
|
||||
// If any field of sl is zero or negative it will be replaced with the default
|
||||
// value for that field.
|
||||
//
|
||||
// If this or WithRawSpanLimits are not provided, the TracerProvider will use
|
||||
// the limits defined by environment variables, or the defaults if unset.
|
||||
// Refer to the NewSpanLimits documentation for information about this
|
||||
// relationship.
|
||||
//
|
||||
// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
|
||||
// and zero limits. This option will be kept until the next major version
|
||||
// incremented release.
|
||||
func WithSpanLimits(sl SpanLimits) TracerProviderOption {
|
||||
return func(opts *TracerProviderConfig) {
|
||||
opts.spanLimits = sl
|
||||
if sl.AttributeValueLengthLimit <= 0 {
|
||||
sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
|
||||
}
|
||||
if sl.AttributeCountLimit <= 0 {
|
||||
sl.AttributeCountLimit = DefaultAttributeCountLimit
|
||||
}
|
||||
if sl.EventCountLimit <= 0 {
|
||||
sl.EventCountLimit = DefaultEventCountLimit
|
||||
}
|
||||
if sl.AttributePerEventCountLimit <= 0 {
|
||||
sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
|
||||
}
|
||||
if sl.LinkCountLimit <= 0 {
|
||||
sl.LinkCountLimit = DefaultLinkCountLimit
|
||||
}
|
||||
if sl.AttributePerLinkCountLimit <= 0 {
|
||||
sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
|
||||
}
|
||||
return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
cfg.spanLimits = sl
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithRawSpanLimits returns a TracerProviderOption that configures a
|
||||
// TracerProvider to use these limits. These limits bound any Span created by
|
||||
// a Tracer from the TracerProvider.
|
||||
//
|
||||
// The limits will be used as-is. Zero or negative values will not be changed
|
||||
// to the default value like WithSpanLimits does. Setting a limit to zero will
|
||||
// effectively disable the related resource it limits and setting to a
|
||||
// negative value will mean that resource is unlimited. Consequently, this
|
||||
// means that the zero-value SpanLimits will disable all span resources.
|
||||
// Because of this, limits should be constructed using NewSpanLimits and
|
||||
// updated accordingly.
|
||||
//
|
||||
// If this or WithSpanLimits are not provided, the TracerProvider will use the
|
||||
// limits defined by environment variables, or the defaults if unset. Refer to
|
||||
// the NewSpanLimits documentation for information about this relationship.
|
||||
func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
|
||||
return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
cfg.spanLimits = limits
|
||||
return cfg
|
||||
})
|
||||
}
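WithRawSpanLimits applies the limits exactly as given, while WithSpanLimits (deprecated above) coerces non-positive fields to defaults. An illustrative sketch, starting from NewSpanLimits and tightening one field (the chosen limit value is hypothetical):

package main

import (
    "context"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    // Start from the environment/default-derived limits, then tighten one field.
    limits := sdktrace.NewSpanLimits()
    limits.AttributeCountLimit = 16 // cap attributes per span; other fields keep their values

    tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
    defer tp.Shutdown(context.Background())
}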
|
||||
|
||||
func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
for _, opt := range tracerProviderOptionsFromEnv() {
|
||||
cfg = opt.apply(cfg)
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func tracerProviderOptionsFromEnv() []TracerProviderOption {
|
||||
var opts []TracerProviderOption
|
||||
|
||||
sampler, err := samplerFromEnv()
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
|
||||
if sampler != nil {
|
||||
opts = append(opts, WithSampler(sampler))
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid.
|
||||
func ensureValidTracerProviderConfig(cfg *TracerProviderConfig) {
|
||||
func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
|
||||
if cfg.sampler == nil {
|
||||
cfg.sampler = ParentBased(AlwaysSample())
|
||||
}
|
||||
if cfg.idGenerator == nil {
|
||||
cfg.idGenerator = defaultIDGenerator()
|
||||
}
|
||||
cfg.spanLimits.ensureDefault()
|
||||
if cfg.resource == nil {
|
||||
cfg.resource = resource.Default()
|
||||
}
|
||||
return cfg
|
||||
}
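Putting the provider options above together, a minimal sketch of constructing and registering a TracerProvider might look like the following; the exporter is a placeholder (a real deployment would use OTLP, Jaeger, stdout, etc.) and the resource attributes are hypothetical:

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/sdk/resource"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// noopExporter stands in for a real exporter; illustration only.
type noopExporter struct{}

func (noopExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { return nil }
func (noopExporter) Shutdown(ctx context.Context) error { return nil }

func main() {
    tp := sdktrace.NewTracerProvider(
        sdktrace.WithBatcher(noopExporter{}),
        sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))),
        sdktrace.WithResource(resource.NewSchemaless(attribute.String("service.name", "checkout"))),
    )
    defer tp.Shutdown(context.Background())

    // Register globally so otel.Tracer(...) hands out tracers from this provider.
    otel.SetTracerProvider(tp)
}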
|
||||
|
108
vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
generated
vendored
Normal file
108
vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
tracesSamplerKey = "OTEL_TRACES_SAMPLER"
|
||||
tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
|
||||
|
||||
samplerAlwaysOn = "always_on"
|
||||
samplerAlwaysOff = "always_off"
|
||||
samplerTraceIDRatio = "traceidratio"
|
||||
samplerParentBasedAlwaysOn = "parentbased_always_on"
|
||||
samplerParsedBasedAlwaysOff = "parentbased_always_off"
|
||||
samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
|
||||
)
|
||||
|
||||
type errUnsupportedSampler string
|
||||
|
||||
func (e errUnsupportedSampler) Error() string {
|
||||
return fmt.Sprintf("unsupported sampler: %s", string(e))
|
||||
}
|
||||
|
||||
var (
|
||||
errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0")
|
||||
errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
|
||||
)
|
||||
|
||||
type samplerArgParseError struct {
|
||||
parseErr error
|
||||
}
|
||||
|
||||
func (e samplerArgParseError) Error() string {
|
||||
return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error())
|
||||
}
|
||||
|
||||
func (e samplerArgParseError) Unwrap() error {
|
||||
return e.parseErr
|
||||
}
|
||||
|
||||
func samplerFromEnv() (Sampler, error) {
|
||||
sampler, ok := os.LookupEnv(tracesSamplerKey)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
sampler = strings.ToLower(strings.TrimSpace(sampler))
|
||||
samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
|
||||
samplerArg = strings.TrimSpace(samplerArg)
|
||||
|
||||
switch sampler {
|
||||
case samplerAlwaysOn:
|
||||
return AlwaysSample(), nil
|
||||
case samplerAlwaysOff:
|
||||
return NeverSample(), nil
|
||||
case samplerTraceIDRatio:
|
||||
if !hasSamplerArg {
|
||||
return TraceIDRatioBased(1.0), nil
|
||||
}
|
||||
return parseTraceIDRatio(samplerArg)
|
||||
case samplerParentBasedAlwaysOn:
|
||||
return ParentBased(AlwaysSample()), nil
|
||||
case samplerParsedBasedAlwaysOff:
|
||||
return ParentBased(NeverSample()), nil
|
||||
case samplerParentBasedTraceIDRatio:
|
||||
if !hasSamplerArg {
|
||||
return ParentBased(TraceIDRatioBased(1.0)), nil
|
||||
}
|
||||
ratio, err := parseTraceIDRatio(samplerArg)
|
||||
return ParentBased(ratio), err
|
||||
default:
|
||||
return nil, errUnsupportedSampler(sampler)
|
||||
}
|
||||
}
|
||||
|
||||
func parseTraceIDRatio(arg string) (Sampler, error) {
|
||||
v, err := strconv.ParseFloat(arg, 64)
|
||||
if err != nil {
|
||||
return TraceIDRatioBased(1.0), samplerArgParseError{err}
|
||||
}
|
||||
if v < 0.0 {
|
||||
return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
|
||||
}
|
||||
if v > 1.0 {
|
||||
return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
|
||||
}
|
||||
|
||||
return TraceIDRatioBased(v), nil
|
||||
}
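Because NewTracerProvider consults samplerFromEnv when no WithSampler option is given, the sampler can be chosen without code changes. An illustrative sketch of the environment-driven path (the values shown are examples):

package main

import (
    "context"
    "os"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    // With no WithSampler option, NewTracerProvider reads these variables.
    os.Setenv("OTEL_TRACES_SAMPLER", "parentbased_traceidratio")
    os.Setenv("OTEL_TRACES_SAMPLER_ARG", "0.25")

    tp := sdktrace.NewTracerProvider() // sampler resolved from the environment
    defer tp.Shutdown(context.Background())
}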
|
61
vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
generated
vendored
61
vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
generated
vendored
@ -25,8 +25,19 @@ import (
|
||||
|
||||
// Sampler decides whether a trace should be sampled and exported.
|
||||
type Sampler interface {
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// ShouldSample returns a SamplingResult based on a decision made from the
|
||||
// passed parameters.
|
||||
ShouldSample(parameters SamplingParameters) SamplingResult
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// Description returns information describing the Sampler.
|
||||
Description() string
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
// SamplingParameters contains the values passed to a Sampler.
|
||||
@ -42,17 +53,17 @@ type SamplingParameters struct {
|
||||
// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
|
||||
type SamplingDecision uint8
|
||||
|
||||
// Valid sampling decisions
|
||||
// Valid sampling decisions.
|
||||
const (
|
||||
// Drop will not record the span and all attributes/events will be dropped
|
||||
// Drop will not record the span and all attributes/events will be dropped.
|
||||
Drop SamplingDecision = iota
|
||||
|
||||
// Record indicates the span's `IsRecording() == true`, but `Sampled` flag
|
||||
// *must not* be set
|
||||
// *must not* be set.
|
||||
RecordOnly
|
||||
|
||||
// RecordAndSample has span's `IsRecording() == true` and `Sampled` flag
|
||||
// *must* be set
|
||||
// *must* be set.
|
||||
RecordAndSample
|
||||
)
|
||||
|
||||
@ -91,7 +102,8 @@ func (ts traceIDRatioSampler) Description() string {
|
||||
// always sample. Fractions < 0 are treated as zero. To respect the
|
||||
// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
|
||||
// as a delegate of a `Parent` sampler.
|
||||
//nolint:golint // golint complains about stutter of `trace.TraceIDRatioBased`
|
||||
//
|
||||
//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
|
||||
func TraceIDRatioBased(fraction float64) Sampler {
|
||||
if fraction >= 1 {
|
||||
return AlwaysSample()
|
||||
@ -164,11 +176,11 @@ func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
|
||||
|
||||
type parentBased struct {
|
||||
root Sampler
|
||||
config config
|
||||
config samplerConfig
|
||||
}
|
||||
|
||||
func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) config {
|
||||
c := config{
|
||||
func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
|
||||
c := samplerConfig{
|
||||
remoteParentSampled: AlwaysSample(),
|
||||
remoteParentNotSampled: NeverSample(),
|
||||
localParentSampled: AlwaysSample(),
|
||||
@ -176,26 +188,21 @@ func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) config
|
||||
}
|
||||
|
||||
for _, so := range samplers {
|
||||
so.Apply(&c)
|
||||
c = so.apply(c)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// config is a group of options for parentBased sampler.
|
||||
type config struct {
|
||||
// samplerConfig is a group of options for parentBased sampler.
|
||||
type samplerConfig struct {
|
||||
remoteParentSampled, remoteParentNotSampled Sampler
|
||||
localParentSampled, localParentNotSampled Sampler
|
||||
}
|
||||
|
||||
// ParentBasedSamplerOption configures the sampler for a particular sampling case.
|
||||
type ParentBasedSamplerOption interface {
|
||||
Apply(*config)
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
apply(samplerConfig) samplerConfig
|
||||
}
|
||||
|
||||
// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
|
||||
@ -207,12 +214,11 @@ type remoteParentSampledOption struct {
|
||||
s Sampler
|
||||
}
|
||||
|
||||
func (o remoteParentSampledOption) Apply(config *config) {
|
||||
func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
|
||||
config.remoteParentSampled = o.s
|
||||
return config
|
||||
}
|
||||
|
||||
func (remoteParentSampledOption) private() {}
|
||||
|
||||
// WithRemoteParentNotSampled sets the sampler for the case of remote parent
|
||||
// which is not sampled.
|
||||
func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
|
||||
@ -223,12 +229,11 @@ type remoteParentNotSampledOption struct {
|
||||
s Sampler
|
||||
}
|
||||
|
||||
func (o remoteParentNotSampledOption) Apply(config *config) {
|
||||
func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
|
||||
config.remoteParentNotSampled = o.s
|
||||
return config
|
||||
}
|
||||
|
||||
func (remoteParentNotSampledOption) private() {}
|
||||
|
||||
// WithLocalParentSampled sets the sampler for the case of sampled local parent.
|
||||
func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
|
||||
return localParentSampledOption{s}
|
||||
@ -238,12 +243,11 @@ type localParentSampledOption struct {
|
||||
s Sampler
|
||||
}
|
||||
|
||||
func (o localParentSampledOption) Apply(config *config) {
|
||||
func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
|
||||
config.localParentSampled = o.s
|
||||
return config
|
||||
}
|
||||
|
||||
func (localParentSampledOption) private() {}
|
||||
|
||||
// WithLocalParentNotSampled sets the sampler for the case of local parent
|
||||
// which is not sampled.
|
||||
func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
|
||||
@ -254,12 +258,11 @@ type localParentNotSampledOption struct {
|
||||
s Sampler
|
||||
}
|
||||
|
||||
func (o localParentNotSampledOption) Apply(config *config) {
|
||||
func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
|
||||
config.localParentNotSampled = o.s
|
||||
return config
|
||||
}
|
||||
|
||||
func (localParentNotSampledOption) private() {}
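The parent-based option setters above override the per-case delegate samplers. A short sketch composing them (illustrative only; the sampling ratio is hypothetical):

package main

import (
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    // Sample 10% of root spans, always follow a sampled remote parent,
    // and never sample when a remote parent was explicitly not sampled.
    sampler := sdktrace.ParentBased(
        sdktrace.TraceIDRatioBased(0.1),
        sdktrace.WithRemoteParentSampled(sdktrace.AlwaysSample()),
        sdktrace.WithRemoteParentNotSampled(sdktrace.NeverSample()),
    )

    _ = sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
}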
|
||||
|
||||
func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
|
||||
psc := trace.SpanContextFromContext(p.ParentContext)
|
||||
if psc.IsValid() {
|
||||
|
64
vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
generated
vendored
64
vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
generated
vendored
@ -33,6 +33,12 @@ var _ SpanProcessor = (*simpleSpanProcessor)(nil)
|
||||
|
||||
// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
|
||||
// send completed spans to the exporter immediately.
|
||||
//
|
||||
// This SpanProcessor is not recommended for production use. The synchronous
|
||||
// nature of this SpanProcessor make it good for testing, debugging, or
|
||||
// showing examples of other feature, but it will be slow and have a high
|
||||
// computation resource usage overhead. The BatchSpanProcessor is recommended
|
||||
// for production use instead.
|
||||
func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
|
||||
ssp := &simpleSpanProcessor{
|
||||
exporter: exporter,
|
||||
@ -49,8 +55,7 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
|
||||
defer ssp.exporterMu.RUnlock()
|
||||
|
||||
if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
|
||||
ss := s.Snapshot()
|
||||
if err := ssp.exporter.ExportSpans(context.Background(), []*SpanSnapshot{ss}); err != nil {
|
||||
if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
@ -60,16 +65,48 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
|
||||
func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
|
||||
var err error
|
||||
ssp.stopOnce.Do(func() {
|
||||
stopFunc := func(exp SpanExporter) (<-chan error, func()) {
|
||||
done := make(chan error)
|
||||
return done, func() { done <- exp.Shutdown(ctx) }
|
||||
}
|
||||
|
||||
// The exporter field of the simpleSpanProcessor needs to be zeroed to
|
||||
// signal it is shut down, meaning all subsequent calls to OnEnd will
|
||||
// be gracefully ignored. This needs to be done synchronously to avoid
|
||||
// any race condition.
|
||||
//
|
||||
// A closure is used to keep reference to the exporter and then the
|
||||
// field is zeroed. This ensures the simpleSpanProcessor is shut down
|
||||
// before the exporter. This order is important as it avoids a
|
||||
// potential deadlock. If the exporter shut down operation generates a
|
||||
// span, that span would need to be exported. Meaning, OnEnd would be
|
||||
// called and try acquiring the lock that is held here.
|
||||
ssp.exporterMu.Lock()
|
||||
exporter := ssp.exporter
|
||||
// Set exporter to nil so subsequent calls to OnEnd are ignored
|
||||
// gracefully.
|
||||
done, shutdown := stopFunc(ssp.exporter)
|
||||
ssp.exporter = nil
|
||||
ssp.exporterMu.Unlock()
|
||||
|
||||
// Clear the ssp.exporter prior to shutting it down so if that creates
|
||||
// a span that needs to be exported there is no deadlock.
|
||||
err = exporter.Shutdown(ctx)
|
||||
go shutdown()
|
||||
|
||||
// Wait for the exporter to shut down or the deadline to expire.
|
||||
select {
|
||||
case err = <-done:
|
||||
case <-ctx.Done():
|
||||
// It is possible for the exporter to have immediately shut down
|
||||
// and the context to be done simultaneously. In that case this
|
||||
// outer select statement will randomly choose a case. This will
|
||||
// result in a different returned error for similar scenarios.
|
||||
// Instead, double check if the exporter shut down at the same
|
||||
// time and return that error if so. This will ensure consistency
|
||||
// as well as ensure the caller knows the exporter shut down
|
||||
// successfully (they can already determine if the deadline is
|
||||
// expired given they passed the context).
|
||||
select {
|
||||
case err = <-done:
|
||||
default:
|
||||
err = ctx.Err()
|
||||
}
|
||||
}
|
||||
})
|
||||
return err
|
||||
}
|
||||
@ -78,3 +115,14 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
|
||||
func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
|
||||
func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
|
||||
return struct {
|
||||
Type string
|
||||
Exporter SpanExporter
|
||||
}{
|
||||
Type: "SimpleSpanProcessor",
|
||||
Exporter: ssp.exporter,
|
||||
}
|
||||
}
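Because the simple span processor exports synchronously, it is convenient in tests where spans must be visible immediately after they end. An illustrative sketch using an in-memory capturing exporter (the exporter type is a stand-in, not part of this diff):

package main

import (
    "context"
    "fmt"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// captureExporter records exported spans in memory; illustration only.
type captureExporter struct{ spans []sdktrace.ReadOnlySpan }

func (c *captureExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
    c.spans = append(c.spans, spans...)
    return nil
}

func (c *captureExporter) Shutdown(ctx context.Context) error { return nil }

func main() {
    exp := &captureExporter{}
    tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exp)) // synchronous export on span end

    _, span := tp.Tracer("test").Start(context.Background(), "op")
    span.End()

    // The span is already exported; no flush is required.
    fmt.Println("exported spans:", len(exp.spans))

    _ = tp.Shutdown(context.Background())
}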
|
||||
|
144
vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
generated
vendored
Normal file
144
vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
generated
vendored
Normal file
@ -0,0 +1,144 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace // import "go.opentelemetry.io/otel/sdk/trace"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// snapshot is a record of a span's state at a particular checkpointed time.
|
||||
// It is used as a read-only representation of that state.
|
||||
type snapshot struct {
|
||||
name string
|
||||
spanContext trace.SpanContext
|
||||
parent trace.SpanContext
|
||||
spanKind trace.SpanKind
|
||||
startTime time.Time
|
||||
endTime time.Time
|
||||
attributes []attribute.KeyValue
|
||||
events []Event
|
||||
links []Link
|
||||
status Status
|
||||
childSpanCount int
|
||||
droppedAttributeCount int
|
||||
droppedEventCount int
|
||||
droppedLinkCount int
|
||||
resource *resource.Resource
|
||||
instrumentationScope instrumentation.Scope
|
||||
}
|
||||
|
||||
var _ ReadOnlySpan = snapshot{}
|
||||
|
||||
func (s snapshot) private() {}
|
||||
|
||||
// Name returns the name of the span.
|
||||
func (s snapshot) Name() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
// SpanContext returns the unique SpanContext that identifies the span.
|
||||
func (s snapshot) SpanContext() trace.SpanContext {
|
||||
return s.spanContext
|
||||
}
|
||||
|
||||
// Parent returns the unique SpanContext that identifies the parent of the
|
||||
// span if one exists. If the span has no parent the returned SpanContext
|
||||
// will be invalid.
|
||||
func (s snapshot) Parent() trace.SpanContext {
|
||||
return s.parent
|
||||
}
|
||||
|
||||
// SpanKind returns the role the span plays in a Trace.
|
||||
func (s snapshot) SpanKind() trace.SpanKind {
|
||||
return s.spanKind
|
||||
}
|
||||
|
||||
// StartTime returns the time the span started recording.
|
||||
func (s snapshot) StartTime() time.Time {
|
||||
return s.startTime
|
||||
}
|
||||
|
||||
// EndTime returns the time the span stopped recording. It will be zero if
|
||||
// the span has not ended.
|
||||
func (s snapshot) EndTime() time.Time {
|
||||
return s.endTime
|
||||
}
|
||||
|
||||
// Attributes returns the defining attributes of the span.
|
||||
func (s snapshot) Attributes() []attribute.KeyValue {
|
||||
return s.attributes
|
||||
}
|
||||
|
||||
// Links returns all the links the span has to other spans.
|
||||
func (s snapshot) Links() []Link {
|
||||
return s.links
|
||||
}
|
||||
|
||||
// Events returns all the events that occurred within in the spans
|
||||
// lifetime.
|
||||
func (s snapshot) Events() []Event {
|
||||
return s.events
|
||||
}
|
||||
|
||||
// Status returns the spans status.
|
||||
func (s snapshot) Status() Status {
|
||||
return s.status
|
||||
}
|
||||
|
||||
// InstrumentationScope returns information about the instrumentation
|
||||
// scope that created the span.
|
||||
func (s snapshot) InstrumentationScope() instrumentation.Scope {
|
||||
return s.instrumentationScope
|
||||
}
|
||||
|
||||
// InstrumentationLibrary returns information about the instrumentation
|
||||
// library that created the span.
|
||||
func (s snapshot) InstrumentationLibrary() instrumentation.Library {
|
||||
return s.instrumentationScope
|
||||
}
|
||||
|
||||
// Resource returns information about the entity that produced the span.
|
||||
func (s snapshot) Resource() *resource.Resource {
|
||||
return s.resource
|
||||
}
|
||||
|
||||
// DroppedAttributes returns the number of attributes dropped by the span
|
||||
// due to limits being reached.
|
||||
func (s snapshot) DroppedAttributes() int {
|
||||
return s.droppedAttributeCount
|
||||
}
|
||||
|
||||
// DroppedLinks returns the number of links dropped by the span due to limits
|
||||
// being reached.
|
||||
func (s snapshot) DroppedLinks() int {
|
||||
return s.droppedLinkCount
|
||||
}
|
||||
|
||||
// DroppedEvents returns the number of events dropped by the span due to
|
||||
// limits being reached.
|
||||
func (s snapshot) DroppedEvents() int {
|
||||
return s.droppedEventCount
|
||||
}
|
||||
|
||||
// ChildSpanCount returns the count of spans that consider the span a
|
||||
// direct parent.
|
||||
func (s snapshot) ChildSpanCount() int {
|
||||
return s.childSpanCount
|
||||
}
|
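Editor's note: the snapshot type above is what exporters ultimately observe through the ReadOnlySpan interface. A rough sketch of reading that state follows; the inspectSpans helper is hypothetical, only the accessor methods come from the file above.

package tracing

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// inspectSpans is a hypothetical helper showing how read-only span state
// (name, span context, timing, drop counters) can be consumed downstream.
func inspectSpans(spans []sdktrace.ReadOnlySpan) {
	for _, s := range spans {
		fmt.Printf("%s (%s) took %v, %d attribute(s) dropped\n",
			s.Name(),
			s.SpanContext().TraceID(),
			s.EndTime().Sub(s.StartTime()),
			s.DroppedAttributes(),
		)
	}
}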
746 vendor/go.opentelemetry.io/otel/sdk/trace/span.go generated vendored
File diff suppressed because it is too large
16 vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go generated vendored
@ -16,10 +16,13 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"

import "context"

// SpanExporter handles the delivery of SpanSnapshot structs to external
// receivers. This is the final component in the trace export pipeline.
// SpanExporter handles the delivery of spans to external receivers. This is
// the final component in the trace export pipeline.
type SpanExporter interface {
	// ExportSpans exports a batch of SpanSnapshots.
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// ExportSpans exports a batch of spans.
	//
	// This function is called synchronously, so there is no concurrency
	// safety requirement. However, due to the synchronous calling pattern,
@ -30,10 +33,15 @@ type SpanExporter interface {
	// calls this function will not implement any retry logic. All errors
	// returned by this function are considered unrecoverable and will be
	// reported to a configured error Handler.
	ExportSpans(ctx context.Context, ss []*SpanSnapshot) error
	ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Shutdown notifies the exporter of a pending halt to operations. The
	// exporter is expected to perform any cleanup or synchronization it
	// requires while honoring all timeouts and cancellations contained in
	// the passed context.
	Shutdown(ctx context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.
}
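Editor's note: a minimal in-memory exporter satisfying the interface above, as a sketch. The memoryExporter type and its drop-after-shutdown behavior are illustrative, not part of the SDK.

package tracing

import (
	"context"
	"sync"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// memoryExporter is a hypothetical SpanExporter that collects ended spans in
// memory; it only illustrates the interface contract above.
type memoryExporter struct {
	mu      sync.Mutex
	stopped bool
	spans   []sdktrace.ReadOnlySpan
}

// ExportSpans is called synchronously by the SDK; it honors ctx and returns
// only unrecoverable errors.
func (e *memoryExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.stopped {
		return nil // drop spans after Shutdown, a common exporter convention
	}
	e.spans = append(e.spans, spans...)
	return nil
}

// Shutdown marks the exporter as stopped while honoring the passed context.
func (e *memoryExporter) Shutdown(ctx context.Context) error {
	e.mu.Lock()
	e.stopped = true
	e.mu.Unlock()
	return ctx.Err()
}

var _ sdktrace.SpanExporter = (*memoryExporter)(nil)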
125 vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go generated vendored Normal file
@ -0,0 +1,125 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package trace // import "go.opentelemetry.io/otel/sdk/trace"

import "go.opentelemetry.io/otel/sdk/internal/env"

const (
	// DefaultAttributeValueLengthLimit is the default maximum allowed
	// attribute value length, unlimited.
	DefaultAttributeValueLengthLimit = -1

	// DefaultAttributeCountLimit is the default maximum number of attributes
	// a span can have.
	DefaultAttributeCountLimit = 128

	// DefaultEventCountLimit is the default maximum number of events a span
	// can have.
	DefaultEventCountLimit = 128

	// DefaultLinkCountLimit is the default maximum number of links a span can
	// have.
	DefaultLinkCountLimit = 128

	// DefaultAttributePerEventCountLimit is the default maximum number of
	// attributes a span event can have.
	DefaultAttributePerEventCountLimit = 128

	// DefaultAttributePerLinkCountLimit is the default maximum number of
	// attributes a span link can have.
	DefaultAttributePerLinkCountLimit = 128
)

// SpanLimits represents the limits of a span.
type SpanLimits struct {
	// AttributeValueLengthLimit is the maximum allowed attribute value length.
	//
	// This limit only applies to string and string slice attribute values.
	// Any string longer than this value will be truncated to this length.
	//
	// Setting this to a negative value means no limit is applied.
	AttributeValueLengthLimit int

	// AttributeCountLimit is the maximum allowed span attribute count. Any
	// attribute added to a span once this limit is reached will be dropped.
	//
	// Setting this to zero means no attributes will be recorded.
	//
	// Setting this to a negative value means no limit is applied.
	AttributeCountLimit int

	// EventCountLimit is the maximum allowed span event count. Any event
	// added to a span once this limit is reached means it will be added but
	// the oldest event will be dropped.
	//
	// Setting this to zero means no events will be recorded.
	//
	// Setting this to a negative value means no limit is applied.
	EventCountLimit int

	// LinkCountLimit is the maximum allowed span link count. Any link added
	// to a span once this limit is reached means it will be added but the
	// oldest link will be dropped.
	//
	// Setting this to zero means no links will be recorded.
	//
	// Setting this to a negative value means no limit is applied.
	LinkCountLimit int

	// AttributePerEventCountLimit is the maximum number of attributes allowed
	// per span event. Any attribute added after this limit is reached will be
	// dropped.
	//
	// Setting this to zero means no attributes will be recorded for events.
	//
	// Setting this to a negative value means no limit is applied.
	AttributePerEventCountLimit int

	// AttributePerLinkCountLimit is the maximum number of attributes allowed
	// per span link. Any attribute added after this limit is reached will be
	// dropped.
	//
	// Setting this to zero means no attributes will be recorded for links.
	//
	// Setting this to a negative value means no limit is applied.
	AttributePerLinkCountLimit int
}

// NewSpanLimits returns a SpanLimits with all limits set to the value their
// corresponding environment variable holds, or the default if unset.
//
// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
// (default: unlimited)
//
// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
//
// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
//
// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
// 128)
//
// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
//
// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
func NewSpanLimits() SpanLimits {
	return SpanLimits{
		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
	}
}
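Editor's note: a rough sketch of how these limits are picked up and installed. Overriding the environment variable is based on the documentation above; the WithRawSpanLimits option used to install the limits is assumed to be available in this SDK version.

package main

import (
	"fmt"
	"os"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// OTEL_SPAN_EVENT_COUNT_LIMIT is one of the variables NewSpanLimits reads;
	// unset variables fall back to the defaults listed above.
	os.Setenv("OTEL_SPAN_EVENT_COUNT_LIMIT", "32")

	limits := sdktrace.NewSpanLimits()
	fmt.Println(limits.EventCountLimit) // expected to print 32 under this assumption

	// Installing the limits on a provider; WithRawSpanLimits is assumed here.
	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	_ = tp
}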
11 vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go generated vendored
@ -24,13 +24,20 @@ import (
// and end of a Span's lifecycle, and are called in the order they are
// registered.
type SpanProcessor interface {
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// OnStart is called when a span is started. It is called synchronously
	// and should not block.
	OnStart(parent context.Context, s ReadWriteSpan)
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// OnEnd is called when a span is finished. It is called synchronously and
	// hence should not block.
	OnEnd(s ReadOnlySpan)
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Shutdown is called when the SDK shuts down. Any cleanup or release of
	// resources held by the processor should be done in this call.
@ -41,12 +48,16 @@ type SpanProcessor interface {
	// All timeouts and cancellations contained in ctx must be honored; this
	// should not block indefinitely.
	Shutdown(ctx context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// ForceFlush exports all ended spans to the configured Exporter that have not yet
	// been exported. It should only be called when absolutely necessary, such as when
	// using a FaaS provider that may suspend the process after an invocation, but before
	// the Processor can export the completed spans.
	ForceFlush(ctx context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.
}

type spanProcessorState struct {
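Editor's note: a minimal custom processor satisfying the interface above, as a sketch. The logProcessor type is hypothetical; only the SpanProcessor methods and the ReadWriteSpan/ReadOnlySpan accessors come from the SDK.

package tracing

import (
	"context"
	"log"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// logProcessor is a hypothetical SpanProcessor that logs span lifecycle
// events; it has nothing buffered, so Shutdown and ForceFlush only report
// context errors.
type logProcessor struct{}

// OnStart runs synchronously when a span starts, so it must stay cheap.
func (logProcessor) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
	log.Printf("started: %s", s.Name())
}

// OnEnd runs synchronously when a span ends.
func (logProcessor) OnEnd(s sdktrace.ReadOnlySpan) {
	log.Printf("ended: %s (%v)", s.Name(), s.EndTime().Sub(s.StartTime()))
}

func (logProcessor) Shutdown(ctx context.Context) error   { return ctx.Err() }
func (logProcessor) ForceFlush(ctx context.Context) error { return ctx.Err() }

var _ sdktrace.SpanProcessor = logProcessor{}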
148 vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go generated vendored
@ -16,16 +16,15 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"

import (
	"context"
	rt "runtime/trace"

	"go.opentelemetry.io/otel/trace"
	"time"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/trace"
)

type tracer struct {
	provider               *TracerProvider
	instrumentationLibrary instrumentation.Library
	provider             *TracerProvider
	instrumentationScope instrumentation.Scope
}

var _ trace.Tracer = &tracer{}
@ -34,42 +33,129 @@ var _ trace.Tracer = &tracer{}
//
// The Span is created with the provided name and as a child of any existing
// span context found in the passed context. The created Span will be
// configured appropriately by any SpanOption passed. Any Timestamp option
// passed will be used as the start time of the Span's life-cycle.
func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanOption) (context.Context, trace.Span) {
	config := trace.NewSpanConfig(options...)
// configured appropriately by any SpanOption passed.
func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
	config := trace.NewSpanStartConfig(options...)

	if ctx == nil {
		// Prevent trace.ContextWithSpan from panicking.
		ctx = context.Background()
	}

	// For local spans created by this SDK, track child span count.
	if p := trace.SpanFromContext(ctx); p != nil {
		if sdkSpan, ok := p.(*span); ok {
		if sdkSpan, ok := p.(*recordingSpan); ok {
			sdkSpan.addChild()
		}
	}

	span := startSpanInternal(ctx, tr, name, config)
	for _, l := range config.Links {
		span.addLink(l)
	}
	span.SetAttributes(config.Attributes...)

	span.tracer = tr

	if span.IsRecording() {
	s := tr.newSpan(ctx, name, &config)
	if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
		sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates)
		for _, sp := range sps {
			sp.sp.OnStart(ctx, span)
			sp.sp.OnStart(ctx, rw)
		}
	}
	if rtt, ok := s.(runtimeTracer); ok {
		ctx = rtt.runtimeTrace(ctx)
	}

	ctx, span.executionTracerTaskEnd = func(ctx context.Context) (context.Context, func()) {
		if !rt.IsEnabled() {
			// Avoid additional overhead if
			// runtime/trace is not enabled.
			return ctx, func() {}
		}
		nctx, task := rt.NewTask(ctx, name)
		return nctx, task.End
	}(ctx)

	return trace.ContextWithSpan(ctx, span), span
	return trace.ContextWithSpan(ctx, s), s
}

type runtimeTracer interface {
	// runtimeTrace starts a "runtime/trace".Task for the span and
	// returns a context containing the task.
	runtimeTrace(ctx context.Context) context.Context
}

// newSpan returns a new configured span.
func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
	// If told explicitly to make this a new root use a zero value SpanContext
	// as a parent which contains an invalid trace ID and is not remote.
	var psc trace.SpanContext
	if config.NewRoot() {
		ctx = trace.ContextWithSpanContext(ctx, psc)
	} else {
		psc = trace.SpanContextFromContext(ctx)
	}

	// If there is a valid parent trace ID, use it to ensure the continuity of
	// the trace. Always generate a new span ID so other components can rely
	// on a unique span ID, even if the Span is non-recording.
	var tid trace.TraceID
	var sid trace.SpanID
	if !psc.TraceID().IsValid() {
		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
	} else {
		tid = psc.TraceID()
		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
	}

	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
		ParentContext: ctx,
		TraceID:       tid,
		Name:          name,
		Kind:          config.SpanKind(),
		Attributes:    config.Attributes(),
		Links:         config.Links(),
	})

	scc := trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceState: samplingResult.Tracestate,
	}
	if isSampled(samplingResult) {
		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
	} else {
		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
	}
	sc := trace.NewSpanContext(scc)

	if !isRecording(samplingResult) {
		return tr.newNonRecordingSpan(sc)
	}
	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
}

// newRecordingSpan returns a new configured recordingSpan.
func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
	startTime := config.Timestamp()
	if startTime.IsZero() {
		startTime = time.Now()
	}

	s := &recordingSpan{
		// Do not pre-allocate the attributes slice here! Doing so will
		// allocate memory that is likely never going to be used, or if used,
		// will be over-sized. The default Go compiler has been tested to
		// dynamically allocate needed space very well. Benchmarking has shown
		// it to be more performant than what we can predetermine here,
		// especially for the common use case of few to no added
		// attributes.

		parent:      psc,
		spanContext: sc,
		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
		name:        name,
		startTime:   startTime,
		events:      newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
		links:       newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
		tracer:      tr,
	}

	for _, l := range config.Links() {
		s.addLink(l)
	}

	s.SetAttributes(sr.Attributes...)
	s.SetAttributes(config.Attributes()...)

	return s
}

// newNonRecordingSpan returns a new configured nonRecordingSpan.
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
	return nonRecordingSpan{tracer: tr, sc: sc}
}
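Editor's note: a usage sketch of the Start path above from application code, assuming a globally registered provider and the standard otel.Tracer helper. The span name and attribute are illustrative.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	// Start goes through tracer.Start above: the sampler decides whether the
	// span records, and a recording span is handed to registered processors.
	ctx, span := otel.Tracer("example").Start(ctx, "doWork",
		trace.WithSpanKind(trace.SpanKindInternal),
		trace.WithAttributes(attribute.String("component", "demo")), // illustrative attribute
	)
	defer span.End()

	// ... work that may start child spans from ctx ...
	_ = ctx
}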