rebase: bump github.com/prometheus/client_golang from 1.12.0 to 1.12.1

Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.12.0 to 1.12.1.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.12.0...v1.12.1)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
dependabot[bot] authored 2022-01-31 20:12:30 +00:00; committed by mergify[bot]
parent f6894909d7
commit 3014b722ad
6 changed files with 146 additions and 29 deletions

go.mod

@@ -20,7 +20,7 @@ require (
 	github.com/onsi/ginkgo v1.16.5
 	github.com/onsi/gomega v1.18.0
 	github.com/pborman/uuid v1.2.1
-	github.com/prometheus/client_golang v1.12.0
+	github.com/prometheus/client_golang v1.12.1
 	github.com/stretchr/testify v1.7.0
 	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9

go.sum

@@ -936,8 +936,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
-github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

vendor/github.com/prometheus/client_golang/prometheus/collector.go

@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
 func (c *selfCollector) Collect(ch chan<- Metric) {
 	ch <- c.self
 }
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+	Metric
+	Collector
+}
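
Aside (not part of the diff): a minimal sketch against the public client_golang API showing the property that the new unexported collectorMetric interface names, namely that ordinary metric values such as a Gauge already satisfy both Metric and Collector. The metric name below is made up for illustration.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// "demo_gauge" is an arbitrary example name.
	g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_gauge"})

	// A Gauge is a Metric (Desc/Write) and, via the embedded selfCollector,
	// also a Collector (Describe/Collect), so both assignments compile.
	var _ prometheus.Metric = g
	var _ prometheus.Collector = g

	fmt.Println(g.Desc())
}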

vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go

@@ -20,6 +20,7 @@ import (
 	"math"
 	"runtime"
 	"runtime/metrics"
+	"strings"
 	"sync"
 
 	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
@@ -31,10 +32,14 @@ import (
 type goCollector struct {
 	base baseGoCollector
 
+	// mu protects updates to all fields ensuring a consistent
+	// snapshot is always produced by Collect.
+	mu sync.Mutex
+
 	// rm... fields all pertain to the runtime/metrics package.
 	rmSampleBuf []metrics.Sample
 	rmSampleMap map[string]*metrics.Sample
-	rmMetrics   []Metric
+	rmMetrics   []collectorMetric
 
 	// With Go 1.17, the runtime/metrics package was introduced.
 	// From that point on, metric names produced by the runtime/metrics
@@ -52,13 +57,24 @@ type goCollector struct {
 // Deprecated: Use collectors.NewGoCollector instead.
 func NewGoCollector() Collector {
 	descriptions := metrics.All()
-	descMap := make(map[string]*metrics.Description)
-	for i := range descriptions {
-		descMap[descriptions[i].Name] = &descriptions[i]
+
+	// Collect all histogram samples so that we can get their buckets.
+	// The API guarantees that the buckets are always fixed for the lifetime
+	// of the process.
+	var histograms []metrics.Sample
+	for _, d := range descriptions {
+		if d.Kind == metrics.KindFloat64Histogram {
+			histograms = append(histograms, metrics.Sample{Name: d.Name})
+		}
+	}
+	metrics.Read(histograms)
+	bucketsMap := make(map[string][]float64)
+	for i := range histograms {
+		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
 	}
 
 	// Generate a Desc and ValueType for each runtime/metrics metric.
-	metricSet := make([]Metric, 0, len(descriptions))
+	metricSet := make([]collectorMetric, 0, len(descriptions))
 	sampleBuf := make([]metrics.Sample, 0, len(descriptions))
 	sampleMap := make(map[string]*metrics.Sample, len(descriptions))
 	for i := range descriptions {
@@ -76,9 +92,10 @@ func NewGoCollector() Collector {
 		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
 		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
 
-		var m Metric
+		var m collectorMetric
 		if d.Kind == metrics.KindFloat64Histogram {
 			_, hasSum := rmExactSumMap[d.Name]
+			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
 			m = newBatchHistogram(
 				NewDesc(
 					BuildFQName(namespace, subsystem, name),
@@ -86,6 +103,7 @@ func NewGoCollector() Collector {
 					nil,
 					nil,
 				),
+				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
 				hasSum,
 			)
 		} else if d.Cumulative {
@@ -130,9 +148,25 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 	// Collect base non-memory metrics.
 	c.base.Collect(ch)
 
+	// Collect must be thread-safe, so prevent concurrent use of
+	// rmSampleBuf. Just read into rmSampleBuf but write all the data
+	// we get into our Metrics or MemStats.
+	//
+	// This lock also ensures that the Metrics we send out are all from
+	// the same updates, ensuring their mutual consistency insofar as
+	// is guaranteed by the runtime/metrics package.
+	//
+	// N.B. This locking is heavy-handed, but Collect is expected to be called
+	// relatively infrequently. Also the core operation here, metrics.Read,
+	// is fast (O(tens of microseconds)) so contention should certainly be
+	// low, though channel operations and any allocations may add to that.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
 	// Populate runtime/metrics sample buffer.
 	metrics.Read(c.rmSampleBuf)
 
+	// Update all our metrics from rmSampleBuf.
 	for i, sample := range c.rmSampleBuf {
 		// N.B. switch on concrete type because it's significantly more efficient
 		// than checking for the Counter and Gauge interface implementations. In
@@ -157,7 +191,6 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 			panic("unexpected metric type")
 		}
 	}
-
 	// ms is a dummy MemStats that we populate ourselves so that we can
 	// populate the old metrics from it.
 	var ms runtime.MemStats
@@ -280,13 +313,27 @@ type batchHistogram struct {
 	// but Write calls may operate concurrently with updates.
 	// Contention between these two sources should be rare.
 	mu      sync.Mutex
-	buckets []float64 // Inclusive lower bounds.
+	buckets []float64 // Inclusive lower bounds, like runtime/metrics.
 	counts  []uint64
 	sum     float64 // Used if hasSum is true.
 }
 
-func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
-	h := &batchHistogram{desc: desc, hasSum: hasSum}
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+	h := &batchHistogram{
+		desc:    desc,
+		buckets: buckets,
+		// Because buckets follows runtime/metrics conventions, there's
+		// 1 more value in the buckets list than there are buckets represented,
+		// because in runtime/metrics, the bucket values represent *boundaries*,
+		// and non-Inf boundaries are inclusive lower bounds for that bucket.
+		counts: make([]uint64, len(buckets)-1),
+		hasSum: hasSum,
+	}
 	h.init(h)
 	return h
 }
@@ -294,28 +341,25 @@ func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
 // update updates the batchHistogram from a runtime/metrics histogram.
 //
 // sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
 func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
 	counts, buckets := his.Counts, his.Buckets
-	// Skip a -Inf bucket altogether. It's not clear how to represent that.
-	if math.IsInf(buckets[0], -1) {
-		buckets = buckets[1:]
-		counts = counts[1:]
-	}
 
 	h.mu.Lock()
 	defer h.mu.Unlock()
 
-	// Check if we're initialized.
-	if h.buckets == nil {
-		// Make copies of counts and buckets. It's really important
-		// that we don't retain his.Counts or his.Buckets anywhere since
-		// it's going to get reused.
-		h.buckets = make([]float64, len(buckets))
-		copy(h.buckets, buckets)
-
-		h.counts = make([]uint64, len(counts))
+	// Clear buckets.
+	for i := range h.counts {
+		h.counts[i] = 0
+	}
+	// Copy and reduce buckets.
+	var j int
+	for i, count := range counts {
+		h.counts[j] += count
+		if buckets[i+1] == h.buckets[j+1] {
+			j++
+		}
 	}
-	copy(h.counts, counts)
 	if h.hasSum {
 		h.sum = sum
 	}
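
For context rather than as part of the diff, a minimal sketch of how this collector is typically wired up; the dedicated registry and the port are illustrative choices. Each scrape of /metrics gathers from the registry, which ends up in the Collect method shown above, now serialized by the new mutex.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry so only the Go runtime metrics are exposed.
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())

	// Each scrape gathers from the registry, which invokes the Go collector's Collect.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}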

vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go

@@ -17,6 +17,7 @@
 package internal
 
 import (
+	"math"
 	"path"
 	"runtime/metrics"
 	"strings"
@@ -75,3 +76,67 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 	}
 	return namespace, subsystem, name, valid
 }
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+	switch unit {
+	case "bytes":
+		// Rebucket as powers of 2.
+		return rebucketExp(buckets, 2)
+	case "seconds":
+		// Rebucket as powers of 10 and then merge all buckets greater
+		// than 1 second into the +Inf bucket.
+		b := rebucketExp(buckets, 10)
+		for i := range b {
+			if b[i] <= 1 {
+				continue
+			}
+			b[i] = math.Inf(1)
+			b = b[:i+1]
+			break
+		}
+		return b
+	}
+	return buckets
+}
+
+// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func rebucketExp(buckets []float64, base float64) []float64 {
+	bucket := buckets[0]
+	var newBuckets []float64
+	// We may see a -Inf here, in which case, add it and skip it
+	// since we risk producing NaNs otherwise.
+	//
+	// We need to preserve -Inf values to maintain runtime/metrics
+	// conventions. We'll strip it out later.
+	if bucket == math.Inf(-1) {
+		newBuckets = append(newBuckets, bucket)
+		buckets = buckets[1:]
+		bucket = buckets[0]
+	}
+	// From now on, bucket should always have a non-Inf value because
+	// Infs are only ever at the ends of the bucket lists, so
+	// arithmetic operations on it are non-NaN.
+	for i := 1; i < len(buckets); i++ {
+		if bucket >= 0 && buckets[i] < bucket*base {
+			// The next bucket we want to include is at least bucket*base.
+			continue
+		} else if bucket < 0 && buckets[i] < bucket/base {
+			// In this case the bucket we're targeting is negative, and since
+			// we're ascending through buckets here, we need to divide to get
+			// closer to zero exponentially.
+			continue
+		}
+		// The +Inf bucket will always be the last one, and we'll always
+		// end up including it here because bucket
+		newBuckets = append(newBuckets, bucket)
+		bucket = buckets[i]
+	}
+	return append(newBuckets, bucket)
+}
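
To make the rebucketing concrete, here is a self-contained sketch (not part of the diff) that mirrors the core idea of rebucketExp on a made-up boundary list; it omits the -Inf handling and the per-unit capping, and the input values are hypothetical.

package main

import (
	"fmt"
	"math"
)

// downsampleExp is a simplified, illustrative copy of the rebucketExp idea:
// keep a boundary only once the next candidate is at least base times larger,
// collapsing a dense lower-bound list into a roughly exponential one.
func downsampleExp(buckets []float64, base float64) []float64 {
	bucket := buckets[0]
	var out []float64
	for i := 1; i < len(buckets); i++ {
		if bucket >= 0 && buckets[i] < bucket*base {
			continue
		}
		out = append(out, bucket)
		bucket = buckets[i]
	}
	return append(out, bucket)
}

func main() {
	// Hypothetical dense boundaries (lower-bound inclusive, +Inf last).
	in := []float64{1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, math.Inf(1)}
	fmt.Println(downsampleExp(in, 2)) // prints [1 2 4 8 16 32 64 +Inf]
}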

vendor/modules.txt

@@ -401,7 +401,7 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.12.0
+# github.com/prometheus/client_golang v1.12.1
 ## explicit; go 1.13
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors