Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-12-18 02:50:30 +00:00)
rebase: bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.15.1 to 1.16.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.15.1...v1.16.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
parent f815eb46fd
commit 5cf55eda72
go.mod (4 changed lines)

@@ -25,7 +25,7 @@ require (
     github.com/onsi/ginkgo/v2 v2.10.0
     github.com/onsi/gomega v1.27.8
     github.com/pkg/xattr v0.4.9
-    github.com/prometheus/client_golang v1.15.1
+    github.com/prometheus/client_golang v1.16.0
     github.com/stretchr/testify v1.8.4
     golang.org/x/crypto v0.9.0
     golang.org/x/net v0.10.0
@@ -132,7 +132,7 @@ require (
     github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
     github.com/prometheus/client_model v0.4.0 // indirect
     github.com/prometheus/common v0.42.0 // indirect
-    github.com/prometheus/procfs v0.9.0 // indirect
+    github.com/prometheus/procfs v0.10.1 // indirect
     github.com/ryanuber/go-glob v1.0.0 // indirect
     github.com/spf13/cobra v1.6.0 // indirect
     github.com/spf13/pflag v1.0.5 // indirect
go.sum (8 changed lines)

@@ -1010,8 +1010,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
-github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1044,8 +1044,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
+github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
vendor/github.com/prometheus/client_golang/prometheus/desc.go (generated, vendored; 4 changed lines)

@@ -18,12 +18,12 @@ import (
     "sort"
     "strings"
 
-    "github.com/prometheus/client_golang/prometheus/internal"
-
     "github.com/cespare/xxhash/v2"
     dto "github.com/prometheus/client_model/go"
     "github.com/prometheus/common/model"
     "google.golang.org/protobuf/proto"
+
+    "github.com/prometheus/client_golang/prometheus/internal"
 )
 
 // Desc is the descriptor used by every Prometheus Metric. It is essentially
vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored; 10 changed lines)

@@ -401,7 +401,7 @@ type HistogramOpts struct {
     // Histogram by a Prometheus server with that feature enabled (requires
     // Prometheus v2.40+). Sparse buckets are exponential buckets covering
     // the whole float64 range (with the exception of the “zero” bucket, see
-    // SparseBucketsZeroThreshold below). From any one bucket to the next,
+    // NativeHistogramZeroThreshold below). From any one bucket to the next,
     // the width of the bucket grows by a constant
     // factor. NativeHistogramBucketFactor provides an upper bound for this
     // factor (exception see below). The smaller
@@ -432,7 +432,7 @@ type HistogramOpts struct {
     // bucket. For best results, this should be close to a bucket
     // boundary. This is usually the case if picking a power of two. If
     // NativeHistogramZeroThreshold is left at zero,
-    // DefSparseBucketsZeroThreshold is used as the threshold. To configure
+    // DefNativeHistogramZeroThreshold is used as the threshold. To configure
     // a zero bucket with an actual threshold of zero (i.e. only
     // observations of precisely zero will go into the zero bucket), set
     // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
@@ -639,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
         if frac == 0.5 {
             key--
         }
-        div := 1 << -schema
-        key = (key + div - 1) / div
+        offset := (1 << -schema) - 1
+        key = (key + offset) >> -schema
     }
     if isInf {
         key++
@@ -817,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) {
     }
 }
 
-// limitSparsebuckets applies a strategy to limit the number of populated sparse
+// limitBuckets applies a strategy to limit the number of populated sparse
 // buckets. It's generally best effort, and there are situations where the
 // number can go higher (if even the lowest resolution isn't enough to reduce
 // the number sufficiently, or if the provided counts aren't fully updated yet
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored; 13 changed lines)

@@ -37,6 +37,7 @@ import (
     "fmt"
     "io"
     "net/http"
+    "strconv"
     "strings"
     "sync"
     "time"
@@ -50,6 +51,7 @@ const (
     contentTypeHeader     = "Content-Type"
     contentEncodingHeader = "Content-Encoding"
     acceptEncodingHeader  = "Accept-Encoding"
+    processStartTimeHeader = "Process-Start-Time-Unix"
 )
 
 var gzipPool = sync.Pool{
@@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
     }
 
     h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+        if !opts.ProcessStartTime.IsZero() {
+            rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+        }
         if inFlightSem != nil {
             select {
             case inFlightSem <- struct{}{}: // All good, carry on.
@@ -366,6 +371,14 @@ type HandlerOpts struct {
     // (which changes the identity of the resulting series on the Prometheus
     // server).
     EnableOpenMetrics bool
+    // ProcessStartTime allows setting process start timevalue that will be exposed
+    // with "Process-Start-Time-Unix" response header along with the metrics
+    // payload. This allow callers to have efficient transformations to cumulative
+    // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
+    // scrape target.
+    // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+    // exposition format.
+    ProcessStartTime time.Time
 }
 
 // gzipAccepted returns whether the client will accept gzip-encoded content.
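A note on the HandlerOpts.ProcessStartTime addition above: exporters opt in by setting the field when building the promhttp handler. The following is a minimal sketch assuming the default registry; the listen address and package layout are illustrative, not taken from ceph-csi.

package main

import (
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    start := time.Now() // recorded once at process start

    // HandlerOpts.ProcessStartTime makes promhttp add the experimental
    // "Process-Start-Time-Unix" header to every metrics response.
    handler := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
        ProcessStartTime: start,
    })

    http.Handle("/metrics", handler)
    _ = http.ListenAndServe(":8080", nil) // error ignored for brevity
}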
vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go (generated, vendored; 18 changed lines)

@@ -287,17 +287,15 @@ func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
 func metricUnits(m string) (unit, base string, ok bool) {
     ss := strings.Split(m, "_")
 
-    for unit, base := range units {
-        // Also check for "no prefix".
-        for _, p := range append(unitPrefixes, "") {
-            for _, s := range ss {
-                // Attempt to explicitly match a known unit with a known prefix,
-                // as some words may look like "units" when matching suffix.
-                //
-                // As an example, "thermometers" should not match "meters", but
-                // "kilometers" should.
-                if s == p+unit {
-                    return p + unit, base, true
+    for _, s := range ss {
+        if base, found := units[s]; found {
+            return s, base, true
+        }
+
+        for _, p := range unitPrefixes {
+            if strings.HasPrefix(s, p) {
+                if base, found := units[s[len(p):]]; found {
+                    return s, base, true
                 }
             }
         }
vendor/github.com/prometheus/client_golang/prometheus/vec.go (generated, vendored; 35 changed lines)

@@ -20,6 +20,24 @@ import (
     "github.com/prometheus/common/model"
 )
 
+var labelsPool = &sync.Pool{
+    New: func() interface{} {
+        return make(Labels)
+    },
+}
+
+func getLabelsFromPool() Labels {
+    return labelsPool.Get().(Labels)
+}
+
+func putLabelsToPool(labels Labels) {
+    for k := range labels {
+        delete(labels, k)
+    }
+
+    labelsPool.Put(labels)
+}
+
 // MetricVec is a Collector to bundle metrics of the same name that differ in
 // their label values. MetricVec is not used directly but as a building block
 // for implementations of vectors of a given metric type, like GaugeVec,
@@ -93,6 +111,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
 // there for pros and cons of the two methods.
 func (m *MetricVec) Delete(labels Labels) bool {
     labels = constrainLabels(m.desc, labels)
+    defer putLabelsToPool(labels)
+
     h, err := m.hashLabels(labels)
     if err != nil {
         return false
@@ -109,6 +129,8 @@ func (m *MetricVec) Delete(labels Labels) bool {
 // To match curried labels with DeletePartialMatch, it must be called on the base vector.
 func (m *MetricVec) DeletePartialMatch(labels Labels) int {
     labels = constrainLabels(m.desc, labels)
+    defer putLabelsToPool(labels)
+
     return m.metricMap.deleteByLabels(labels, m.curry)
 }
 
@@ -229,6 +251,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
 // for example GaugeVec.
 func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
     labels = constrainLabels(m.desc, labels)
+    defer putLabelsToPool(labels)
+
     h, err := m.hashLabels(labels)
     if err != nil {
         return nil, err
@@ -647,15 +671,16 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
 }
 
 func constrainLabels(desc *Desc, labels Labels) Labels {
-    constrainedValues := make(Labels, len(labels))
+    constrainedLabels := getLabelsFromPool()
     for l, v := range labels {
         if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
-            constrainedValues[l] = desc.variableLabels[i].Constrain(v)
-            continue
+            v = desc.variableLabels[i].Constrain(v)
         }
-        constrainedValues[l] = v
+
+        constrainedLabels[l] = v
     }
-    return constrainedValues
+
+    return constrainedLabels
 }
 
 func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
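The labels pool introduced above stays internal to MetricVec: callers still pass prometheus.Labels and the library recycles the constrained copy it builds for each call. A small sketch of the unchanged caller-facing pattern; the metric name and label values are made up for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    requests := prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "demo_requests_total", Help: "Requests by method."},
        []string{"method"},
    )
    prometheus.MustRegister(requests)

    // Each Labels-based call constrains the map internally; since v1.16.0
    // that temporary copy is taken from and returned to a sync.Pool.
    requests.With(prometheus.Labels{"method": "GET"}).Inc()
    requests.Delete(prometheus.Labels{"method": "GET"})
}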
vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored; 16 changed lines)

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.49.0
+GOLANGCI_LINT_VERSION ?= v1.51.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
 PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
 TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
 
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
 ifeq ($(GOHOSTARCH),amd64)
     ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
         # Only supported on amd64
@@ -205,7 +207,7 @@ common-tarball: promu
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:
-    docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+    docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
         -f $(DOCKERFILE_PATH) \
         --build-arg ARCH="$*" \
         --build-arg OS="linux" \
@@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
 .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
 common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
 $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
-    docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+    docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
 .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
 common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
 $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
-    docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
-    docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+    docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+    docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
 
 .PHONY: common-docker-manifest
 common-docker-manifest:
-    DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
-    DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+    DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+    DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 .PHONY: promu
 promu: $(PROMU)
vendor/github.com/prometheus/procfs/fs.go (generated, vendored; 9 changed lines)

@@ -21,6 +21,7 @@ import (
 // kernel data structures.
 type FS struct {
     proc fs.FS
+    real bool
 }
 
 // DefaultMountPoint is the common mount point of the proc filesystem.
@@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
     if err != nil {
         return FS{}, err
     }
-    return FS{fs}, nil
+
+    real, err := isRealProc(mountPoint)
+    if err != nil {
+        return FS{}, err
+    }
+
+    return FS{fs, real}, nil
 }
vendor/github.com/prometheus/procfs/fs_statfs_notype.go (generated, vendored; new file, 23 lines)

@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build netbsd || openbsd || solaris || windows
+// +build netbsd openbsd solaris windows
+
+package procfs
+
+// isRealProc returns true on architectures that don't have a Type argument
+// in their Statfs_t struct
+func isRealProc(mountPoint string) (bool, error) {
+    return true, nil
+}
vendor/github.com/prometheus/procfs/fs_statfs_type.go (generated, vendored; new file, 33 lines)

@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !netbsd && !openbsd && !solaris && !windows
+// +build !netbsd,!openbsd,!solaris,!windows
+
+package procfs
+
+import (
+    "syscall"
+)
+
+// isRealProc determines whether supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+    stat := syscall.Statfs_t{}
+    err := syscall.Statfs(mountPoint, &stat)
+    if err != nil {
+        return false, err
+    }
+
+    // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+    return stat.Type == 0x9fa0, nil
+}
vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored; 15 changed lines)

@@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
     return us, nil
 }
 
+// Parses a uint64 from given hex in string.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+    us := make([]*uint64, 0, len(ss))
+    for _, s := range ss {
+        u, err := strconv.ParseUint(s, 16, 64)
+        if err != nil {
+            return nil, err
+        }
+
+        us = append(us, &u)
+    }
+
+    return us, nil
+}
+
 // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
 func ReadUintFromFile(path string) (uint64, error) {
     data, err := os.ReadFile(path)
vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored; 6 changed lines)

@@ -186,6 +186,8 @@ type NFSOperationStats struct {
     CumulativeTotalResponseMilliseconds uint64
     // Duration from when a request was enqueued to when it was completely handled.
     CumulativeTotalRequestMilliseconds uint64
+    // The average time from the point the client sends RPC requests until it receives the response.
+    AverageRTTMilliseconds float64
     // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
     Errors uint64
 }
@@ -534,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 
             ns = append(ns, n)
         }
-
         opStats := NFSOperationStats{
             Operation: strings.TrimSuffix(ss[0], ":"),
             Requests:  ns[0],
@@ -546,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
             CumulativeTotalResponseMilliseconds: ns[6],
             CumulativeTotalRequestMilliseconds:  ns[7],
         }
+        if ns[0] != 0 {
+            opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
+        }
 
         if len(ns) > 8 {
             opStats.Errors = ns[8]
vendor/github.com/prometheus/procfs/net_conntrackstat.go (generated, vendored; 88 changed lines)

@@ -18,7 +18,6 @@ import (
     "bytes"
     "fmt"
     "io"
-    "strconv"
     "strings"
 
     "github.com/prometheus/procfs/internal/util"
@@ -28,9 +27,13 @@ import (
 // and contains netfilter conntrack statistics at one CPU core.
 type ConntrackStatEntry struct {
     Entries      uint64
+    Searched     uint64
     Found        uint64
+    New          uint64
     Invalid      uint64
     Ignore       uint64
+    Delete       uint64
+    DeleteList   uint64
     Insert       uint64
     InsertFailed uint64
     Drop         uint64
@@ -81,73 +84,34 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 
 // Parses a ConntrackStatEntry from given array of fields.
 func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
-    if len(fields) != 17 {
-        return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
-    }
-    entry := &ConntrackStatEntry{}
-
-    entries, err := parseConntrackStatField(fields[0])
-    if err != nil {
-        return nil, err
-    }
-    entry.Entries = entries
-
-    found, err := parseConntrackStatField(fields[2])
-    if err != nil {
-        return nil, err
-    }
-    entry.Found = found
-
-    invalid, err := parseConntrackStatField(fields[4])
-    if err != nil {
-        return nil, err
-    }
-    entry.Invalid = invalid
-
-    ignore, err := parseConntrackStatField(fields[5])
-    if err != nil {
-        return nil, err
-    }
-    entry.Ignore = ignore
-
-    insert, err := parseConntrackStatField(fields[8])
-    if err != nil {
-        return nil, err
-    }
-    entry.Insert = insert
-
-    insertFailed, err := parseConntrackStatField(fields[9])
-    if err != nil {
-        return nil, err
-    }
-    entry.InsertFailed = insertFailed
-
-    drop, err := parseConntrackStatField(fields[10])
-    if err != nil {
-        return nil, err
-    }
-    entry.Drop = drop
-
-    earlyDrop, err := parseConntrackStatField(fields[11])
-    if err != nil {
-        return nil, err
-    }
-    entry.EarlyDrop = earlyDrop
-
-    searchRestart, err := parseConntrackStatField(fields[16])
-    if err != nil {
-        return nil, err
-    }
-    entry.SearchRestart = searchRestart
-
-    return entry, nil
-}
-
-// Parses a uint64 from given hex in string.
-func parseConntrackStatField(field string) (uint64, error) {
-    val, err := strconv.ParseUint(field, 16, 64)
-    if err != nil {
-        return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
-    }
-    return val, err
+    entries, err := util.ParseHexUint64s(fields)
+    if err != nil {
+        return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
+    }
+    numEntries := len(entries)
+    if numEntries < 16 || numEntries > 17 {
+        return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
+    }
+
+    stats := &ConntrackStatEntry{
+        Entries:      *entries[0],
+        Searched:     *entries[1],
+        Found:        *entries[2],
+        New:          *entries[3],
+        Invalid:      *entries[4],
+        Ignore:       *entries[5],
+        Delete:       *entries[6],
+        DeleteList:   *entries[7],
+        Insert:       *entries[8],
+        InsertFailed: *entries[9],
+        Drop:         *entries[10],
+        EarlyDrop:    *entries[11],
+    }
+
+    // Ignore missing search_restart on Linux < 2.6.35.
+    if numEntries == 17 {
+        stats.SearchRestart = *entries[16]
+    }
+
+    return stats, nil
 }
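With this rewrite, ConntrackStatEntry also carries the previously skipped columns (Searched, New, Delete, DeleteList). A hedged sketch of reading them through the public procfs API; the output formatting is illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint) // "/proc"
    if err != nil {
        log.Fatal(err)
    }

    // One entry per CPU, parsed from /proc/net/stat/nf_conntrack.
    entries, err := fs.ConntrackStat()
    if err != nil {
        log.Fatal(err)
    }
    for i, e := range entries {
        fmt.Printf("cpu %d: entries=%d searched=%d delete_list=%d\n",
            i, e.Entries, e.Searched, e.DeleteList)
    }
}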
vendor/github.com/prometheus/procfs/net_softnet.go (generated, vendored; 5 changed lines)

@@ -76,6 +76,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
     s := bufio.NewScanner(r)
 
     var stats []SoftnetStat
+    cpuIndex := 0
     for s.Scan() {
         columns := strings.Fields(s.Text())
         width := len(columns)
@@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
 
             softnetStat.SoftnetBacklogLen = us[0]
             softnetStat.Index = us[1]
+        } else {
+            // For older kernels, create the Index based on the scan line number.
+            softnetStat.Index = uint32(cpuIndex)
         }
         softnetStat.Width = width
         stats = append(stats, softnetStat)
+        cpuIndex++
     }
 
     return stats, nil
vendor/github.com/prometheus/procfs/net_wireless.go (generated, vendored; new file, 182 lines)

@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+    "bufio"
+    "bytes"
+    "fmt"
+    "io"
+    "strconv"
+    "strings"
+
+    "github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+    Name string
+
+    // Status is the current 4-digit hex value status of the interface.
+    Status uint64
+
+    // QualityLink is the link quality.
+    QualityLink int
+
+    // QualityLevel is the signal gain (dBm).
+    QualityLevel int
+
+    // QualityNoise is the signal noise baseline (dBm).
+    QualityNoise int
+
+    // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+    DiscardedNwid int
+
+    // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+    DiscardedCrypt int
+
+    // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+    DiscardedFrag int
+
+    // DiscardedRetry is the number of discarded packets that reached max MAC retries.
+    DiscardedRetry int
+
+    // DiscardedMisc is the number of discarded packets for other reasons.
+    DiscardedMisc int
+
+    // MissedBeacon is the number of missed beacons/superframe.
+    MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+    b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+    if err != nil {
+        return nil, err
+    }
+
+    m, err := parseWireless(bytes.NewReader(b))
+    if err != nil {
+        return nil, fmt.Errorf("failed to parse wireless: %w", err)
+    }
+
+    return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
+ face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
+ eth1: 0000    5.  -256.  -10.       0      1      0     3      0        0
+ eth2: 0000    5.  -256.  -20.       0      2      0     4      0        0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+    var (
+        interfaces []*Wireless
+        scanner    = bufio.NewScanner(r)
+    )
+
+    for n := 0; scanner.Scan(); n++ {
+        // Skip the 2 header lines.
+        if n < 2 {
+            continue
+        }
+
+        line := scanner.Text()
+
+        parts := strings.Split(line, ":")
+        if len(parts) != 2 {
+            return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
+        }
+
+        name := strings.TrimSpace(parts[0])
+        stats := strings.Fields(parts[1])
+
+        if len(stats) < 10 {
+            return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
+        }
+
+        status, err := strconv.ParseUint(stats[0], 16, 16)
+        if err != nil {
+            return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
+        }
+
+        qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
+        }
+
+        qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
+        }
+
+        qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
+        }
+
+        dnwid, err := strconv.Atoi(stats[4])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
+        }
+
+        dcrypt, err := strconv.Atoi(stats[5])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
+        }
+
+        dfrag, err := strconv.Atoi(stats[6])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
+        }
+
+        dretry, err := strconv.Atoi(stats[7])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
+        }
+
+        dmisc, err := strconv.Atoi(stats[8])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
+        }
+
+        mbeacon, err := strconv.Atoi(stats[9])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
+        }
+
+        w := &Wireless{
+            Name:           name,
+            Status:         status,
+            QualityLink:    qlink,
+            QualityLevel:   qlevel,
+            QualityNoise:   qnoise,
+            DiscardedNwid:  dnwid,
+            DiscardedCrypt: dcrypt,
+            DiscardedFrag:  dfrag,
+            DiscardedRetry: dretry,
+            DiscardedMisc:  dmisc,
+            MissedBeacon:   mbeacon,
+        }
+
+        interfaces = append(interfaces, w)
+    }
+
+    if err := scanner.Err(); err != nil {
+        return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
+    }
+
+    return interfaces, nil
+}
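Wireless is a new public accessor on procfs.FS, so callers can read /proc/net/wireless directly. A minimal sketch; field selection and formatting are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }

    ifaces, err := fs.Wireless() // parses /proc/net/wireless
    if err != nil {
        log.Fatal(err)
    }
    for _, w := range ifaces {
        fmt.Printf("%s: link=%d level=%d dBm missed_beacons=%d\n",
            w.Name, w.QualityLink, w.QualityLevel, w.MissedBeacon)
    }
}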
vendor/github.com/prometheus/procfs/netstat.go (generated, vendored; 21 changed lines)

@@ -15,7 +15,6 @@ package procfs
 
 import (
     "bufio"
-    "io"
     "os"
     "path/filepath"
     "strconv"
@@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
     var netStatsTotal []NetStat
 
     for _, filePath := range statFiles {
-        file, err := os.Open(filePath)
-        if err != nil {
-            return nil, err
-        }
-
-        procNetstat, err := parseNetstat(file)
+        procNetstat, err := parseNetstat(filePath)
         if err != nil {
             return nil, err
         }
@@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) {
 
 // parseNetstat parses the metrics from `/proc/net/stat/` file
 // and returns a NetStat structure.
-func parseNetstat(r io.Reader) (NetStat, error) {
-    var (
-        scanner = bufio.NewScanner(r)
-        netStat = NetStat{
-            Stats: make(map[string][]uint64),
-        }
-    )
-
+func parseNetstat(filePath string) (NetStat, error) {
+    netStat := NetStat{
+        Stats: make(map[string][]uint64),
+    }
+    file, err := os.Open(filePath)
+    if err != nil {
+        return netStat, err
+    }
+    defer file.Close()
+
+    scanner := bufio.NewScanner(file)
     scanner.Scan()
 
     // First string is always a header for stats
vendor/github.com/prometheus/procfs/proc.go (generated, vendored; 22 changed lines)

@@ -21,7 +21,6 @@ import (
     "strconv"
     "strings"
 
-    "github.com/prometheus/procfs/internal/fs"
     "github.com/prometheus/procfs/internal/util"
 )
 
@@ -30,7 +29,7 @@ type Proc struct {
     // The process ID.
     PID int
 
-    fs fs.FS
+    fs FS
 }
 
 // Procs represents a list of Proc structs.
@@ -92,7 +91,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
     if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
         return Proc{}, err
     }
-    return Proc{PID: pid, fs: fs.proc}, nil
+    return Proc{PID: pid, fs: fs}, nil
 }
 
 // AllProcs returns a list of all currently available processes.
@@ -114,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
         if err != nil {
             continue
         }
-        p = append(p, Proc{PID: int(pid), fs: fs.proc})
+        p = append(p, Proc{PID: int(pid), fs: fs})
     }
 
     return p, nil
@@ -237,6 +236,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
 // FileDescriptorsLen returns the number of currently open file descriptors of
 // a process.
 func (p Proc) FileDescriptorsLen() (int, error) {
+    // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+    if p.fs.real {
+        stat, err := os.Stat(p.path("fd"))
+        if err != nil {
+            return 0, err
+        }
+
+        size := stat.Size()
+        if size > 0 {
+            return int(size), nil
+        }
+    }
+
     fds, err := p.fileDescriptors()
     if err != nil {
         return 0, err
@@ -285,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
 }
 
 func (p Proc) path(pa ...string) string {
-    return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+    return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
 }
 
 // FileDescriptorsInfo retrieves information about all file descriptors of
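The FileDescriptorsLen fast path above only kicks in when the FS was opened on a real procfs mount and the kernel reports a non-zero size for /proc/<pid>/fd (Linux v6.2+); otherwise the fd directory is still listed. The caller side is unchanged, as in this sketch.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }

    p, err := fs.Proc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }

    // On Linux v6.2+ with a real /proc this is a single stat();
    // elsewhere procfs falls back to reading the fd directory.
    n, err := p.FileDescriptorsLen()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("open file descriptors:", n)
}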
vendor/github.com/prometheus/procfs/proc_stat.go (generated, vendored; 6 changed lines)

@@ -18,7 +18,6 @@ import (
     "fmt"
     "os"
 
-    "github.com/prometheus/procfs/internal/fs"
     "github.com/prometheus/procfs/internal/util"
 )
 
@@ -112,7 +111,7 @@ type ProcStat struct {
     // Aggregated block I/O delays, measured in clock ticks (centiseconds).
     DelayAcctBlkIOTicks uint64
 
-    proc fs.FS
+    proc FS
 }
 
 // NewStat returns the current status information of the process.
@@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int {
 
 // StartTime returns the unix timestamp of the process in seconds.
 func (s ProcStat) StartTime() (float64, error) {
-    fs := FS{proc: s.proc}
-    stat, err := fs.Stat()
+    stat, err := s.proc.Stat()
     if err != nil {
         return 0, err
     }
vendor/github.com/prometheus/procfs/proc_status.go (generated, vendored; 32 changed lines)

@@ -15,6 +15,7 @@ package procfs
 
 import (
     "bytes"
+    "sort"
     "strconv"
     "strings"
 
@@ -76,6 +77,9 @@ type ProcStatus struct {
     UIDs [4]string
     // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
     GIDs [4]string
+
+    // CpusAllowedList: List of cpu cores processes are allowed to run on.
+    CpusAllowedList []uint64
 }
 
 // NewStatus returns the current status information of the process.
@@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
         s.VoluntaryCtxtSwitches = vUint
     case "nonvoluntary_ctxt_switches":
         s.NonVoluntaryCtxtSwitches = vUint
+    case "Cpus_allowed_list":
+        s.CpusAllowedList = calcCpusAllowedList(vString)
     }
+
 }
 
 // TotalCtxtSwitches returns the total context switch.
 func (s ProcStatus) TotalCtxtSwitches() uint64 {
     return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
 }
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+    s := strings.Split(cpuString, ",")
+
+    var g []uint64
+
+    for _, cpu := range s {
+        // parse cpu ranges, example: 1-3=[1,2,3]
+        if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+            startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+            endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+            for i := startCPU; i <= endCPU; i++ {
+                g = append(g, i)
+            }
+        } else if len(l) == 1 {
+            cpu, _ := strconv.ParseUint(l[0], 10, 64)
+            g = append(g, cpu)
+        }
+
+    }
+
+    sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+    return g
+}
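CpusAllowedList is populated from the Cpus_allowed_list line of /proc/<pid>/status, with ranges such as 0-3 expanded into individual CPU numbers. A sketch of reading it through the public API; the printed text is illustrative.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }

    p, err := fs.Proc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }

    status, err := p.NewStatus() // parses /proc/<pid>/status
    if err != nil {
        log.Fatal(err)
    }
    // e.g. "Cpus_allowed_list: 0-3,6" becomes [0 1 2 3 6].
    fmt.Println("allowed CPUs:", status.CpusAllowedList)
}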
vendor/github.com/prometheus/procfs/thread.go (generated, vendored; 9 changed lines)

@@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
         if err != nil {
             continue
         }
-        t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
+
+        t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
     }
 
     return t, nil
@@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
     if _, err := os.Stat(taskPath); err != nil {
         return Proc{}, err
     }
-    return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
+    return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
 }
 
 // Thread returns a process for a given TID of Proc.
 func (proc Proc) Thread(tid int) (Proc, error) {
-    tfs := fsi.FS(proc.path("task"))
-    if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
+    tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
+    if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
         return Proc{}, err
     }
     return Proc{PID: tid, fs: tfs}, nil
vendor/modules.txt (vendored; 6 changed lines)

@@ -506,7 +506,7 @@ github.com/pkg/xattr
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.15.1
+# github.com/prometheus/client_golang v1.16.0
 ## explicit; go 1.17
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors
@@ -522,8 +522,8 @@ github.com/prometheus/client_model/go
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.9.0
-## explicit; go 1.18
+# github.com/prometheus/procfs v0.10.1
+## explicit; go 1.19
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util