Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)

rebase: update kubernetes to v1.20.0

updated kubernetes packages to latest release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

committed by mergify[bot]
parent 4abe128bd8
commit 83559144b1
2  vendor/github.com/prometheus/client_golang/prometheus/counter.go  (generated, vendored)
@@ -309,6 +309,8 @@ type CounterFunc interface {
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
1  vendor/github.com/prometheus/client_golang/prometheus/desc.go  (generated, vendored)
@@ -20,6 +20,7 @@ import (
"strings"

"github.com/cespare/xxhash/v2"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
3  vendor/github.com/prometheus/client_golang/prometheus/histogram.go  (generated, vendored)
@@ -22,6 +22,7 @@ import (
"sync/atomic"
"time"

//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"

dto "github.com/prometheus/client_model/go"
@@ -606,7 +607,7 @@ func NewConstHistogram(
}

// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstMetric would have returned an error.
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
desc *Desc,
count uint64,
1  vendor/github.com/prometheus/client_golang/prometheus/metric.go  (generated, vendored)
@@ -17,6 +17,7 @@ import (
"strings"
"time"

//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
24  vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go  (generated, vendored)
@@ -33,18 +33,22 @@ var (
)

type processMemoryCounters struct {
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
// System interface description
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex

// Refer to the Golang internal implementation
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
_ uint32
PageFaultCount uint32
PeakWorkingSetSize uint64
WorkingSetSize uint64
QuotaPeakPagedPoolUsage uint64
QuotaPagedPoolUsage uint64
QuotaPeakNonPagedPoolUsage uint64
QuotaNonPagedPoolUsage uint64
PagefileUsage uint64
PeakPagefileUsage uint64
PrivateUsage uint64
PeakWorkingSetSize uintptr
WorkingSetSize uintptr
QuotaPeakPagedPoolUsage uintptr
QuotaPagedPoolUsage uintptr
QuotaPeakNonPagedPoolUsage uintptr
QuotaNonPagedPoolUsage uintptr
PagefileUsage uintptr
PeakPagefileUsage uintptr
PrivateUsage uintptr
}

func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
1  vendor/github.com/prometheus/client_golang/prometheus/registry.go  (generated, vendored)
@@ -26,6 +26,7 @@ import (
"unicode/utf8"

"github.com/cespare/xxhash/v2"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
1  vendor/github.com/prometheus/client_golang/prometheus/summary.go  (generated, vendored)
@@ -23,6 +23,7 @@ import (
"time"

"github.com/beorn7/perks/quantile"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"

dto "github.com/prometheus/client_model/go"
46  vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go  (generated, vendored, new file)
@@ -0,0 +1,46 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package testutil

import (
"fmt"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil/promlint"
)

// CollectAndLint registers the provided Collector with a newly created pedantic
// Registry. It then calls GatherAndLint with that Registry and with the
// provided metricNames.
func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) {
reg := prometheus.NewPedanticRegistry()
if err := reg.Register(c); err != nil {
return nil, fmt.Errorf("registering collector failed: %s", err)
}
return GatherAndLint(reg, metricNames...)
}

// GatherAndLint gathers all metrics from the provided Gatherer and checks them
// with the linter in the promlint package. If any metricNames are provided,
// only metrics with those names are checked.
func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) {
got, err := g.Gather()
if err != nil {
return nil, fmt.Errorf("gathering metrics failed: %s", err)
}
if metricNames != nil {
got = filterMetrics(got, metricNames)
}
return promlint.NewWithMetricFamilies(got).Lint()
}
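For orientation, a minimal, hypothetical test sketch (not part of this diff) of how the new testutil.CollectAndLint helper might be used; the collector and metric name are invented, and the missing "_total" suffix is what the linter should report:

package example_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestCollectorLint(t *testing.T) {
	// Hypothetical counter; promlint should flag the missing "_total" suffix.
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests",
		Help: "Number of example requests handled.",
	})
	problems, err := testutil.CollectAndLint(c)
	if err != nil {
		t.Fatal(err)
	}
	for _, p := range problems {
		t.Errorf("lint problem on %s: %s", p.Metric, p.Text)
	}
}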
386  vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go  (generated, vendored, new file)
@@ -0,0 +1,386 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package promlint provides a linter for Prometheus metrics.
package promlint

import (
"fmt"
"io"
"regexp"
"sort"
"strings"

"github.com/prometheus/common/expfmt"

dto "github.com/prometheus/client_model/go"
)

// A Linter is a Prometheus metrics linter. It identifies issues with metric
// names, types, and metadata, and reports them to the caller.
type Linter struct {
// The linter will read metrics in the Prometheus text format from r and
// then lint it, _and_ it will lint the metrics provided directly as
// MetricFamily proto messages in mfs. Note, however, that the current
// constructor functions New and NewWithMetricFamilies only ever set one
// of them.
r io.Reader
mfs []*dto.MetricFamily
}

// A Problem is an issue detected by a Linter.
type Problem struct {
// The name of the metric indicated by this Problem.
Metric string

// A description of the issue for this Problem.
Text string
}

// newProblem is helper function to create a Problem.
func newProblem(mf *dto.MetricFamily, text string) Problem {
return Problem{
Metric: mf.GetName(),
Text: text,
}
}

// New creates a new Linter that reads an input stream of Prometheus metrics in
// the Prometheus text exposition format.
func New(r io.Reader) *Linter {
return &Linter{
r: r,
}
}

// NewWithMetricFamilies creates a new Linter that reads from a slice of
// MetricFamily protobuf messages.
func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter {
return &Linter{
mfs: mfs,
}
}

// Lint performs a linting pass, returning a slice of Problems indicating any
// issues found in the metrics stream. The slice is sorted by metric name
// and issue description.
func (l *Linter) Lint() ([]Problem, error) {
var problems []Problem

if l.r != nil {
d := expfmt.NewDecoder(l.r, expfmt.FmtText)

mf := &dto.MetricFamily{}
for {
if err := d.Decode(mf); err != nil {
if err == io.EOF {
break
}

return nil, err
}

problems = append(problems, lint(mf)...)
}
}
for _, mf := range l.mfs {
problems = append(problems, lint(mf)...)
}

// Ensure deterministic output.
sort.SliceStable(problems, func(i, j int) bool {
if problems[i].Metric == problems[j].Metric {
return problems[i].Text < problems[j].Text
}
return problems[i].Metric < problems[j].Metric
})

return problems, nil
}

// lint is the entry point for linting a single metric.
func lint(mf *dto.MetricFamily) []Problem {
fns := []func(mf *dto.MetricFamily) []Problem{
lintHelp,
lintMetricUnits,
lintCounter,
lintHistogramSummaryReserved,
lintMetricTypeInName,
lintReservedChars,
lintCamelCase,
lintUnitAbbreviations,
}

var problems []Problem
for _, fn := range fns {
problems = append(problems, fn(mf)...)
}

// TODO(mdlayher): lint rules for specific metrics types.
return problems
}

// lintHelp detects issues related to the help text for a metric.
func lintHelp(mf *dto.MetricFamily) []Problem {
var problems []Problem

// Expect all metrics to have help text available.
if mf.Help == nil {
problems = append(problems, newProblem(mf, "no help text"))
}

return problems
}

// lintMetricUnits detects issues with metric unit names.
func lintMetricUnits(mf *dto.MetricFamily) []Problem {
var problems []Problem

unit, base, ok := metricUnits(*mf.Name)
if !ok {
// No known units detected.
return nil
}

// Unit is already a base unit.
if unit == base {
return nil
}

problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit)))

return problems
}

// lintCounter detects issues specific to counters, as well as patterns that should
// only be used with counters.
func lintCounter(mf *dto.MetricFamily) []Problem {
var problems []Problem

isCounter := mf.GetType() == dto.MetricType_COUNTER
isUntyped := mf.GetType() == dto.MetricType_UNTYPED
hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total")

switch {
case isCounter && !hasTotalSuffix:
problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`))
case !isUntyped && !isCounter && hasTotalSuffix:
problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`))
}

return problems
}

// lintHistogramSummaryReserved detects when other types of metrics use names or labels
// reserved for use by histograms and/or summaries.
func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem {
// These rules do not apply to untyped metrics.
t := mf.GetType()
if t == dto.MetricType_UNTYPED {
return nil
}

var problems []Problem

isHistogram := t == dto.MetricType_HISTOGRAM
isSummary := t == dto.MetricType_SUMMARY

n := mf.GetName()

if !isHistogram && strings.HasSuffix(n, "_bucket") {
problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`))
}
if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") {
problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`))
}
if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") {
problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`))
}

for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
ln := l.GetName()

if !isHistogram && ln == "le" {
problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`))
}
if !isSummary && ln == "quantile" {
problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`))
}
}
}

return problems
}

// lintMetricTypeInName detects when metric types are included in the metric name.
func lintMetricTypeInName(mf *dto.MetricFamily) []Problem {
var problems []Problem
n := strings.ToLower(mf.GetName())

for i, t := range dto.MetricType_name {
if i == int32(dto.MetricType_UNTYPED) {
continue
}

typename := strings.ToLower(t)
if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) {
problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename)))
}
}
return problems
}

// lintReservedChars detects colons in metric names.
func lintReservedChars(mf *dto.MetricFamily) []Problem {
var problems []Problem
if strings.Contains(mf.GetName(), ":") {
problems = append(problems, newProblem(mf, "metric names should not contain ':'"))
}
return problems
}

var camelCase = regexp.MustCompile(`[a-z][A-Z]`)

// lintCamelCase detects metric names and label names written in camelCase.
func lintCamelCase(mf *dto.MetricFamily) []Problem {
var problems []Problem
if camelCase.FindString(mf.GetName()) != "" {
problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'"))
}

for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if camelCase.FindString(l.GetName()) != "" {
problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'"))
}
}
}
return problems
}

// lintUnitAbbreviations detects abbreviated units in the metric name.
func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
var problems []Problem
n := strings.ToLower(mf.GetName())
for _, s := range unitAbbreviations {
if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) {
problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units"))
}
}
return problems
}

// metricUnits attempts to detect known unit types used as part of a metric name,
// e.g. "foo_bytes_total" or "bar_baz_milligrams".
func metricUnits(m string) (unit string, base string, ok bool) {
ss := strings.Split(m, "_")

for unit, base := range units {
// Also check for "no prefix".
for _, p := range append(unitPrefixes, "") {
for _, s := range ss {
// Attempt to explicitly match a known unit with a known prefix,
// as some words may look like "units" when matching suffix.
//
// As an example, "thermometers" should not match "meters", but
// "kilometers" should.
if s == p+unit {
return p + unit, base, true
}
}
}
}

return "", "", false
}

// Units and their possible prefixes recognized by this library. More can be
// added over time as needed.
var (
// map a unit to the appropriate base unit.
units = map[string]string{
// Base units.
"amperes": "amperes",
"bytes": "bytes",
"celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases.
"grams": "grams",
"joules": "joules",
"kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements).
"meters": "meters", // Both American and international spelling permitted.
"metres": "metres",
"seconds": "seconds",
"volts": "volts",

// Non base units.
// Time.
"minutes": "seconds",
"hours": "seconds",
"days": "seconds",
"weeks": "seconds",
// Temperature.
"kelvins": "kelvin",
"fahrenheit": "celsius",
"rankine": "celsius",
// Length.
"inches": "meters",
"yards": "meters",
"miles": "meters",
// Bytes.
"bits": "bytes",
// Energy.
"calories": "joules",
// Mass.
"pounds": "grams",
"ounces": "grams",
}

unitPrefixes = []string{
"pico",
"nano",
"micro",
"milli",
"centi",
"deci",
"deca",
"hecto",
"kilo",
"kibi",
"mega",
"mibi",
"giga",
"gibi",
"tera",
"tebi",
"peta",
"pebi",
}

// Common abbreviations that we'd like to discourage.
unitAbbreviations = []string{
"s",
"ms",
"us",
"ns",
"sec",
"b",
"kb",
"mb",
"gb",
"tb",
"pb",
"m",
"h",
"d",
}
)
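As a rough usage sketch (again not part of the diff), the linter can also be fed the text exposition format directly via New; the metric below is made up and should trip the camelCase and abbreviated-unit checks:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
)

func main() {
	// Hypothetical scrape output in the Prometheus text format.
	metrics := "# HELP requestDuration_ms Duration of requests.\n" +
		"# TYPE requestDuration_ms gauge\n" +
		"requestDuration_ms 42\n"

	problems, err := promlint.New(strings.NewReader(metrics)).Lint()
	if err != nil {
		panic(err)
	}
	for _, p := range problems {
		fmt.Printf("%s: %s\n", p.Metric, p.Text)
	}
}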
66  vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go  (generated, vendored)
@@ -31,6 +31,10 @@
// testing custom prometheus.Collector implementations and in particular whole
// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
// and convert it into Prometheus metrics.
//
// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect
// metrics that have issues with their name, type, or metadata without being
// necessarily invalid, e.g. a counter with a name missing the “_total” suffix.
package testutil

import (
@@ -108,36 +112,48 @@ func ToFloat64(c prometheus.Collector) float64 {
panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
}

// CollectAndCount collects all Metrics from the provided Collector and returns their number.
//
// This can be used to assert the number of metrics collected by a given collector after certain operations.
//
// This function is only for testing purposes, and even for testing, other approaches
// are often more appropriate (see this package's documentation).
func CollectAndCount(c prometheus.Collector) int {
var (
mCount int
mChan = make(chan prometheus.Metric)
done = make(chan struct{})
)
// CollectAndCount registers the provided Collector with a newly created
// pedantic Registry. It then calls GatherAndCount with that Registry and with
// the provided metricNames. In the unlikely case that the registration or the
// gathering fails, this function panics. (This is inconsistent with the other
// CollectAnd… functions in this package and has historical reasons. Changing
// the function signature would be a breaking change and will therefore only
// happen with the next major version bump.)
func CollectAndCount(c prometheus.Collector, metricNames ...string) int {
reg := prometheus.NewPedanticRegistry()
if err := reg.Register(c); err != nil {
panic(fmt.Errorf("registering collector failed: %s", err))
}
result, err := GatherAndCount(reg, metricNames...)
if err != nil {
panic(err)
}
return result
}

go func() {
for range mChan {
mCount++
}
close(done)
}()
// GatherAndCount gathers all metrics from the provided Gatherer and counts
// them. It returns the number of metric children in all gathered metric
// families together. If any metricNames are provided, only metrics with those
// names are counted.
func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) {
got, err := g.Gather()
if err != nil {
return 0, fmt.Errorf("gathering metrics failed: %s", err)
}
if metricNames != nil {
got = filterMetrics(got, metricNames)
}

c.Collect(mChan)
close(mChan)
<-done

return mCount
result := 0
for _, mf := range got {
result += len(mf.GetMetric())
}
return result, nil
}

// CollectAndCompare registers the provided Collector with a newly created
// pedantic Registry. It then does the same as GatherAndCompare, gathering the
// metrics from the pedantic Registry.
// pedantic Registry. It then calls GatherAndCompare with that Registry and with
// the provided metricNames.
func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
reg := prometheus.NewPedanticRegistry()
if err := reg.Register(c); err != nil {
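A small, hypothetical fragment (assuming a *testing.T named t is in scope) illustrating the new variadic CollectAndCount signature; the metric name and labels are invented:

requests := prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "example_requests_total", Help: "Handled requests."},
	[]string{"code"},
)
requests.WithLabelValues("200").Inc()
requests.WithLabelValues("500").Inc()

// Two label children exist, so the count should be 2; the name filter is optional.
if got := testutil.CollectAndCount(requests, "example_requests_total"); got != 2 {
	t.Errorf("expected 2 metrics, got %d", got)
}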
1  vendor/github.com/prometheus/client_golang/prometheus/value.go  (generated, vendored)
@@ -19,6 +19,7 @@ import (
"time"
"unicode/utf8"

//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
14  vendor/github.com/prometheus/client_golang/prometheus/wrap.go  (generated, vendored)
@@ -17,6 +17,7 @@ import (
"fmt"
"sort"

//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"

dto "github.com/prometheus/client_model/go"
@@ -27,7 +28,8 @@ import (
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
// duplicate any of those labels. Wrapping a nil value is valid, resulting
// in a no-op Registerer.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
@@ -50,6 +52,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided prefix to the name of all Metrics it collects.
// Wrapping a nil value is valid, resulting in a no-op Registerer.
//
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
// a sub-system. To make this work, register metrics of the sub-system with the
@@ -80,6 +83,9 @@ type wrappingRegisterer struct {
}

func (r *wrappingRegisterer) Register(c Collector) error {
if r.wrappedRegisterer == nil {
return nil
}
return r.wrappedRegisterer.Register(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
@@ -88,6 +94,9 @@ func (r *wrappingRegisterer) Register(c Collector) error {
}

func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
if r.wrappedRegisterer == nil {
return
}
for _, c := range cs {
if err := r.Register(c); err != nil {
panic(err)
@@ -96,6 +105,9 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
}

func (r *wrappingRegisterer) Unregister(c Collector) bool {
if r.wrappedRegisterer == nil {
return false
}
return r.wrappedRegisterer.Unregister(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
4  vendor/github.com/prometheus/common/model/time.go  (generated, vendored)
@@ -186,6 +186,10 @@ var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
// Allow 0 without a unit.
if durationStr == "0" {
return 0, nil
}
matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
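A short sketch of the behavioural change this hunk introduces: a bare "0" (no unit) is now accepted, while unit-less non-zero values are still rejected by durationRE:

// fragment; assumes "fmt" and "github.com/prometheus/common/model" are imported
d, err := model.ParseDuration("0")   // newly valid: returns a zero Duration, err == nil
_, err2 := model.ParseDuration("10") // still invalid: "not a valid duration string"
fmt.Println(d, err, err2)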
4  vendor/github.com/prometheus/procfs/.golangci.yml  (generated, vendored)
@@ -1,4 +1,4 @@
---
linters:
enable:
- staticcheck
- govet
- golint
3  vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md  (generated, vendored, new file)
@@ -0,0 +1,3 @@
## Prometheus Community Code of Conduct

Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
43  vendor/github.com/prometheus/procfs/Makefile.common  (generated, vendored)
@@ -69,12 +69,21 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif

PROMU_VERSION ?= 0.4.0
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif

PROMU_VERSION ?= 0.5.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
GOLANGCI_LINT_VERSION ?= v1.18.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -86,7 +95,8 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./
DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom

DOCKER_ARCHS ?= amd64
@@ -140,15 +150,29 @@ else
$(GO) get $(GOOPTS) -t ./...
endif

.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
$(GO) get $$m; \
done
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifneq (,$(wildcard vendor))
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
endif

.PHONY: common-test-short
common-test-short:
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)

.PHONY: common-test
common-test:
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)

$(GOTEST_DIR):
@mkdir -p $@

.PHONY: common-format
common-format:
@@ -200,7 +224,7 @@ endif
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)

.PHONY: common-tarball
common-tarball: promu
@@ -211,9 +235,10 @@ common-tarball: promu
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
$(DOCKERFILE_PATH)
$(DOCKERBUILD_CONTEXT)

.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
309  vendor/github.com/prometheus/procfs/cpuinfo.go  (generated, vendored)
@@ -11,11 +11,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

import (
"bufio"
"bytes"
"errors"
"regexp"
"strconv"
"strings"

@@ -52,6 +56,11 @@ type CPUInfo struct {
PowerManagement string
}

var (
cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
)

// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
@@ -62,14 +71,26 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) {
return parseCPUInfo(data)
}

// parseCPUInfo parses data from /proc/cpuinfo
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
cpuinfo := []CPUInfo{}
i := -1
func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))

// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0

for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) == "" {
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
@@ -82,7 +103,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "vendor_id":
case "vendor", "vendor_id":
cpuinfo[i].VendorID = field[1]
case "cpu family":
cpuinfo[i].CPUFamily = field[1]
@@ -163,5 +184,281 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
}
}
return cpuinfo, nil
}

func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))

firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
featuresLine := ""
commonCPUInfo := CPUInfo{}
i := 0
if strings.TrimSpace(field[0]) == "Processor" {
commonCPUInfo = CPUInfo{ModelName: field[1]}
i = -1
} else {
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo = []CPUInfo{firstcpu}
}

for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "BogoMIPS":
if i == -1 {
cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
i++
cpuinfo[i].Processor = 0
}
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "Features":
featuresLine = line
case "model name":
cpuinfo[i].ModelName = field[1]
}
}
fields := strings.SplitN(featuresLine, ": ", 2)
for i := range cpuinfo {
cpuinfo[i].Flags = strings.Fields(fields[1])
}
return cpuinfo, nil

}

func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))

firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
commonCPUInfo := CPUInfo{VendorID: field[1]}

for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "bogomips per cpu":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
commonCPUInfo.BogoMips = v
case "features":
commonCPUInfo.Flags = strings.Fields(field[1])
}
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, errors.New("Invalid line found in cpuinfo: " + line)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
if err != nil {
return nil, err
}
cpu.Processor = uint(v)
cpuinfo = append(cpuinfo, cpu)
}
if strings.HasPrefix(line, "cpu number") {
break
}
}

i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "cpu number":
i++
case "cpu MHz dynamic":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
}
}

return cpuinfo, nil
}

func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))

// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
systemType := field[1]

i := 0

for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
cpuinfo[i].VendorID = systemType
case "cpu model":
cpuinfo[i].ModelName = field[1]
case "BogoMIPS":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
}
}
return cpuinfo, nil
}

func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))

firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0

for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "cpu":
cpuinfo[i].VendorID = field[1]
case "clock":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
}
}
return cpuinfo, nil
}

func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))

firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0

for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
case "hart":
cpuinfo[i].CoreID = field[1]
case "isa":
cpuinfo[i].ModelName = field[1]
}
}
return cpuinfo, nil
}

func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
return nil, errors.New("not implemented")
}

// firstNonEmptyLine advances the scanner to the first non-empty line
// and returns the contents of that line
func firstNonEmptyLine(scanner *bufio.Scanner) string {
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) != "" {
return line
}
}
return ""
}
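For context, a small hypothetical program showing how the FS.CPUInfo entry point is typically called; at build time the unexported parseCPUInfo variable is bound to one of the per-architecture parsers added in the new files below:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	cpus, err := fs.CPUInfo()
	if err != nil {
		panic(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu %d: %s\n", c.Processor, c.ModelName)
	}
}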
19  vendor/github.com/prometheus/procfs/cpuinfo_armx.go  (generated, vendored, new file)
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build arm arm64

package procfs

var parseCPUInfo = parseCPUInfoARM

19  vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go  (generated, vendored, new file)
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build mips mipsle mips64 mips64le

package procfs

var parseCPUInfo = parseCPUInfoMips

19  vendor/github.com/prometheus/procfs/cpuinfo_others.go  (generated, vendored, new file)
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x

package procfs

var parseCPUInfo = parseCPUInfoDummy

19  vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go  (generated, vendored, new file)
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build ppc64 ppc64le

package procfs

var parseCPUInfo = parseCPUInfoPPC

18  vendor/github.com/prometheus/procfs/cpuinfo_s390x.go  (generated, vendored, new file)
@@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package procfs

var parseCPUInfo = parseCPUInfoS390X

19  vendor/github.com/prometheus/procfs/cpuinfo_x86.go  (generated, vendored, new file)
@@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build 386 amd64

package procfs

var parseCPUInfo = parseCPUInfoX86
158  vendor/github.com/prometheus/procfs/crypto.go  (generated, vendored)
@@ -14,10 +14,10 @@
package procfs

import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"strconv"
"io"
"strings"

"github.com/prometheus/procfs/internal/util"
@@ -52,80 +52,102 @@ type Crypto struct {
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
}
crypto, err := parseCrypto(data)

crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
}

return crypto, nil
}

func parseCrypto(cryptoData []byte) ([]Crypto, error) {
crypto := []Crypto{}
// parseCrypto parses a /proc/crypto stream into Crypto elements.
func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto

cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))

for _, block := range cryptoBlocks {
var newCryptoElem Crypto

lines := strings.Split(string(block), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' {
continue
}
fields := strings.Split(line, ":")
key := strings.TrimSpace(fields[0])
value := strings.TrimSpace(fields[1])
vp := util.NewValueParser(value)

switch strings.TrimSpace(key) {
case "async":
b, err := strconv.ParseBool(value)
if err == nil {
newCryptoElem.Async = b
}
case "blocksize":
newCryptoElem.Blocksize = vp.PUInt64()
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
s := bufio.NewScanner(r)
for s.Scan() {
text := s.Text()
switch {
case strings.HasPrefix(text, "name"):
// Each crypto element begins with its name.
out = append(out, Crypto{})
case text == "":
continue
}

kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
}

k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])

// Parse the key/value pair into the currently focused element.
c := &out[len(out)-1]
if err := c.parseKV(k, v); err != nil {
return nil, err
}
crypto = append(crypto, newCryptoElem)
}
return crypto, nil

if err := s.Err(); err != nil {
return nil, err
}

return out, nil
}

// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)

switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}

return vp.Err()
}
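Likewise, a brief hypothetical fragment for the reworked crypto parser; fs.Crypto still returns one Crypto element per /proc/crypto block, now built line by line through parseCrypto and parseKV (assumes "fmt" and "github.com/prometheus/procfs" are imported):

fs, err := procfs.NewFS("/proc")
if err != nil {
	panic(err)
}
entries, err := fs.Crypto()
if err != nil {
	panic(err)
}
for _, c := range entries {
	fmt.Println(c.Name, c.Driver, c.Type)
}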
881  vendor/github.com/prometheus/procfs/fixtures.ttar  (generated, vendored)
File diff suppressed because it is too large
422  vendor/github.com/prometheus/procfs/fscache.go  (generated, vendored, new file)
@@ -0,0 +1,422 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"

"github.com/prometheus/procfs/internal/util"
)

// Fscacheinfo represents fscache statistics.
type Fscacheinfo struct {
// Number of index cookies allocated
IndexCookiesAllocated uint64
// data storage cookies allocated
DataStorageCookiesAllocated uint64
// Number of special cookies allocated
SpecialCookiesAllocated uint64
// Number of objects allocated
ObjectsAllocated uint64
// Number of object allocation failures
ObjectAllocationsFailure uint64
// Number of objects that reached the available state
ObjectsAvailable uint64
// Number of objects that reached the dead state
ObjectsDead uint64
// Number of objects that didn't have a coherency check
ObjectsWithoutCoherencyCheck uint64
// Number of objects that passed a coherency check
ObjectsWithCoherencyCheck uint64
// Number of objects that needed a coherency data update
ObjectsNeedCoherencyCheckUpdate uint64
// Number of objects that were declared obsolete
ObjectsDeclaredObsolete uint64
// Number of pages marked as being cached
PagesMarkedAsBeingCached uint64
// Number of uncache page requests seen
UncachePagesRequestSeen uint64
// Number of acquire cookie requests seen
AcquireCookiesRequestSeen uint64
// Number of acq reqs given a NULL parent
AcquireRequestsWithNullParent uint64
// Number of acq reqs rejected due to no cache available
AcquireRequestsRejectedNoCacheAvailable uint64
// Number of acq reqs succeeded
AcquireRequestsSucceeded uint64
// Number of acq reqs rejected due to error
AcquireRequestsRejectedDueToError uint64
// Number of acq reqs failed on ENOMEM
AcquireRequestsFailedDueToEnomem uint64
// Number of lookup calls made on cache backends
LookupsNumber uint64
// Number of negative lookups made
LookupsNegative uint64
// Number of positive lookups made
LookupsPositive uint64
// Number of objects created by lookup
ObjectsCreatedByLookup uint64
// Number of lookups timed out and requeued
LookupsTimedOutAndRequed uint64
InvalidationsNumber uint64
InvalidationsRunning uint64
// Number of update cookie requests seen
UpdateCookieRequestSeen uint64
// Number of upd reqs given a NULL parent
UpdateRequestsWithNullParent uint64
// Number of upd reqs granted CPU time
UpdateRequestsRunning uint64
// Number of relinquish cookie requests seen
RelinquishCookiesRequestSeen uint64
// Number of rlq reqs given a NULL parent
RelinquishCookiesWithNullParent uint64
// Number of rlq reqs waited on completion of creation
RelinquishRequestsWaitingCompleteCreation uint64
// Relinqs rtr
RelinquishRetries uint64
// Number of attribute changed requests seen
AttributeChangedRequestsSeen uint64
// Number of attr changed requests queued
AttributeChangedRequestsQueued uint64
// Number of attr changed rejected -ENOBUFS
AttributeChangedRejectDueToEnobufs uint64
// Number of attr changed failed -ENOMEM
AttributeChangedFailedDueToEnomem uint64
// Number of attr changed ops given CPU time
AttributeChangedOps uint64
// Number of allocation requests seen
AllocationRequestsSeen uint64
// Number of successful alloc reqs
AllocationOkRequests uint64
// Number of alloc reqs that waited on lookup completion
AllocationWaitingOnLookup uint64
// Number of alloc reqs rejected -ENOBUFS
AllocationsRejectedDueToEnobufs uint64
// Number of alloc reqs aborted -ERESTARTSYS
AllocationsAbortedDueToErestartsys uint64
// Number of alloc reqs submitted
AllocationOperationsSubmitted uint64
// Number of alloc reqs waited for CPU time
AllocationsWaitedForCPU uint64
// Number of alloc reqs aborted due to object death
AllocationsAbortedDueToObjectDeath uint64
// Number of retrieval (read) requests seen
RetrievalsReadRequests uint64
// Number of successful retr reqs
RetrievalsOk uint64
// Number of retr reqs that waited on lookup completion
RetrievalsWaitingLookupCompletion uint64
// Number of retr reqs returned -ENODATA
RetrievalsReturnedEnodata uint64
// Number of retr reqs rejected -ENOBUFS
RetrievalsRejectedDueToEnobufs uint64
// Number of retr reqs aborted -ERESTARTSYS
RetrievalsAbortedDueToErestartsys uint64
// Number of retr reqs failed -ENOMEM
RetrievalsFailedDueToEnomem uint64
|
||||
// Number of retr reqs submitted
|
||||
RetrievalsRequests uint64
|
||||
// Number of retr reqs waited for CPU time
|
||||
RetrievalsWaitingCPU uint64
|
||||
// Number of retr reqs aborted due to object death
|
||||
RetrievalsAbortedDueToObjectDeath uint64
|
||||
// Number of storage (write) requests seen
|
||||
StoreWriteRequests uint64
|
||||
// Number of successful store reqs
|
||||
StoreSuccessfulRequests uint64
|
||||
// Number of store reqs on a page already pending storage
|
||||
StoreRequestsOnPendingStorage uint64
|
||||
// Number of store reqs rejected -ENOBUFS
|
||||
StoreRequestsRejectedDueToEnobufs uint64
|
||||
// Number of store reqs failed -ENOMEM
|
||||
StoreRequestsFailedDueToEnomem uint64
|
||||
// Number of store reqs submitted
|
||||
StoreRequestsSubmitted uint64
|
||||
// Number of store reqs granted CPU time
|
||||
StoreRequestsRunning uint64
|
||||
// Number of pages given store req processing time
|
||||
StorePagesWithRequestsProcessing uint64
|
||||
// Number of store reqs deleted from tracking tree
|
||||
StoreRequestsDeleted uint64
|
||||
// Number of store reqs over store limit
|
||||
StoreRequestsOverStoreLimit uint64
|
||||
// Number of release reqs against pages with no pending store
|
||||
ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
|
||||
// Number of release reqs against pages stored by time lock granted
|
||||
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
||||
// Number of release reqs ignored due to in-progress store
|
||||
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
||||
// Number of page stores cancelled due to release req
|
||||
PageStoresCancelledByReleaseRequests uint64
|
||||
VmscanWaiting uint64
|
||||
// Number of times async ops added to pending queues
|
||||
OpsPending uint64
|
||||
// Number of times async ops given CPU time
|
||||
OpsRunning uint64
|
||||
// Number of times async ops queued for processing
|
||||
OpsEnqueued uint64
|
||||
// Number of async ops cancelled
|
||||
OpsCancelled uint64
|
||||
// Number of async ops rejected due to object lookup/create failure
|
||||
OpsRejected uint64
|
||||
// Number of async ops initialised
|
||||
OpsInitialised uint64
|
||||
// Number of async ops queued for deferred release
|
||||
OpsDeferred uint64
|
||||
// Number of async ops released (should equal ini=N when idle)
|
||||
OpsReleased uint64
|
||||
// Number of deferred-release async ops garbage collected
|
||||
OpsGarbageCollected uint64
|
||||
// Number of in-progress alloc_object() cache ops
|
||||
CacheopAllocationsinProgress uint64
|
||||
// Number of in-progress lookup_object() cache ops
|
||||
CacheopLookupObjectInProgress uint64
|
||||
// Number of in-progress lookup_complete() cache ops
|
||||
CacheopLookupCompleteInPorgress uint64
|
||||
// Number of in-progress grab_object() cache ops
|
||||
CacheopGrabObjectInProgress uint64
|
||||
CacheopInvalidations uint64
|
||||
// Number of in-progress update_object() cache ops
|
||||
CacheopUpdateObjectInProgress uint64
|
||||
// Number of in-progress drop_object() cache ops
|
||||
CacheopDropObjectInProgress uint64
|
||||
// Number of in-progress put_object() cache ops
|
||||
CacheopPutObjectInProgress uint64
|
||||
// Number of in-progress attr_changed() cache ops
|
||||
CacheopAttributeChangeInProgress uint64
|
||||
// Number of in-progress sync_cache() cache ops
|
||||
CacheopSyncCacheInProgress uint64
|
||||
// Number of in-progress read_or_alloc_page() cache ops
|
||||
CacheopReadOrAllocPageInProgress uint64
|
||||
// Number of in-progress read_or_alloc_pages() cache ops
|
||||
CacheopReadOrAllocPagesInProgress uint64
|
||||
// Number of in-progress allocate_page() cache ops
|
||||
CacheopAllocatePageInProgress uint64
|
||||
// Number of in-progress allocate_pages() cache ops
|
||||
CacheopAllocatePagesInProgress uint64
|
||||
// Number of in-progress write_page() cache ops
|
||||
CacheopWritePagesInProgress uint64
|
||||
// Number of in-progress uncache_page() cache ops
|
||||
CacheopUncachePagesInProgress uint64
|
||||
// Number of in-progress dissociate_pages() cache ops
|
||||
CacheopDissociatePagesInProgress uint64
|
||||
// Number of object lookups/creations rejected due to lack of space
|
||||
CacheevLookupsAndCreationsRejectedLackSpace uint64
|
||||
// Number of stale objects deleted
|
||||
CacheevStaleObjectsDeleted uint64
|
||||
// Number of objects retired when relinquished
|
||||
CacheevRetiredWhenReliquished uint64
|
||||
// Number of objects culled
|
||||
CacheevObjectsCulled uint64
|
||||
}
|
||||
|
||||
// Fscacheinfo returns information about current fscache statistics.
|
||||
// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
|
||||
func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
|
||||
b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
|
||||
if err != nil {
|
||||
return Fscacheinfo{}, err
|
||||
}
|
||||
|
||||
m, err := parseFscacheinfo(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
}
|
||||
|
||||
func setFSCacheFields(fields []string, setFields ...*uint64) error {
|
||||
var err error
|
||||
if len(fields) < len(setFields) {
|
||||
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
|
||||
}
|
||||
|
||||
for i := range setFields {
|
||||
*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
|
||||
var m Fscacheinfo
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
|
||||
}
|
||||
|
||||
switch fields[0] {
|
||||
case "Cookies:":
|
||||
err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
|
||||
&m.SpecialCookiesAllocated)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Objects:":
|
||||
err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
|
||||
&m.ObjectsAvailable, &m.ObjectsDead)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "ChkAux":
|
||||
err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
|
||||
&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Pages":
|
||||
err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Acquire:":
|
||||
err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
|
||||
&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
|
||||
&m.AcquireRequestsFailedDueToEnomem)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Lookups:":
|
||||
err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
|
||||
&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Invals":
|
||||
err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Updates:":
|
||||
err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
|
||||
&m.UpdateRequestsRunning)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Relinqs:":
|
||||
err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
|
||||
&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "AttrChg:":
|
||||
err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
|
||||
&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Allocs":
|
||||
if strings.Split(fields[2], "=")[0] == "n" {
|
||||
err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
|
||||
&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
|
||||
&m.AllocationsAbortedDueToObjectDeath)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "Retrvls:":
|
||||
if strings.Split(fields[1], "=")[0] == "n" {
|
||||
err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
|
||||
&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
|
||||
&m.RetrievalsFailedDueToEnomem)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "Stores":
|
||||
if strings.Split(fields[2], "=")[0] == "n" {
|
||||
err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
|
||||
&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
|
||||
&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "VmScan":
|
||||
err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
|
||||
&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
|
||||
&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Ops":
|
||||
if strings.Split(fields[2], "=")[0] == "pend" {
|
||||
err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "CacheOp:":
|
||||
if strings.Split(fields[1], "=")[0] == "alo" {
|
||||
err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
|
||||
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else if strings.Split(fields[1], "=")[0] == "inv" {
|
||||
err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
|
||||
&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
|
||||
&m.CacheopSyncCacheInProgress)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
|
||||
&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
|
||||
&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "CacheEv:":
|
||||
err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
|
||||
&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &m, nil
|
||||
}
|
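An illustrative usage sketch of the Fscacheinfo API added above (not part of the vendored diff; assumes procfs is mounted at /proc and that fscache statistics are enabled on the host):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // Open the proc filesystem; "/proc" is the conventional mount point.
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // Fscacheinfo reads /proc/fs/fscache/stats, which only exists when fscache is enabled.
    info, err := fs.Fscacheinfo()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
    fmt.Println("objects allocated:", info.ObjectsAllocated)
}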
1
vendor/github.com/prometheus/procfs/go.mod
generated
vendored
@@ -5,4 +5,5 @@ go 1.12
require (
    github.com/google/go-cmp v0.3.1
    golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
    golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
)
2
vendor/github.com/prometheus/procfs/go.sum
generated
vendored
@@ -2,3 +2,5 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
9
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
@@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) {
    return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

// ReadIntFromFile reads a file and attempts to parse a int64 from it.
func ReadIntFromFile(path string) (int64, error) {
    data, err := ioutil.ReadFile(path)
    if err != nil {
        return 0, err
    }
    return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}

// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
    var truth bool
62
vendor/github.com/prometheus/procfs/kernel_random.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
    "os"

    "github.com/prometheus/procfs/internal/util"
)

// KernelRandom contains information about to the kernel's random number generator.
type KernelRandom struct {
    // EntropyAvaliable gives the available entropy, in bits.
    EntropyAvaliable *uint64
    // PoolSize gives the size of the entropy pool, in bits.
    PoolSize *uint64
    // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
    URandomMinReseedSeconds *uint64
    // WriteWakeupThreshold the number of bits of entropy below which we wake up processes
    // that do a select(2) or poll(2) for write access to /dev/random.
    WriteWakeupThreshold *uint64
    // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
    // waiting for entropy from /dev/random.
    ReadWakeupThreshold *uint64
}

// KernelRandom returns values from /proc/sys/kernel/random.
func (fs FS) KernelRandom() (KernelRandom, error) {
    random := KernelRandom{}

    for file, p := range map[string]**uint64{
        "entropy_avail":           &random.EntropyAvaliable,
        "poolsize":                &random.PoolSize,
        "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
        "write_wakeup_threshold":  &random.WriteWakeupThreshold,
        "read_wakeup_threshold":   &random.ReadWakeupThreshold,
    } {
        val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
        if os.IsNotExist(err) {
            continue
        }
        if err != nil {
            return random, err
        }
        *p = &val
    }

    return random, nil
}
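An illustrative usage sketch of the KernelRandom API added above (not part of the vendored diff; assumes procfs is mounted at /proc — individual files under /proc/sys/kernel/random may be absent on some kernels, which is why the struct fields are pointers):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    kr, err := fs.KernelRandom()
    if err != nil {
        log.Fatal(err)
    }
    // A nil pointer means the corresponding file was not present on this kernel.
    if kr.EntropyAvaliable != nil {
        fmt.Println("entropy available:", *kr.EntropyAvaliable)
    }
    if kr.PoolSize != nil {
        fmt.Println("pool size:", *kr.PoolSize)
    }
}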
62
vendor/github.com/prometheus/procfs/loadavg.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "fmt"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// LoadAvg represents an entry in /proc/loadavg
type LoadAvg struct {
    Load1  float64
    Load5  float64
    Load15 float64
}

// LoadAvg returns loadavg from /proc.
func (fs FS) LoadAvg() (*LoadAvg, error) {
    path := fs.proc.Path("loadavg")

    data, err := util.ReadFileNoStat(path)
    if err != nil {
        return nil, err
    }
    return parseLoad(data)
}

// Parse /proc loadavg and return 1m, 5m and 15m.
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
    loads := make([]float64, 3)
    parts := strings.Fields(string(loadavgBytes))
    if len(parts) < 3 {
        return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
    }

    var err error
    for i, load := range parts[0:3] {
        loads[i], err = strconv.ParseFloat(load, 64)
        if err != nil {
            return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
        }
    }
    return &LoadAvg{
        Load1:  loads[0],
        Load5:  loads[1],
        Load15: loads[2],
    }, nil
}
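An illustrative usage sketch of the LoadAvg API added above (not part of the vendored diff; assumes procfs is mounted at /proc):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    avg, err := fs.LoadAvg()
    if err != nil {
        log.Fatal(err)
    }
    // Load1/Load5/Load15 are the 1-, 5- and 15-minute averages from /proc/loadavg.
    fmt.Printf("load1=%.2f load5=%.2f load15=%.2f\n", avg.Load1, avg.Load5, avg.Load15)
}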
7
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
@@ -52,7 +52,7 @@ type MDStat struct {
func (fs FS) MDStat() ([]MDStat, error) {
    data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
    if err != nil {
        return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
        return nil, err
    }
    mdstat, err := parseMDStat(data)
    if err != nil {
@@ -107,11 +107,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
        syncedBlocks := size
        recovering := strings.Contains(lines[syncLineIdx], "recovery")
        resyncing := strings.Contains(lines[syncLineIdx], "resync")
        checking := strings.Contains(lines[syncLineIdx], "check")

        // Append recovery and resyncing state info.
        if recovering || resyncing {
        if recovering || resyncing || checking {
            if recovering {
                state = "recovering"
            } else if checking {
                state = "checking"
            } else {
                state = "resyncing"
            }
20
vendor/github.com/prometheus/procfs/mountinfo.go
generated
vendored
@@ -29,10 +29,10 @@ import (
// is described in the following man page.
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
    // Unique Id for the mount
    MountId int
    // The Id of the parent mount
    ParentId int
    // Unique ID for the mount
    MountID int
    // The ID of the parent mount
    ParentID int
    // The value of `st_dev` for the files on this FS
    MajorMinorVer string
    // The pathname of the directory in the FS that forms
@@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {

    mountInfo := strings.Split(mountString, " ")
    mountInfoLength := len(mountInfo)
    if mountInfoLength < 11 {
    if mountInfoLength < 10 {
        return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
    }

@@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
        SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
    }

    mount.MountId, err = strconv.Atoi(mountInfo[0])
    mount.MountID, err = strconv.Atoi(mountInfo[0])
    if err != nil {
        return nil, fmt.Errorf("failed to parse mount ID")
    }
    mount.ParentId, err = strconv.Atoi(mountInfo[1])
    mount.ParentID, err = strconv.Atoi(mountInfo[1])
    if err != nil {
        return nil, fmt.Errorf("failed to parse parent ID")
    }
@@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
    return optionalFields, nil
}

// Parses the mount options, superblock options.
// mountOptionsParser parses the mount options, superblock options.
func mountOptionsParser(mountOptions string) map[string]string {
    opts := make(map[string]string)
    options := strings.Split(mountOptions, ",")
@@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string {
    return opts
}

// Retrieves mountinfo information from `/proc/self/mountinfo`.
// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
    data, err := util.ReadFileNoStat("/proc/self/mountinfo")
    if err != nil {
@@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) {
    return parseMountInfo(data)
}

// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
// GetProcMounts retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
    data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
    if err != nil {
20
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@@ -186,6 +186,8 @@ type NFSOperationStats struct {
    CumulativeTotalResponseMilliseconds uint64
    // Duration from when a request was enqueued to when it was completely handled.
    CumulativeTotalRequestMilliseconds uint64
    // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
    Errors uint64
}

// A NFSTransportStats contains statistics for the NFS mount RPC requests and
@@ -494,8 +496,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
    const (
        // Number of expected fields in each per-operation statistics set
        numFields = 9
        // Minimum number of expected fields in each per-operation statistics set
        minFields = 9
    )

    var ops []NFSOperationStats
@@ -508,12 +510,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
            break
        }

        if len(ss) != numFields {
        if len(ss) < minFields {
            return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
        }

        // Skip string operation name for integers
        ns := make([]uint64, 0, numFields-1)
        ns := make([]uint64, 0, minFields-1)
        for _, st := range ss[1:] {
            n, err := strconv.ParseUint(st, 10, 64)
            if err != nil {
@@ -523,7 +525,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
            ns = append(ns, n)
        }

        ops = append(ops, NFSOperationStats{
        opStats := NFSOperationStats{
            Operation: strings.TrimSuffix(ss[0], ":"),
            Requests: ns[0],
            Transmissions: ns[1],
@@ -533,7 +535,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
            CumulativeQueueMilliseconds: ns[5],
            CumulativeTotalResponseMilliseconds: ns[6],
            CumulativeTotalRequestMilliseconds: ns[7],
        })
    }

        if len(ns) > 8 {
            opStats.Errors = ns[8]
        }

        ops = append(ops, opStats)
    }

    return ops, s.Err()
153
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
|
||||
// and contains netfilter conntrack statistics at one CPU core
|
||||
type ConntrackStatEntry struct {
|
||||
Entries uint64
|
||||
Found uint64
|
||||
Invalid uint64
|
||||
Ignore uint64
|
||||
Insert uint64
|
||||
InsertFailed uint64
|
||||
Drop uint64
|
||||
EarlyDrop uint64
|
||||
SearchRestart uint64
|
||||
}
|
||||
|
||||
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
|
||||
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
|
||||
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
|
||||
}
|
||||
|
||||
// Parses a slice of ConntrackStatEntries from the given filepath
|
||||
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
||||
// This file is small and can be read with one syscall.
|
||||
b, err := util.ReadFileNoStat(path)
|
||||
if err != nil {
|
||||
// Do not wrap this error so the caller can detect os.IsNotExist and
|
||||
// similar conditions.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stat, err := parseConntrackStat(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
}
|
||||
|
||||
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
|
||||
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
||||
var entries []ConntrackStatEntry
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Scan()
|
||||
for scanner.Scan() {
|
||||
fields := strings.Fields(scanner.Text())
|
||||
conntrackEntry, err := parseConntrackStatEntry(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, *conntrackEntry)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Parses a ConntrackStatEntry from given array of fields
|
||||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||
if len(fields) != 17 {
|
||||
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
|
||||
}
|
||||
entry := &ConntrackStatEntry{}
|
||||
|
||||
entries, err := parseConntrackStatField(fields[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Entries = entries
|
||||
|
||||
found, err := parseConntrackStatField(fields[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Found = found
|
||||
|
||||
invalid, err := parseConntrackStatField(fields[4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Invalid = invalid
|
||||
|
||||
ignore, err := parseConntrackStatField(fields[5])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Ignore = ignore
|
||||
|
||||
insert, err := parseConntrackStatField(fields[8])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Insert = insert
|
||||
|
||||
insertFailed, err := parseConntrackStatField(fields[9])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.InsertFailed = insertFailed
|
||||
|
||||
drop, err := parseConntrackStatField(fields[10])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Drop = drop
|
||||
|
||||
earlyDrop, err := parseConntrackStatField(fields[11])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.EarlyDrop = earlyDrop
|
||||
|
||||
searchRestart, err := parseConntrackStatField(fields[16])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.SearchRestart = searchRestart
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// Parses a uint64 from given hex in string
|
||||
func parseConntrackStatField(field string) (uint64, error) {
|
||||
val, err := strconv.ParseUint(field, 16, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
|
||||
}
|
||||
return val, err
|
||||
}
|
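An illustrative usage sketch of the ConntrackStat API added above (not part of the vendored diff; assumes procfs is mounted at /proc and that the nf_conntrack module is loaded so /proc/net/stat/nf_conntrack exists):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // One ConntrackStatEntry is returned per CPU core.
    entries, err := fs.ConntrackStat()
    if err != nil {
        log.Fatal(err)
    }
    for i, e := range entries {
        fmt.Printf("cpu%d: entries=%d found=%d drop=%d\n", i, e.Entries, e.Found, e.Drop)
    }
}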
115
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
@ -14,78 +14,89 @@
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// For the proc file format details,
|
||||
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
|
||||
// See:
|
||||
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
|
||||
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
|
||||
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
|
||||
|
||||
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
|
||||
type SoftnetEntry struct {
|
||||
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
|
||||
type SoftnetStat struct {
|
||||
// Number of processed packets
|
||||
Processed uint
|
||||
Processed uint32
|
||||
// Number of dropped packets
|
||||
Dropped uint
|
||||
Dropped uint32
|
||||
// Number of times processing packets ran out of quota
|
||||
TimeSqueezed uint
|
||||
TimeSqueezed uint32
|
||||
}
|
||||
|
||||
// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
|
||||
// and then return a slice of SoftnetEntry's.
|
||||
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
|
||||
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
|
||||
var softNetProcFile = "net/softnet_stat"
|
||||
|
||||
// NetSoftnetStat reads data from /proc/net/softnet_stat.
|
||||
func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
|
||||
b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parseSoftnetEntries(data)
|
||||
}
|
||||
|
||||
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
|
||||
lines := strings.Split(string(data), "\n")
|
||||
entries := make([]SoftnetEntry, 0)
|
||||
var err error
|
||||
const (
|
||||
expectedColumns = 11
|
||||
)
|
||||
for _, line := range lines {
|
||||
columns := strings.Fields(line)
|
||||
width := len(columns)
|
||||
if width == 0 {
|
||||
continue
|
||||
}
|
||||
if width != expectedColumns {
|
||||
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
|
||||
}
|
||||
var entry SoftnetEntry
|
||||
if entry, err = parseSoftnetEntry(columns); err != nil {
|
||||
return []SoftnetEntry{}, err
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
entries, err := parseSoftnet(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
|
||||
var err error
|
||||
var processed, dropped, timeSqueezed uint64
|
||||
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
|
||||
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
|
||||
func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
|
||||
const minColumns = 9
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
|
||||
var stats []SoftnetStat
|
||||
for s.Scan() {
|
||||
columns := strings.Fields(s.Text())
|
||||
width := len(columns)
|
||||
|
||||
if width < minColumns {
|
||||
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
|
||||
}
|
||||
|
||||
// We only parse the first three columns at the moment.
|
||||
us, err := parseHexUint32s(columns[0:3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats = append(stats, SoftnetStat{
|
||||
Processed: us[0],
|
||||
Dropped: us[1],
|
||||
TimeSqueezed: us[2],
|
||||
})
|
||||
}
|
||||
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
|
||||
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
|
||||
}
|
||||
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
|
||||
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
|
||||
}
|
||||
return SoftnetEntry{
|
||||
Processed: uint(processed),
|
||||
Dropped: uint(dropped),
|
||||
TimeSqueezed: uint(timeSqueezed),
|
||||
}, nil
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func parseHexUint32s(ss []string) ([]uint32, error) {
|
||||
us := make([]uint32, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
u, err := strconv.ParseUint(s, 16, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us = append(us, uint32(u))
|
||||
}
|
||||
|
||||
return us, nil
|
||||
}
|
||||
|
229
vendor/github.com/prometheus/procfs/net_udp.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// readLimit is used by io.LimitReader while reading the content of the
|
||||
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
|
||||
// as each line represents a single used socket.
|
||||
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
|
||||
// With e.g. 150 Byte per line and the maximum number of 65535,
|
||||
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
|
||||
readLimit = 4294967296 // Byte -> 4 GiB
|
||||
)
|
||||
|
||||
type (
|
||||
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
|
||||
NetUDP []*netUDPLine
|
||||
|
||||
// NetUDPSummary provides already computed values like the total queue lengths or
|
||||
// the total number of used sockets. In contrast to NetUDP it does not collect
|
||||
// the parsed lines into a slice.
|
||||
NetUDPSummary struct {
|
||||
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
|
||||
TxQueueLength uint64
|
||||
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
|
||||
RxQueueLength uint64
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
}
|
||||
|
||||
// netUDPLine represents the fields parsed from a single line
|
||||
// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netUDPLine struct {
|
||||
Sl uint64
|
||||
LocalAddr net.IP
|
||||
LocalPort uint64
|
||||
RemAddr net.IP
|
||||
RemPort uint64
|
||||
St uint64
|
||||
TxQueue uint64
|
||||
RxQueue uint64
|
||||
UID uint64
|
||||
}
|
||||
)
|
||||
|
||||
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
|
||||
// read from /proc/net/udp.
|
||||
func (fs FS) NetUDP() (NetUDP, error) {
|
||||
return newNetUDP(fs.proc.Path("net/udp"))
|
||||
}
|
||||
|
||||
// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
|
||||
// read from /proc/net/udp6.
|
||||
func (fs FS) NetUDP6() (NetUDP, error) {
|
||||
return newNetUDP(fs.proc.Path("net/udp6"))
|
||||
}
|
||||
|
||||
// NetUDPSummary returns already computed statistics like the total queue lengths
|
||||
// for UDP datagrams read from /proc/net/udp.
|
||||
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
|
||||
return newNetUDPSummary(fs.proc.Path("net/udp"))
|
||||
}
|
||||
|
||||
// NetUDP6Summary returns already computed statistics like the total queue lengths
|
||||
// for UDP datagrams read from /proc/net/udp6.
|
||||
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
|
||||
return newNetUDPSummary(fs.proc.Path("net/udp6"))
|
||||
}
|
||||
|
||||
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
|
||||
func newNetUDP(file string) (NetUDP, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netUDP := NetUDP{}
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetUDPLine(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netUDP = append(netUDP, line)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return netUDP, nil
|
||||
}
|
||||
|
||||
// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
|
||||
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netUDPSummary := &NetUDPSummary{}
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetUDPLine(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netUDPSummary.TxQueueLength += line.TxQueue
|
||||
netUDPSummary.RxQueueLength += line.RxQueue
|
||||
netUDPSummary.UsedSockets++
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return netUDPSummary, nil
|
||||
}
|
||||
|
||||
// parseNetUDPLine parses a single line, represented by a list of fields.
|
||||
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
|
||||
line := &netUDPLine{}
|
||||
if len(fields) < 8 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse net udp socket line as it has less then 8 columns: %s",
|
||||
strings.Join(fields, " "),
|
||||
)
|
||||
}
|
||||
var err error // parse error
|
||||
|
||||
// sl
|
||||
s := strings.Split(fields[0], ":")
|
||||
if len(s) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse sl field in udp socket line: %s", fields[0])
|
||||
}
|
||||
|
||||
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
|
||||
}
|
||||
// local_address
|
||||
l := strings.Split(fields[1], ":")
|
||||
if len(l) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address field in udp socket line: %s", fields[1])
|
||||
}
|
||||
if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address value in udp socket line: %s", err)
|
||||
}
|
||||
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address port value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// remote_address
|
||||
r := strings.Split(fields[2], ":")
|
||||
if len(r) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address field in udp socket line: %s", fields[1])
|
||||
}
|
||||
if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address value in udp socket line: %s", err)
|
||||
}
|
||||
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address port value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// st
|
||||
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse st value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// tx_queue and rx_queue
|
||||
q := strings.Split(fields[4], ":")
|
||||
if len(q) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
|
||||
fields[4],
|
||||
)
|
||||
}
|
||||
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
|
||||
}
|
||||
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// uid
|
||||
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse uid value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
224
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
@ -15,7 +15,6 @@ package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@ -27,25 +26,15 @@ import (
|
||||
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
|
||||
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
|
||||
|
||||
const (
|
||||
netUnixKernelPtrIdx = iota
|
||||
netUnixRefCountIdx
|
||||
_
|
||||
netUnixFlagsIdx
|
||||
netUnixTypeIdx
|
||||
netUnixStateIdx
|
||||
netUnixInodeIdx
|
||||
|
||||
// Inode and Path are optional.
|
||||
netUnixStaticFieldsCnt = 6
|
||||
)
|
||||
|
||||
// Constants for the various /proc/net/unix enumerations.
|
||||
// TODO: match against x/sys/unix or similar?
|
||||
const (
|
||||
netUnixTypeStream = 1
|
||||
netUnixTypeDgram = 2
|
||||
netUnixTypeSeqpacket = 5
|
||||
|
||||
netUnixFlagListen = 1 << 16
|
||||
netUnixFlagDefault = 0
|
||||
netUnixFlagListen = 1 << 16
|
||||
|
||||
netUnixStateUnconnected = 1
|
||||
netUnixStateConnecting = 2
|
||||
@ -53,129 +42,127 @@ const (
|
||||
netUnixStateDisconnected = 4
|
||||
)
|
||||
|
||||
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
|
||||
// NetUNIXType is the type of the type field.
|
||||
type NetUNIXType uint64
|
||||
|
||||
// NetUnixType is the type of the type field.
|
||||
type NetUnixType uint64
|
||||
// NetUNIXFlags is the type of the flags field.
|
||||
type NetUNIXFlags uint64
|
||||
|
||||
// NetUnixFlags is the type of the flags field.
|
||||
type NetUnixFlags uint64
|
||||
// NetUNIXState is the type of the state field.
|
||||
type NetUNIXState uint64
|
||||
|
||||
// NetUnixState is the type of the state field.
|
||||
type NetUnixState uint64
|
||||
|
||||
// NetUnixLine represents a line of /proc/net/unix.
|
||||
type NetUnixLine struct {
|
||||
// NetUNIXLine represents a line of /proc/net/unix.
|
||||
type NetUNIXLine struct {
|
||||
KernelPtr string
|
||||
RefCount uint64
|
||||
Protocol uint64
|
||||
Flags NetUnixFlags
|
||||
Type NetUnixType
|
||||
State NetUnixState
|
||||
Flags NetUNIXFlags
|
||||
Type NetUNIXType
|
||||
State NetUNIXState
|
||||
Inode uint64
|
||||
Path string
|
||||
}
|
||||
|
||||
// NetUnix holds the data read from /proc/net/unix.
|
||||
type NetUnix struct {
|
||||
Rows []*NetUnixLine
|
||||
// NetUNIX holds the data read from /proc/net/unix.
|
||||
type NetUNIX struct {
|
||||
Rows []*NetUNIXLine
|
||||
}
|
||||
|
||||
// NewNetUnix returns data read from /proc/net/unix.
|
||||
func NewNetUnix() (*NetUnix, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fs.NewNetUnix()
|
||||
// NetUNIX returns data read from /proc/net/unix.
|
||||
func (fs FS) NetUNIX() (*NetUNIX, error) {
|
||||
return readNetUNIX(fs.proc.Path("net/unix"))
|
||||
}
|
||||
|
||||
// NewNetUnix returns data read from /proc/net/unix.
|
||||
func (fs FS) NewNetUnix() (*NetUnix, error) {
|
||||
return NewNetUnixByPath(fs.proc.Path("net/unix"))
|
||||
}
|
||||
|
||||
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
|
||||
// It might returns an error with partial parsed data, if an error occur after some data parsed.
|
||||
func NewNetUnixByPath(path string) (*NetUnix, error) {
|
||||
f, err := os.Open(path)
|
||||
// readNetUNIX reads data in /proc/net/unix format from the specified file.
|
||||
func readNetUNIX(file string) (*NetUNIX, error) {
|
||||
// This file could be quite large and a streaming read is desirable versus
|
||||
// reading the entire contents at once.
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
return NewNetUnixByReader(f)
|
||||
|
||||
return parseNetUNIX(f)
|
||||
}
|
||||
|
||||
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
|
||||
// It might returns an error with partial parsed data, if an error occur after some data parsed.
|
||||
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
|
||||
nu := &NetUnix{
|
||||
Rows: make([]*NetUnixLine, 0, 32),
|
||||
}
|
||||
scanner := bufio.NewScanner(reader)
|
||||
// Omit the header line.
|
||||
scanner.Scan()
|
||||
header := scanner.Text()
|
||||
// From the man page of proc(5), it does not contain an Inode field,
|
||||
// but in actually it exists.
|
||||
// This code works for both cases.
|
||||
hasInode := strings.Contains(header, "Inode")
|
||||
// parseNetUNIX creates a NetUnix structure from the incoming stream.
|
||||
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
||||
// Begin scanning by checking for the existence of Inode.
|
||||
s := bufio.NewScanner(r)
|
||||
s.Scan()
|
||||
|
||||
minFieldsCnt := netUnixStaticFieldsCnt
|
||||
// From the man page of proc(5), it does not contain an Inode field,
|
||||
// but in actually it exists. This code works for both cases.
|
||||
hasInode := strings.Contains(s.Text(), "Inode")
|
||||
|
||||
// Expect a minimum number of fields, but Inode and Path are optional:
|
||||
// Num RefCount Protocol Flags Type St Inode Path
|
||||
minFields := 6
|
||||
if hasInode {
|
||||
minFieldsCnt++
|
||||
minFields++
|
||||
}
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
|
||||
|
||||
var nu NetUNIX
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
item, err := nu.parseLine(line, hasInode, minFields)
|
||||
if err != nil {
|
||||
return nu, err
|
||||
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
|
||||
}
|
||||
|
||||
nu.Rows = append(nu.Rows, item)
|
||||
}
|
||||
|
||||
return nu, scanner.Err()
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
|
||||
}
|
||||
|
||||
return &nu, nil
|
||||
}
|
||||
|
||||
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
|
||||
fields := strings.Fields(line)
|
||||
fieldsLen := len(fields)
|
||||
if fieldsLen < minFieldsCnt {
|
||||
return nil, fmt.Errorf(
|
||||
"Parse Unix domain failed: expect at least %d fields but got %d",
|
||||
minFieldsCnt, fieldsLen)
|
||||
|
||||
l := len(fields)
|
||||
if l < min {
|
||||
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
|
||||
}
|
||||
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
|
||||
|
||||
// Field offsets are as follows:
|
||||
// Num RefCount Protocol Flags Type St Inode Path
|
||||
|
||||
kernelPtr := strings.TrimSuffix(fields[0], ":")
|
||||
|
||||
users, err := u.parseUsers(fields[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
|
||||
return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
|
||||
}
|
||||
users, err := u.parseUsers(fields[netUnixRefCountIdx])
|
||||
|
||||
flags, err := u.parseFlags(fields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
|
||||
return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
|
||||
}
|
||||
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
|
||||
|
||||
typ, err := u.parseType(fields[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
|
||||
return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
|
||||
}
|
||||
typ, err := u.parseType(fields[netUnixTypeIdx])
|
||||
|
||||
state, err := u.parseState(fields[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
|
||||
}
|
||||
state, err := u.parseState(fields[netUnixStateIdx])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
|
||||
return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
|
||||
}
|
||||
|
||||
var inode uint64
|
||||
if hasInode {
|
||||
inodeStr := fields[netUnixInodeIdx]
|
||||
inode, err = u.parseInode(inodeStr)
|
||||
inode, err = u.parseInode(fields[6])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
|
||||
return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
|
||||
}
|
||||
}
|
||||
|
||||
nuLine := &NetUnixLine{
|
||||
n := &NetUNIXLine{
|
||||
KernelPtr: kernelPtr,
|
||||
RefCount: users,
|
||||
Type: typ,
|
||||
@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
|
||||
}
|
||||
|
||||
// Path field is optional.
|
||||
if fieldsLen > minFieldsCnt {
|
||||
pathIdx := netUnixInodeIdx + 1
|
||||
if l > min {
|
||||
// Path occurs at either index 6 or 7 depending on whether inode is
|
||||
// already present.
|
||||
pathIdx := 7
|
||||
if !hasInode {
|
||||
pathIdx--
|
||||
}
|
||||
nuLine.Path = fields[pathIdx]
|
||||
|
||||
n.Path = fields[pathIdx]
|
||||
}
|
||||
|
||||
return nuLine, nil
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (u NetUnix) parseKernelPtr(str string) (string, error) {
|
||||
if !strings.HasSuffix(str, ":") {
|
||||
return "", errInvalidKernelPtrFmt
|
||||
}
|
||||
return str[:len(str)-1], nil
|
||||
func (u NetUNIX) parseUsers(s string) (uint64, error) {
|
||||
return strconv.ParseUint(s, 16, 32)
|
||||
}
|
||||
|
||||
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
|
||||
return strconv.ParseUint(hexStr, 16, 32)
|
||||
}
|
||||
|
||||
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
|
||||
typ, err := strconv.ParseUint(hexStr, 16, 16)
|
||||
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
|
||||
typ, err := strconv.ParseUint(s, 16, 16)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return NetUnixType(typ), nil
|
||||
|
||||
return NetUNIXType(typ), nil
|
||||
}
|
||||
|
||||
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
|
||||
flags, err := strconv.ParseUint(hexStr, 16, 32)
|
||||
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
|
||||
flags, err := strconv.ParseUint(s, 16, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return NetUnixFlags(flags), nil
|
||||
|
||||
return NetUNIXFlags(flags), nil
|
||||
}
|
||||
|
||||
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
|
||||
st, err := strconv.ParseInt(hexStr, 16, 8)
|
||||
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
|
||||
st, err := strconv.ParseInt(s, 16, 8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return NetUnixState(st), nil
|
||||
|
||||
return NetUNIXState(st), nil
|
||||
}
|
||||
|
||||
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
|
||||
return strconv.ParseUint(inodeStr, 10, 64)
|
||||
func (u NetUNIX) parseInode(s string) (uint64, error) {
|
||||
return strconv.ParseUint(s, 10, 64)
|
||||
}
|
||||
|
||||
func (t NetUnixType) String() string {
|
||||
func (t NetUNIXType) String() string {
|
||||
switch t {
|
||||
case netUnixTypeStream:
|
||||
return "stream"
|
||||
@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
func (f NetUnixFlags) String() string {
|
||||
func (f NetUNIXFlags) String() string {
|
||||
switch f {
|
||||
case netUnixFlagListen:
|
||||
return "listen"
|
||||
@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
func (s NetUnixState) String() string {
|
||||
func (s NetUNIXState) String() string {
|
||||
switch s {
|
||||
case netUnixStateUnconnected:
|
||||
return "unconnected"
|
||||
|
21
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
21
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -134,6 +134,27 @@ func (p Proc) CmdLine() ([]string, error) {
|
||||
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
||||
}
|
||||
|
||||
// Wchan returns the wchan (wait channel) of a process.
|
||||
func (p Proc) Wchan() (string, error) {
|
||||
f, err := os.Open(p.path("wchan"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
wchan := string(data)
|
||||
if wchan == "" || wchan == "0" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return wchan, nil
|
||||
}
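A minimal usage sketch for the new Wchan helper, assuming the package-level procfs.Self constructor that the library already exposes; the output formatting is illustrative, not part of the diff:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self returns a Proc for the current process.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// Wchan is empty for a running process; otherwise it names the
	// kernel function the process is currently sleeping in.
	wchan, err := p.Wchan()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wchan: %q\n", wchan)
}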
|
||||
|
||||
// Comm returns the command name of a process.
|
||||
func (p Proc) Comm() (string, error) {
|
||||
data, err := util.ReadFileNoStat(p.path("comm"))
|
||||
|
98
vendor/github.com/prometheus/procfs/proc_cgroup.go
generated
vendored
Normal file
98
vendor/github.com/prometheus/procfs/proc_cgroup.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
|
||||
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
|
||||
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
|
||||
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
|
||||
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
|
||||
// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
|
||||
// in this hierarchy
|
||||
//
|
||||
// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
|
||||
type Cgroup struct {
|
||||
// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
|
||||
// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
|
||||
HierarchyID int
|
||||
// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
|
||||
// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
|
||||
Controllers []string
|
||||
// Path of this control group, relative to the mount point of the cgroupfs representing this specific
|
||||
// hierarchy
|
||||
Path string
|
||||
}
|
||||
|
||||
// parseCgroupString parses a single line of the /proc/[pid]/cgroup file
|
||||
// Line format is hierarchyID:[controller1,controller2]:path
|
||||
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
|
||||
var err error
|
||||
|
||||
fields := strings.Split(cgroupStr, ":")
|
||||
if len(fields) < 3 {
|
||||
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
|
||||
}
|
||||
|
||||
cgroup := &Cgroup{
|
||||
Path: fields[2],
|
||||
Controllers: nil,
|
||||
}
|
||||
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse hierarchy ID")
|
||||
}
|
||||
if fields[1] != "" {
|
||||
ssNames := strings.Split(fields[1], ",")
|
||||
cgroup.Controllers = append(cgroup.Controllers, ssNames...)
|
||||
}
|
||||
return cgroup, nil
|
||||
}
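As noted above, each cgroup line has the shape hierarchyID:[controller1,controller2]:path. A small standalone sketch of that split — the sample line is an assumption, and this is not the package's internal parser:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative cgroup v1 line: hierarchyID:controllers:path
	line := "7:cpu,cpuacct:/user.slice"

	fields := strings.Split(line, ":")
	if len(fields) < 3 {
		panic("at least 3 fields required")
	}

	hierarchyID, err := strconv.Atoi(fields[0])
	if err != nil {
		panic(err)
	}

	// An empty controllers field (cgroup v2) yields a nil slice.
	var controllers []string
	if fields[1] != "" {
		controllers = strings.Split(fields[1], ",")
	}

	fmt.Println(hierarchyID, controllers, fields[2])
	// prints: 7 [cpu cpuacct] /user.slice
}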
|
||||
|
||||
// parseCgroups reads each line of the /proc/[pid]/cgroup file
|
||||
func parseCgroups(data []byte) ([]Cgroup, error) {
|
||||
var cgroups []Cgroup
|
||||
scanner := bufio.NewScanner(bytes.NewReader(data))
|
||||
for scanner.Scan() {
|
||||
mountString := scanner.Text()
|
||||
parsedMounts, err := parseCgroupString(mountString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cgroups = append(cgroups, *parsedMounts)
|
||||
}
|
||||
|
||||
err := scanner.Err()
|
||||
return cgroups, err
|
||||
}
|
||||
|
||||
// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup slice locating this PID in each process
|
||||
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
|
||||
// so the length of the returned slice is equal to the number of active hierarchies on this system
|
||||
func (p Proc) Cgroups() ([]Cgroup, error) {
|
||||
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseCgroups(data)
|
||||
}
|
34
vendor/github.com/prometheus/procfs/proc_fdinfo.go
generated
vendored
34
vendor/github.com/prometheus/procfs/proc_fdinfo.go
generated
vendored
@ -16,6 +16,7 @@ package procfs
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"regexp"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
@ -23,10 +24,11 @@ import (
|
||||
|
||||
// Regexp variables
|
||||
var (
|
||||
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
|
||||
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
|
||||
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
|
||||
rInotify = regexp.MustCompile(`^inotify`)
|
||||
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
|
||||
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
|
||||
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
|
||||
rInotify = regexp.MustCompile(`^inotify`)
|
||||
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
|
||||
)
|
||||
|
||||
// ProcFDInfo represents file descriptor information.
|
||||
@ -39,7 +41,7 @@ type ProcFDInfo struct {
|
||||
Flags string
|
||||
// Mount point ID
|
||||
MntID string
|
||||
// List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only)
|
||||
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
|
||||
InotifyInfos []InotifyInfo
|
||||
}
|
||||
|
||||
@ -96,15 +98,21 @@ type InotifyInfo struct {
|
||||
|
||||
// InotifyInfo constructor. Only available on kernel 3.8+.
|
||||
func parseInotifyInfo(line string) (*InotifyInfo, error) {
|
||||
r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
|
||||
m := r.FindStringSubmatch(line)
|
||||
i := &InotifyInfo{
|
||||
WD: m[1],
|
||||
Ino: m[2],
|
||||
Sdev: m[3],
|
||||
Mask: m[4],
|
||||
m := rInotifyParts.FindStringSubmatch(line)
|
||||
if len(m) >= 4 {
|
||||
var mask string
|
||||
if len(m) == 5 {
|
||||
mask = m[4]
|
||||
}
|
||||
i := &InotifyInfo{
|
||||
WD: m[1],
|
||||
Ino: m[2],
|
||||
Sdev: m[3],
|
||||
Mask: mask,
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
return i, nil
|
||||
return nil, errors.New("invalid inode entry: " + line)
|
||||
}
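A short sketch of how the combined inotify pattern with its optional mask group behaves against a sample fdinfo line; the line itself is an assumption, and the regexp mirrors rInotifyParts above:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same shape as rInotifyParts; the mask group is optional so kernels
	// that omit it still match.
	re := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)

	// Illustrative fdinfo inotify line.
	line := "inotify wd:3 ino:9e7e sdev:800013 mask:800afce"

	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	// m[1]=wd, m[2]=ino, m[3]=sdev, m[4]=mask ("" when the kernel omits it).
	fmt.Println(m[1], m[2], m[3], m[4])
}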
|
||||
|
||||
// ProcFDInfos represents a list of ProcFDInfo structs.
|
||||
|
209
vendor/github.com/prometheus/procfs/proc_maps.go
generated
vendored
Normal file
209
vendor/github.com/prometheus/procfs/proc_maps.go
generated
vendored
Normal file
@ -0,0 +1,209 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
|
||||
type ProcMapPermissions struct {
|
||||
// mapping has the [R]ead flag set
|
||||
Read bool
|
||||
// mapping has the [W]rite flag set
|
||||
Write bool
|
||||
// mapping has the [X]ecutable flag set
|
||||
Execute bool
|
||||
// mapping has the [S]hared flag set
|
||||
Shared bool
|
||||
// mapping is marked as [P]rivate (copy on write)
|
||||
Private bool
|
||||
}
|
||||
|
||||
// ProcMap contains the process memory-mappings of the process,
|
||||
// read from /proc/[pid]/maps
|
||||
type ProcMap struct {
|
||||
// The start address of current mapping.
|
||||
StartAddr uintptr
|
||||
// The end address of the current mapping
|
||||
EndAddr uintptr
|
||||
// The permissions for this mapping
|
||||
Perms *ProcMapPermissions
|
||||
// The current offset into the file/fd (e.g., shared libs)
|
||||
Offset int64
|
||||
// Device owner of this mapping (major:minor) in Mkdev format.
|
||||
Dev uint64
|
||||
// The inode of the device above
|
||||
Inode uint64
|
||||
// The file or pseudofile (or empty==anonymous)
|
||||
Pathname string
|
||||
}
|
||||
|
||||
// parseDevice parses the device token of a line and converts it to a dev_t
|
||||
// (mkdev) like structure.
|
||||
func parseDevice(s string) (uint64, error) {
|
||||
toks := strings.Split(s, ":")
|
||||
if len(toks) < 2 {
|
||||
return 0, fmt.Errorf("unexpected number of fields")
|
||||
}
|
||||
|
||||
major, err := strconv.ParseUint(toks[0], 16, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
minor, err := strconv.ParseUint(toks[1], 16, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return unix.Mkdev(uint32(major), uint32(minor)), nil
|
||||
}
|
||||
|
||||
// parseAddress just converts a hex-string to a uintptr
|
||||
func parseAddress(s string) (uintptr, error) {
|
||||
a, err := strconv.ParseUint(s, 16, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return uintptr(a), nil
|
||||
}
|
||||
|
||||
// parseAddresses parses the start-end address
|
||||
func parseAddresses(s string) (uintptr, uintptr, error) {
|
||||
toks := strings.Split(s, "-")
|
||||
if len(toks) < 2 {
|
||||
return 0, 0, fmt.Errorf("invalid address")
|
||||
}
|
||||
|
||||
saddr, err := parseAddress(toks[0])
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
eaddr, err := parseAddress(toks[1])
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
return saddr, eaddr, nil
|
||||
}
|
||||
|
||||
// parsePermissions parses a token and returns any that are set.
|
||||
func parsePermissions(s string) (*ProcMapPermissions, error) {
|
||||
if len(s) < 4 {
|
||||
return nil, fmt.Errorf("invalid permissions token")
|
||||
}
|
||||
|
||||
perms := ProcMapPermissions{}
|
||||
for _, ch := range s {
|
||||
switch ch {
|
||||
case 'r':
|
||||
perms.Read = true
|
||||
case 'w':
|
||||
perms.Write = true
|
||||
case 'x':
|
||||
perms.Execute = true
|
||||
case 'p':
|
||||
perms.Private = true
|
||||
case 's':
|
||||
perms.Shared = true
|
||||
}
|
||||
}
|
||||
|
||||
return &perms, nil
|
||||
}
|
||||
|
||||
// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
|
||||
// buffer.
|
||||
func parseProcMap(text string) (*ProcMap, error) {
|
||||
fields := strings.Fields(text)
|
||||
if len(fields) < 5 {
|
||||
return nil, fmt.Errorf("truncated procmap entry")
|
||||
}
|
||||
|
||||
saddr, eaddr, err := parseAddresses(fields[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
perms, err := parsePermissions(fields[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
offset, err := strconv.ParseInt(fields[2], 16, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
device, err := parseDevice(fields[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inode, err := strconv.ParseUint(fields[4], 10, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pathname := ""
|
||||
|
||||
if len(fields) >= 5 {
|
||||
pathname = strings.Join(fields[5:], " ")
|
||||
}
|
||||
|
||||
return &ProcMap{
|
||||
StartAddr: saddr,
|
||||
EndAddr: eaddr,
|
||||
Perms: perms,
|
||||
Offset: offset,
|
||||
Dev: device,
|
||||
Inode: inode,
|
||||
Pathname: pathname,
|
||||
}, nil
|
||||
}
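For orientation, a standard-library-only sketch of how one maps row decomposes into the fields used above; the sample row is invented, and the package's conversion of major:minor into a dev_t via unix.Mkdev is omitted here:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative maps entry: address range, permissions, offset,
	// device (major:minor), inode, and an optional pathname.
	line := "7f1bca83a000-7f1bca9f1000 r-xp 00000000 fd:01 1048637 /usr/lib/libc-2.31.so"

	fields := strings.Fields(line)

	addrs := strings.Split(fields[0], "-")
	start, _ := strconv.ParseUint(addrs[0], 16, 64)
	end, _ := strconv.ParseUint(addrs[1], 16, 64)

	perms := fields[1] // e.g. "r-xp": read, no write, execute, private

	offset, _ := strconv.ParseInt(fields[2], 16, 64)

	dev := strings.Split(fields[3], ":")
	major, _ := strconv.ParseUint(dev[0], 16, 32)
	minor, _ := strconv.ParseUint(dev[1], 16, 32)

	inode, _ := strconv.ParseUint(fields[4], 10, 64)

	// Anything past the inode is the (possibly space-containing) pathname.
	pathname := ""
	if len(fields) > 5 {
		pathname = strings.Join(fields[5:], " ")
	}

	fmt.Println(start, end, perms, offset, major, minor, inode, pathname)
}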
|
||||
|
||||
// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
|
||||
// process.
|
||||
func (p Proc) ProcMaps() ([]*ProcMap, error) {
|
||||
file, err := os.Open(p.path("maps"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
maps := []*ProcMap{}
|
||||
scan := bufio.NewScanner(file)
|
||||
|
||||
for scan.Scan() {
|
||||
m, err := parseProcMap(scan.Text())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
maps = append(maps, m)
|
||||
}
|
||||
|
||||
return maps, nil
|
||||
}
|
165
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
Normal file
165
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
var (
|
||||
// match the header line before each mapped zone in /proc/pid/smaps
|
||||
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
|
||||
)
|
||||
|
||||
type ProcSMapsRollup struct {
|
||||
// Amount of the mapping that is currently resident in RAM
|
||||
Rss uint64
|
||||
// Process's proportional share of this mapping
|
||||
Pss uint64
|
||||
// Size in bytes of clean shared pages
|
||||
SharedClean uint64
|
||||
// Size in bytes of dirty shared pages
|
||||
SharedDirty uint64
|
||||
// Size in bytes of clean private pages
|
||||
PrivateClean uint64
|
||||
// Size in bytes of dirty private pages
|
||||
PrivateDirty uint64
|
||||
// Amount of memory currently marked as referenced or accessed
|
||||
Referenced uint64
|
||||
// Amount of memory that does not belong to any file
|
||||
Anonymous uint64
|
||||
// Amount of would-be-anonymous memory currently on swap
|
||||
Swap uint64
|
||||
// Process's proportional memory on swap
|
||||
SwapPss uint64
|
||||
}
|
||||
|
||||
// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
|
||||
// process.
|
||||
//
|
||||
// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
|
||||
// be read and summed.
|
||||
func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
|
||||
data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return p.procSMapsRollupManual()
|
||||
}
|
||||
if err != nil {
|
||||
return ProcSMapsRollup{}, err
|
||||
}
|
||||
|
||||
lines := strings.Split(string(data), "\n")
|
||||
smaps := ProcSMapsRollup{}
|
||||
|
||||
// skip the first line, which doesn't contain the information we need
|
||||
lines = lines[1:]
|
||||
for _, line := range lines {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := smaps.parseLine(line); err != nil {
|
||||
return ProcSMapsRollup{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return smaps, nil
|
||||
}
|
||||
|
||||
// Read /proc/pid/smaps and do the roll-up in Go code.
|
||||
func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
|
||||
file, err := os.Open(p.path("smaps"))
|
||||
if err != nil {
|
||||
return ProcSMapsRollup{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
smaps := ProcSMapsRollup{}
|
||||
scan := bufio.NewScanner(file)
|
||||
|
||||
for scan.Scan() {
|
||||
line := scan.Text()
|
||||
|
||||
if procSMapsHeaderLine.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := smaps.parseLine(line); err != nil {
|
||||
return ProcSMapsRollup{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return smaps, nil
|
||||
}
|
||||
|
||||
func (s *ProcSMapsRollup) parseLine(line string) error {
|
||||
kv := strings.SplitN(line, ":", 2)
|
||||
if len(kv) != 2 {
|
||||
fmt.Println(line)
|
||||
return errors.New("invalid net/dev line, missing colon")
|
||||
}
|
||||
|
||||
k := kv[0]
|
||||
if k == "VmFlags" {
|
||||
return nil
|
||||
}
|
||||
|
||||
v := strings.TrimSpace(kv[1])
|
||||
v = strings.TrimRight(v, " kB")
|
||||
|
||||
vKBytes, err := strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vBytes := vKBytes * 1024
|
||||
|
||||
s.addValue(k, v, vKBytes, vBytes)
|
||||
|
||||
return nil
|
||||
}
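A tiny sketch of the "Key: value kB" accounting lines this parser consumes, converting kilobytes to bytes as the roll-up does; the sample line is an assumption, and TrimSuffix is used here instead of the TrimRight call above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative smaps/smaps_rollup accounting line.
	line := "Pss:                1024 kB"

	kv := strings.SplitN(line, ":", 2)
	key := kv[0]

	// Strip surrounding whitespace and the " kB" unit suffix.
	v := strings.TrimSpace(kv[1])
	v = strings.TrimSuffix(v, " kB")

	kBytes, _ := strconv.ParseUint(v, 10, 64)
	bytes := kBytes * 1024

	fmt.Println(key, bytes) // Pss 1048576
}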
|
||||
|
||||
func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
|
||||
switch k {
|
||||
case "Rss":
|
||||
s.Rss += vUintBytes
|
||||
case "Pss":
|
||||
s.Pss += vUintBytes
|
||||
case "Shared_Clean":
|
||||
s.SharedClean += vUintBytes
|
||||
case "Shared_Dirty":
|
||||
s.SharedDirty += vUintBytes
|
||||
case "Private_Clean":
|
||||
s.PrivateClean += vUintBytes
|
||||
case "Private_Dirty":
|
||||
s.PrivateDirty += vUintBytes
|
||||
case "Referenced":
|
||||
s.Referenced += vUintBytes
|
||||
case "Anonymous":
|
||||
s.Anonymous += vUintBytes
|
||||
case "Swap":
|
||||
s.Swap += vUintBytes
|
||||
case "SwapPss":
|
||||
s.SwapPss += vUintBytes
|
||||
}
|
||||
}
|
41
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
41
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
@ -33,37 +33,37 @@ type ProcStatus struct {
|
||||
TGID int
|
||||
|
||||
// Peak virtual memory size.
|
||||
VmPeak uint64
|
||||
VmPeak uint64 // nolint:golint
|
||||
// Virtual memory size.
|
||||
VmSize uint64
|
||||
VmSize uint64 // nolint:golint
|
||||
// Locked memory size.
|
||||
VmLck uint64
|
||||
VmLck uint64 // nolint:golint
|
||||
// Pinned memory size.
|
||||
VmPin uint64
|
||||
VmPin uint64 // nolint:golint
|
||||
// Peak resident set size.
|
||||
VmHWM uint64
|
||||
VmHWM uint64 // nolint:golint
|
||||
// Resident set size (sum of RssAnon, RssFile and RssShmem).
|
||||
VmRSS uint64
|
||||
VmRSS uint64 // nolint:golint
|
||||
// Size of resident anonymous memory.
|
||||
RssAnon uint64
|
||||
RssAnon uint64 // nolint:golint
|
||||
// Size of resident file mappings.
|
||||
RssFile uint64
|
||||
RssFile uint64 // nolint:golint
|
||||
// Size of resident shared memory.
|
||||
RssShmem uint64
|
||||
RssShmem uint64 // nolint:golint
|
||||
// Size of data segments.
|
||||
VmData uint64
|
||||
VmData uint64 // nolint:golint
|
||||
// Size of stack segments.
|
||||
VmStk uint64
|
||||
VmStk uint64 // nolint:golint
|
||||
// Size of text segments.
|
||||
VmExe uint64
|
||||
VmExe uint64 // nolint:golint
|
||||
// Shared library code size.
|
||||
VmLib uint64
|
||||
VmLib uint64 // nolint:golint
|
||||
// Page table entries size.
|
||||
VmPTE uint64
|
||||
VmPTE uint64 // nolint:golint
|
||||
// Size of second-level page tables.
|
||||
VmPMD uint64
|
||||
VmPMD uint64 // nolint:golint
|
||||
// Swapped-out virtual memory size by anonymous private.
|
||||
VmSwap uint64
|
||||
VmSwap uint64 // nolint:golint
|
||||
// Size of hugetlb memory portions
|
||||
HugetlbPages uint64
|
||||
|
||||
@ -71,6 +71,11 @@ type ProcStatus struct {
|
||||
VoluntaryCtxtSwitches uint64
|
||||
// Number of involuntary context switches.
|
||||
NonVoluntaryCtxtSwitches uint64
|
||||
|
||||
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
|
||||
UIDs [4]string
|
||||
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
||||
GIDs [4]string
|
||||
}
|
||||
|
||||
// NewStatus returns the current status information of the process.
|
||||
@ -114,6 +119,10 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
|
||||
s.TGID = int(vUint)
|
||||
case "Name":
|
||||
s.Name = vString
|
||||
case "Uid":
|
||||
copy(s.UIDs[:], strings.Split(vString, "\t"))
|
||||
case "Gid":
|
||||
copy(s.GIDs[:], strings.Split(vString, "\t"))
|
||||
case "VmPeak":
|
||||
s.VmPeak = vUintBytes
|
||||
case "VmSize":
|
||||
|
89
vendor/github.com/prometheus/procfs/swaps.go
generated
vendored
Normal file
89
vendor/github.com/prometheus/procfs/swaps.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// Swap represents an entry in /proc/swaps.
|
||||
type Swap struct {
|
||||
Filename string
|
||||
Type string
|
||||
Size int
|
||||
Used int
|
||||
Priority int
|
||||
}
|
||||
|
||||
// Swaps returns a slice of all configured swap devices on the system.
|
||||
func (fs FS) Swaps() ([]*Swap, error) {
|
||||
data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseSwaps(data)
|
||||
}
|
||||
|
||||
func parseSwaps(info []byte) ([]*Swap, error) {
|
||||
swaps := []*Swap{}
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
scanner.Scan() // ignore header line
|
||||
for scanner.Scan() {
|
||||
swapString := scanner.Text()
|
||||
parsedSwap, err := parseSwapString(swapString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
swaps = append(swaps, parsedSwap)
|
||||
}
|
||||
|
||||
err := scanner.Err()
|
||||
return swaps, err
|
||||
}
|
||||
|
||||
func parseSwapString(swapString string) (*Swap, error) {
|
||||
var err error
|
||||
|
||||
swapFields := strings.Fields(swapString)
|
||||
swapLength := len(swapFields)
|
||||
if swapLength < 5 {
|
||||
return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
|
||||
}
|
||||
|
||||
swap := &Swap{
|
||||
Filename: swapFields[0],
|
||||
Type: swapFields[1],
|
||||
}
|
||||
|
||||
swap.Size, err = strconv.Atoi(swapFields[2])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
|
||||
}
|
||||
swap.Used, err = strconv.Atoi(swapFields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
|
||||
}
|
||||
swap.Priority, err = strconv.Atoi(swapFields[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
|
||||
}
|
||||
|
||||
return swap, nil
|
||||
}
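A standalone sketch of how one /proc/swaps row maps onto the Swap fields parsed above; the sample row and its priority value are illustrative:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative /proc/swaps row (the real file has a header line first):
	// Filename        Type        Size     Used  Priority
	line := "/dev/dm-2       partition   131068   176   -2"

	fields := strings.Fields(line)

	// Size and Used are in kilobytes; Priority may be negative.
	size, _ := strconv.Atoi(fields[2])
	used, _ := strconv.Atoi(fields[3])
	priority, _ := strconv.Atoi(fields[4])

	fmt.Println(fields[0], fields[1], size, used, priority)
}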
|