rebase: bump the github-dependencies group with 2 updates

Bumps the github-dependencies group with 2 updates: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) and [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang).


Updates `github.com/aws/aws-sdk-go` from 1.45.16 to 1.45.18
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.45.16...v1.45.18)

Updates `github.com/prometheus/client_golang` from 1.16.0 to 1.17.0
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/v1.17.0/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.16.0...v1.17.0)
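
For reference, the equivalent manual bump in a vendored module tree like this one would be (a sketch, assuming the standard Go toolchain; Dependabot performs the same steps mechanically):

    go get github.com/aws/aws-sdk-go@v1.45.18
    go get github.com/prometheus/client_golang@v1.17.0
    go mod tidy
    go mod vendor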

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: github-dependencies
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: github-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2023-09-28 09:08:59 +00:00, committed by mergify[bot]
parent e504987984
commit fd1dbfe17e
68 changed files with 1015 additions and 535 deletions

go.mod (8 changes)

@@ -4,7 +4,7 @@ go 1.20
 require (
     github.com/IBM/keyprotect-go-client v0.12.2
-    github.com/aws/aws-sdk-go v1.45.16
+    github.com/aws/aws-sdk-go v1.45.18
     github.com/aws/aws-sdk-go-v2/service/sts v1.22.0
     github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
     // TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
@@ -25,7 +25,7 @@ require (
     github.com/onsi/ginkgo/v2 v2.12.1
     github.com/onsi/gomega v1.27.10
     github.com/pkg/xattr v0.4.9
-    github.com/prometheus/client_golang v1.16.0
+    github.com/prometheus/client_golang v1.17.0
     github.com/stretchr/testify v1.8.4
     golang.org/x/crypto v0.13.0
     golang.org/x/net v0.15.0
@@ -130,9 +130,9 @@ require (
     github.com/pierrec/lz4 v2.6.1+incompatible // indirect
     github.com/pkg/errors v0.9.1 // indirect
     github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-    github.com/prometheus/client_model v0.4.0 // indirect
+    github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
     github.com/prometheus/common v0.44.0 // indirect
-    github.com/prometheus/procfs v0.10.1 // indirect
+    github.com/prometheus/procfs v0.11.1 // indirect
     github.com/ryanuber/go-glob v1.0.0 // indirect
     github.com/spf13/cobra v1.7.0 // indirect
     github.com/spf13/pflag v1.0.5 // indirect

go.sum (13 changes)

@@ -716,8 +716,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.45.16 h1:spca2z7UJgoQ5V2fX6XiHDCj2E65kOJAfbUPozSkE24=
-github.com/aws/aws-sdk-go v1.45.16/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.45.18 h1:uSOGg4LFtpQH/bq9FsumMKfZHNl7BdH7WURHOqKXHNU=
+github.com/aws/aws-sdk-go v1.45.18/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc=
 github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g=
@@ -1640,16 +1640,18 @@ github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
 github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
 github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
 github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -1676,8 +1678,9 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
 github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=

vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go

@@ -2572,21 +2572,81 @@ var awsPartition = partition{
             endpointKey{
                 Region: "eu-west-3",
             }: endpoint{},
+            endpointKey{
+                Region: "fips-us-east-1",
+            }: endpoint{
+                Hostname: "appflow-fips.us-east-1.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "us-east-1",
+                },
+                Deprecated: boxedTrue,
+            },
+            endpointKey{
+                Region: "fips-us-east-2",
+            }: endpoint{
+                Hostname: "appflow-fips.us-east-2.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "us-east-2",
+                },
+                Deprecated: boxedTrue,
+            },
+            endpointKey{
+                Region: "fips-us-west-1",
+            }: endpoint{
+                Hostname: "appflow-fips.us-west-1.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "us-west-1",
+                },
+                Deprecated: boxedTrue,
+            },
+            endpointKey{
+                Region: "fips-us-west-2",
+            }: endpoint{
+                Hostname: "appflow-fips.us-west-2.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "us-west-2",
+                },
+                Deprecated: boxedTrue,
+            },
             endpointKey{
                 Region: "sa-east-1",
             }: endpoint{},
             endpointKey{
                 Region: "us-east-1",
             }: endpoint{},
+            endpointKey{
+                Region:  "us-east-1",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "appflow-fips.us-east-1.amazonaws.com",
+            },
             endpointKey{
                 Region: "us-east-2",
             }: endpoint{},
+            endpointKey{
+                Region:  "us-east-2",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "appflow-fips.us-east-2.amazonaws.com",
+            },
             endpointKey{
                 Region: "us-west-1",
             }: endpoint{},
+            endpointKey{
+                Region:  "us-west-1",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "appflow-fips.us-west-1.amazonaws.com",
+            },
             endpointKey{
                 Region: "us-west-2",
             }: endpoint{},
+            endpointKey{
+                Region:  "us-west-2",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "appflow-fips.us-west-2.amazonaws.com",
+            },
         },
     },
     "application-autoscaling": service{
@@ -11044,6 +11104,12 @@ var awsPartition = partition{
             endpointKey{
                 Region: "ca-central-1",
             }: endpoint{},
+            endpointKey{
+                Region:  "ca-central-1",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "email-fips.ca-central-1.amazonaws.com",
+            },
             endpointKey{
                 Region: "eu-central-1",
             }: endpoint{},
@@ -11062,6 +11128,15 @@ var awsPartition = partition{
             endpointKey{
                 Region: "eu-west-3",
             }: endpoint{},
+            endpointKey{
+                Region: "fips-ca-central-1",
+            }: endpoint{
+                Hostname: "email-fips.ca-central-1.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "ca-central-1",
+                },
+                Deprecated: boxedTrue,
+            },
             endpointKey{
                 Region: "fips-us-east-1",
             }: endpoint{
@@ -11071,6 +11146,24 @@ var awsPartition = partition{
                 },
                 Deprecated: boxedTrue,
             },
+            endpointKey{
+                Region: "fips-us-east-2",
+            }: endpoint{
+                Hostname: "email-fips.us-east-2.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "us-east-2",
+                },
+                Deprecated: boxedTrue,
+            },
+            endpointKey{
+                Region: "fips-us-west-1",
+            }: endpoint{
+                Hostname: "email-fips.us-west-1.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "us-west-1",
+                },
+                Deprecated: boxedTrue,
+            },
             endpointKey{
                 Region: "fips-us-west-2",
             }: endpoint{
@@ -11101,9 +11194,21 @@ var awsPartition = partition{
             endpointKey{
                 Region: "us-east-2",
             }: endpoint{},
+            endpointKey{
+                Region:  "us-east-2",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "email-fips.us-east-2.amazonaws.com",
+            },
             endpointKey{
                 Region: "us-west-1",
             }: endpoint{},
+            endpointKey{
+                Region:  "us-west-1",
+                Variant: fipsVariant,
+            }: endpoint{
+                Hostname: "email-fips.us-west-1.amazonaws.com",
+            },
             endpointKey{
                 Region: "us-west-2",
             }: endpoint{},
@@ -18834,12 +18939,30 @@ var awsPartition = partition{
     },
     "meetings-chime": service{
         Endpoints: serviceEndpoints{
+            endpointKey{
+                Region: "ap-northeast-1",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-northeast-2",
+            }: endpoint{},
+            endpointKey{
+                Region: "ap-south-1",
+            }: endpoint{},
             endpointKey{
                 Region: "ap-southeast-1",
             }: endpoint{},
+            endpointKey{
+                Region: "ap-southeast-2",
+            }: endpoint{},
+            endpointKey{
+                Region: "ca-central-1",
+            }: endpoint{},
             endpointKey{
                 Region: "eu-central-1",
             }: endpoint{},
+            endpointKey{
+                Region: "eu-west-2",
+            }: endpoint{},
             endpointKey{
                 Region: "il-central-1",
             }: endpoint{},
@@ -20519,6 +20642,14 @@ var awsPartition = partition{
                 },
                 Deprecated: boxedTrue,
             },
+            endpointKey{
+                Region: "il-central-1",
+            }: endpoint{
+                Hostname: "omics.il-central-1.amazonaws.com",
+                CredentialScope: credentialScope{
+                    Region: "il-central-1",
+                },
+            },
             endpointKey{
                 Region: "us-east-1",
             }: endpoint{
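
These generated tables are what the SDK's endpoint resolver consults at runtime. A minimal sketch of opting into one of the newly added FIPS variants (the resolver call and option below are from the aws-sdk-go endpoints package, not part of this diff; treat the exact option name as an assumption):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // With the tables above, enabling the FIPS variant should resolve
        // to appflow-fips.us-east-1.amazonaws.com.
        ep, err := endpoints.DefaultResolver().EndpointFor(
            "appflow", "us-east-1",
            func(o *endpoints.Options) {
                o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
            },
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(ep.URL)
    }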

vendor/github.com/aws/aws-sdk-go/aws/version.go

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.45.16"
+const SDKVersion = "1.45.18"

vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go

@@ -22,7 +22,7 @@ import "github.com/prometheus/client_golang/prometheus"
 // Prometheus metrics. Note that the data models of expvar and Prometheus are
 // fundamentally different, and that the expvar Collector is inherently slower
 // than native Prometheus metrics. Thus, the expvar Collector is probably great
-// for experiments and prototying, but you should seriously consider a more
+// for experiments and prototyping, but you should seriously consider a more
 // direct implementation of Prometheus metrics for monitoring production
 // systems.
 //

vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go

@@ -132,16 +132,19 @@ type GoCollectionOption uint32
 const (
     // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure.
-    // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
+    //
+    // Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
     GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota
     // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package.
-    // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})
+    //
+    // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})
     // function to enable those metrics in the collector.
     GoRuntimeMetricsCollection
 )
 
 // WithGoCollections allows enabling different collections for Go collector on top of base metrics.
-// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics.
+//
+// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics.
 func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) {
     return func(options *internal.GoCollectorOptions) {
         if flags&GoRuntimeMemStatsCollection == 0 {
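
The deprecation notices above name the functional options that replace the old bit-flag API. A minimal sketch of the replacement, mirroring the calls named in those notices:

    package main

    import (
        "regexp"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/collectors"
    )

    func main() {
        reg := prometheus.NewRegistry()
        // Instead of WithGoCollections(GoRuntimeMetricsCollection), enable
        // runtime/metrics and drop the legacy MemStats-derived metrics.
        reg.MustRegister(collectors.NewGoCollector(
            collectors.WithGoCollectorRuntimeMetrics(
                collectors.GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")},
            ),
            collectors.WithGoCollectorMemStatsMetricsDisabled(),
        ))
    }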

vendor/github.com/prometheus/client_golang/prometheus/counter.go

@@ -20,6 +20,7 @@ import (
     "time"
 
     dto "github.com/prometheus/client_model/go"
+    "google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // Counter is a Metric that represents a single numerical value that only ever
@@ -66,7 +67,7 @@ type CounterVecOpts struct {
     CounterOpts
 
     // VariableLabels are used to partition the metric vector by the given set
-    // of labels. Each label value will be constrained with the optional Contraint
+    // of labels. Each label value will be constrained with the optional Constraint
     // function, if provided.
     VariableLabels ConstrainableLabels
 }
@@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
         nil,
         opts.ConstLabels,
     )
-    result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+    if opts.now == nil {
+        opts.now = time.Now
+    }
+    result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
     result.init(result) // Init self-collection.
+    result.createdTs = timestamppb.New(opts.now())
     return result
 }
@@ -106,10 +111,12 @@ type counter struct {
     selfCollector
     desc *Desc
 
+    createdTs  *timestamppb.Timestamp
     labelPairs []*dto.LabelPair
     exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
 
-    now func() time.Time // To mock out time.Now() for testing.
+    // now is for testing purposes, by default it's time.Now.
+    now func() time.Time
 }
 
 func (c *counter) Desc() *Desc {
@@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error {
         exemplar = e.(*dto.Exemplar)
     }
     val := c.get()
-
-    return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+    return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
 }
 
 func (c *counter) updateExemplar(v float64, l Labels) {
@@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
         opts.VariableLabels,
         opts.ConstLabels,
     )
+    if opts.now == nil {
+        opts.now = time.Now
+    }
     return &CounterVec{
         MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-            if len(lvs) != len(desc.variableLabels) {
-                panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
+            if len(lvs) != len(desc.variableLabels.names) {
+                panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
             }
-            result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+            result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
             result.init(result) // Init self-collection.
+            result.createdTs = timestamppb.New(opts.now())
             return result
         }),
     }
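
The net effect of the plumbing above is that counters now record their creation time. A minimal sketch of observing it (assumes the bumped client_model pinned in go.mod, whose dto.Counter carries the new CreatedTimestamp field):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        c := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "demo_events_total",
            Help: "Demo counter.",
        })
        c.Inc()

        var m dto.Metric
        if err := c.Write(&m); err != nil {
            panic(err)
        }
        // The created timestamp was set by NewCounter via opts.now.
        fmt.Println(m.GetCounter().GetValue(), m.GetCounter().GetCreatedTimestamp().AsTime())
    }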

vendor/github.com/prometheus/client_golang/prometheus/desc.go

@@ -52,7 +52,7 @@ type Desc struct {
     constLabelPairs []*dto.LabelPair
     // variableLabels contains names of labels and normalization function for
     // which the metric maintains variable values.
-    variableLabels ConstrainedLabels
+    variableLabels *compiledLabels
     // id is a hash of the values of the ConstLabels and fqName. This
     // must be unique among all registered descriptors and can therefore be
     // used as an identifier of the descriptor.
@@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
     d := &Desc{
         fqName:         fqName,
         help:           help,
-        variableLabels: variableLabels.constrainedLabels(),
+        variableLabels: variableLabels.compile(),
     }
     if !model.IsValidMetricName(model.LabelValue(fqName)) {
         d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
     // their sorted label names) plus the fqName (at position 0).
     labelValues := make([]string, 1, len(constLabels)+1)
     labelValues[0] = fqName
-    labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
+    labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
     labelNameSet := map[string]struct{}{}
     // First add only the const label names and sort them...
     for labelName := range constLabels {
@@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
     // Now add the variable label names, but prefix them with something that
     // cannot be in a regular label name. That prevents matching the label
     // dimension with a different mix between preset and variable labels.
-    for _, label := range d.variableLabels {
-        if !checkLabelName(label.Name) {
-            d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
+    for _, label := range d.variableLabels.names {
+        if !checkLabelName(label) {
+            d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
             return d
         }
-        labelNames = append(labelNames, "$"+label.Name)
-        labelNameSet[label.Name] = struct{}{}
+        labelNames = append(labelNames, "$"+label)
+        labelNameSet[label] = struct{}{}
     }
     if len(labelNames) != len(labelNameSet) {
         d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
@@ -189,11 +189,19 @@ func (d *Desc) String() string {
             fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
         )
     }
+    vlStrings := make([]string, 0, len(d.variableLabels.names))
+    for _, vl := range d.variableLabels.names {
+        if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+            vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+        } else {
+            vlStrings = append(vlStrings, vl)
+        }
+    }
     return fmt.Sprintf(
-        "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+        "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
         d.fqName,
         d.help,
         strings.Join(lpStrings, ","),
-        d.variableLabels,
+        strings.Join(vlStrings, ","),
     )
 }

vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go

@@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
             continue
         }
         var v interface{}
-        labels := make([]string, len(desc.variableLabels))
+        labels := make([]string, len(desc.variableLabels.names))
         if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
             ch <- NewInvalidMetric(desc, err)
             continue

vendor/github.com/prometheus/client_golang/prometheus/gauge.go

@@ -62,7 +62,7 @@ type GaugeVecOpts struct {
     GaugeOpts
 
     // VariableLabels are used to partition the metric vector by the given set
-    // of labels. Each label value will be constrained with the optional Contraint
+    // of labels. Each label value will be constrained with the optional Constraint
     // function, if provided.
     VariableLabels ConstrainableLabels
 }
@@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) {
 
 func (g *gauge) Write(out *dto.Metric) error {
     val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
-    return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
+    return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
 }
 
 // GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
     )
     return &GaugeVec{
         MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-            if len(lvs) != len(desc.variableLabels) {
-                panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
+            if len(lvs) != len(desc.variableLabels.names) {
+                panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
             }
             result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
             result.init(result) // Init self-collection.

vendor/github.com/prometheus/client_golang/prometheus/histogram.go

@@ -25,6 +25,7 @@ import (
     dto "github.com/prometheus/client_model/go"
 
     "google.golang.org/protobuf/proto"
+    "google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // nativeHistogramBounds for the frac of observed values. Only relevant for
@@ -391,7 +392,7 @@ type HistogramOpts struct {
     // zero, it is replaced by default buckets. The default buckets are
     // DefBuckets if no buckets for a native histogram (see below) are used,
     // otherwise the default is no buckets. (In other words, if you want to
-    // use both reguler buckets and buckets for a native histogram, you have
+    // use both regular buckets and buckets for a native histogram, you have
     // to define the regular buckets here explicitly.)
     Buckets []float64
@@ -413,8 +414,8 @@ type HistogramOpts struct {
     // and 2, same as between 2 and 4, and 4 and 8, etc.).
     //
     // Details about the actually used factor: The factor is calculated as
-    // 2^(2^n), where n is an integer number between (and including) -8 and
-    // 4. n is chosen so that the resulting factor is the largest that is
+    // 2^(2^-n), where n is an integer number between (and including) -4 and
+    // 8. n is chosen so that the resulting factor is the largest that is
     // still smaller or equal to NativeHistogramBucketFactor. Note that the
     // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
     // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
@@ -428,12 +429,12 @@ type HistogramOpts struct {
     // a major version bump.
     NativeHistogramBucketFactor float64
     // All observations with an absolute value of less or equal
-    // NativeHistogramZeroThreshold are accumulated into a “zero”
-    // bucket. For best results, this should be close to a bucket
-    // boundary. This is usually the case if picking a power of two. If
+    // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+    // For best results, this should be close to a bucket boundary. This is
+    // usually the case if picking a power of two. If
     // NativeHistogramZeroThreshold is left at zero,
-    // DefNativeHistogramZeroThreshold is used as the threshold. To configure
-    // a zero bucket with an actual threshold of zero (i.e. only
+    // DefNativeHistogramZeroThreshold is used as the threshold. To
+    // configure a zero bucket with an actual threshold of zero (i.e. only
     // observations of precisely zero will go into the zero bucket), set
     // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
     // constant (or any negative float value).
@@ -446,26 +447,34 @@ type HistogramOpts struct {
     // Histogram are sufficiently wide-spread. In particular, this could be
     // used as a DoS attack vector. Where the observed values depend on
     // external inputs, it is highly recommended to set a
     // NativeHistogramMaxBucketNumber.) Once the set
     // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
-    // enacted: First, if the last reset (or the creation) of the histogram
-    // is at least NativeHistogramMinResetDuration ago, then the whole
-    // histogram is reset to its initial state (including regular
-    // buckets). If less time has passed, or if
-    // NativeHistogramMinResetDuration is zero, no reset is
-    // performed. Instead, the zero threshold is increased sufficiently to
-    // reduce the number of buckets to or below
-    // NativeHistogramMaxBucketNumber, but not to more than
-    // NativeHistogramMaxZeroThreshold. Thus, if
-    // NativeHistogramMaxZeroThreshold is already at or below the current
-    // zero threshold, nothing happens at this step. After that, if the
-    // number of buckets still exceeds NativeHistogramMaxBucketNumber, the
-    // resolution of the histogram is reduced by doubling the width of the
-    // sparse buckets (up to a growth factor between one bucket to the next
-    // of 2^(2^4) = 65536, see above).
+    // enacted:
+    // - First, if the last reset (or the creation) of the histogram is at
+    //   least NativeHistogramMinResetDuration ago, then the whole
+    //   histogram is reset to its initial state (including regular
+    //   buckets).
+    // - If less time has passed, or if NativeHistogramMinResetDuration is
+    //   zero, no reset is performed. Instead, the zero threshold is
+    //   increased sufficiently to reduce the number of buckets to or below
+    //   NativeHistogramMaxBucketNumber, but not to more than
+    //   NativeHistogramMaxZeroThreshold. Thus, if
+    //   NativeHistogramMaxZeroThreshold is already at or below the current
+    //   zero threshold, nothing happens at this step.
+    // - After that, if the number of buckets still exceeds
+    //   NativeHistogramMaxBucketNumber, the resolution of the histogram is
+    //   reduced by doubling the width of the sparse buckets (up to a
+    //   growth factor between one bucket to the next of 2^(2^4) = 65536,
+    //   see above).
+    // - Any increased zero threshold or reduced resolution is reset back
+    //   to their original values once NativeHistogramMinResetDuration has
+    //   passed (since the last reset or the creation of the histogram).
     NativeHistogramMaxBucketNumber  uint32
     NativeHistogramMinResetDuration time.Duration
     NativeHistogramMaxZeroThreshold float64
+
+    // now is for testing purposes, by default it's time.Now.
+    now func() time.Time
 }
 
 // HistogramVecOpts bundles the options to create a HistogramVec metric.
@@ -475,7 +484,7 @@ type HistogramVecOpts struct {
     HistogramOpts
 
     // VariableLabels are used to partition the metric vector by the given set
-    // of labels. Each label value will be constrained with the optional Contraint
+    // of labels. Each label value will be constrained with the optional Constraint
     // function, if provided.
     VariableLabels ConstrainableLabels
 }
@@ -499,12 +508,12 @@ func NewHistogram(opts HistogramOpts) Histogram {
 }
 
 func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
-    if len(desc.variableLabels) != len(labelValues) {
-        panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
+    if len(desc.variableLabels.names) != len(labelValues) {
+        panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
     }
 
-    for _, n := range desc.variableLabels {
-        if n.Name == bucketLabel {
+    for _, n := range desc.variableLabels.names {
+        if n == bucketLabel {
             panic(errBucketLabelNotAllowed)
         }
     }
@@ -514,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
         }
     }
 
+    if opts.now == nil {
+        opts.now = time.Now
+    }
+
     h := &histogram{
         desc:        desc,
         upperBounds: opts.Buckets,
@@ -521,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
         nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
         nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
         nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
-        lastResetTime:                   time.Now(),
-        now:                             time.Now,
+        lastResetTime:                   opts.now(),
+        now:                             opts.now,
     }
     if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
         h.upperBounds = DefBuckets
@@ -701,9 +714,11 @@ type histogram struct {
     nativeHistogramMaxZeroThreshold float64
     nativeHistogramMaxBuckets       uint32
     nativeHistogramMinResetDuration time.Duration
-    lastResetTime                   time.Time // Protected by mtx.
+    // lastResetTime is protected by mtx. It is also used as created timestamp.
+    lastResetTime time.Time
 
-    now func() time.Time // To mock out time.Now() for testing.
+    // now is for testing purposes, by default it's time.Now.
+    now func() time.Time
 }
 
 func (h *histogram) Desc() *Desc {
@@ -742,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error {
     waitForCooldown(count, coldCounts)
 
     his := &dto.Histogram{
-        Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
-        SampleCount: proto.Uint64(count),
-        SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+        Bucket:           make([]*dto.Bucket, len(h.upperBounds)),
+        SampleCount:      proto.Uint64(count),
+        SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+        CreatedTimestamp: timestamppb.New(h.lastResetTime),
     }
     out.Histogram = his
     out.Label = h.labelPairs
@@ -782,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error {
         his.ZeroCount = proto.Uint64(zeroBucket)
         his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
         his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+        // Add a no-op span to a histogram without observations and with
+        // a zero threshold of zero. Otherwise, a native histogram would
+        // look like a classic histogram to scrapers.
+        if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+            his.PositiveSpan = []*dto.BucketSpan{{
+                Offset: proto.Int32(0),
+                Length: proto.Uint32(0),
+            }}
+        }
     }
     addAndResetCounts(hotCounts, coldCounts)
     return nil
@@ -854,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
         h.doubleBucketWidth(hotCounts, coldCounts)
     }
 }
 
-// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration
+// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
 // has been passed. It returns true if the histogram has been reset. The caller
 // must have locked h.mtx.
-func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+func (h *histogram) maybeReset(
+    hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
     // We are using the possibly mocked h.now() rather than
     // time.Since(h.lastResetTime) to enable testing.
-    if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+    if h.nativeHistogramMinResetDuration == 0 ||
+        h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
        return false
     }
     // Completely reset coldCounts.
     h.resetCounts(cold)
     // Repeat the latest observation to not lose it completely.
     cold.observe(value, bucket, true)
-    // Make coldCounts the new hot counts while ressetting countAndHotIdx.
+    // Make coldCounts the new hot counts while resetting countAndHotIdx.
     n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
     count := n & ((1 << 63) - 1)
     waitForCooldown(count, hot)
@@ -1176,6 +1205,7 @@ type constHistogram struct {
     sum        float64
     buckets    map[float64]uint64
     labelPairs []*dto.LabelPair
+    createdTs  *timestamppb.Timestamp
 }
 
 func (h *constHistogram) Desc() *Desc {
@@ -1183,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc {
 }
 
 func (h *constHistogram) Write(out *dto.Metric) error {
-    his := &dto.Histogram{}
+    his := &dto.Histogram{
+        CreatedTimestamp: h.createdTs,
+    }
 
     buckets := make([]*dto.Bucket, 0, len(h.buckets))
@@ -1230,7 +1262,7 @@ func NewConstHistogram(
     if desc.err != nil {
         return nil, desc.err
     }
-    if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+    if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
         return nil, err
     }
     return &constHistogram{
@@ -1324,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
             // Multiple spans with only small gaps in between are probably
             // encoded more efficiently as one larger span with a few empty
             // buckets. Needs some research to find the sweet spot. For now,
-            // we assume that gaps of one ore two buckets should not create
+            // we assume that gaps of one or two buckets should not create
             // a new span.
             iDelta := int32(i - nextI)
             if n == 0 || iDelta > 2 {
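
The options documented above drive the native-histogram behavior. A minimal sketch of constructing one (the field names are the exported HistogramOpts fields from this release; the values are illustrative only):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        h := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name: "request_duration_seconds",
            Help: "Request latency.",
            // Roughly 10% bucket growth, capped at 100 buckets; resolution
            // and zero threshold are restored an hour after a reset.
            NativeHistogramBucketFactor:     1.1,
            NativeHistogramMaxBucketNumber:  100,
            NativeHistogramMinResetDuration: time.Hour,
        })
        h.Observe(0.42)
        prometheus.MustRegister(h)
    }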

vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go

@@ -14,7 +14,7 @@
 // It provides tools to compare sequences of strings and generate textual diffs.
 //
 // Maintaining `GetUnifiedDiffString` here because original repository
-// (https://github.com/pmezard/go-difflib) is no loger maintained.
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
 package internal
 
 import (

vendor/github.com/prometheus/client_golang/prometheus/labels.go

@@ -32,19 +32,15 @@ import (
 // create a Desc.
 type Labels map[string]string
 
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
 // ConstrainedLabels represents a label name and its constrain function
 // to normalize label values. This type is commonly used when constructing
 // metric vector Collectors.
 type ConstrainedLabel struct {
     Name       string
-    Constraint func(string) string
-}
-
-func (cl ConstrainedLabel) Constrain(v string) string {
-    if cl.Constraint == nil {
-        return v
-    }
-    return cl.Constraint(v)
+    Constraint LabelConstraint
 }
 
 // ConstrainableLabels is an interface that allows creating of labels that can
@@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string {
 //	},
 //	})
 type ConstrainableLabels interface {
-    constrainedLabels() ConstrainedLabels
+    compile() *compiledLabels
     labelNames() []string
 }
 
@@ -67,8 +63,20 @@ type ConstrainableLabels interface {
 // metric vector Collectors.
 type ConstrainedLabels []ConstrainedLabel
 
-func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
-    return cls
+func (cls ConstrainedLabels) compile() *compiledLabels {
+    compiled := &compiledLabels{
+        names:            make([]string, len(cls)),
+        labelConstraints: map[string]LabelConstraint{},
+    }
+
+    for i, label := range cls {
+        compiled.names[i] = label.Name
+        if label.Constraint != nil {
+            compiled.labelConstraints[label.Name] = label.Constraint
+        }
+    }
+
+    return compiled
 }
 
 func (cls ConstrainedLabels) labelNames() []string {
@@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string {
 //	}
 type UnconstrainedLabels []string
 
-func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
-    constrainedLabels := make([]ConstrainedLabel, len(uls))
-    for i, l := range uls {
-        constrainedLabels[i] = ConstrainedLabel{Name: l}
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+    return &compiledLabels{
+        names: uls,
     }
-    return constrainedLabels
 }
 
 func (uls UnconstrainedLabels) labelNames() []string {
     return uls
 }
 
+type compiledLabels struct {
+    names            []string
+    labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+    return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+    return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+    if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+        return fn(value)
+    }
+    return value
+}
+
 // reservedLabelPrefix is a prefix which is not legal in user-supplied
 // label names.
 const reservedLabelPrefix = "__"
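
A minimal sketch of how these constrained labels are used from the caller side (assumes the experimental prometheus.V2 constructors that take the CounterVecOpts shown in counter.go above; metric and label names are illustrative):

    package main

    import (
        "strings"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        v := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
            CounterOpts: prometheus.CounterOpts{
                Name: "http_requests_total",
                Help: "Requests by method.",
            },
            // Constraint is now the named type LabelConstraint; the labels
            // compile into the compiledLabels form introduced above.
            VariableLabels: prometheus.ConstrainedLabels{
                {Name: "method", Constraint: strings.ToUpper},
            },
        })
        v.WithLabelValues("get").Inc() // stored as method="GET"
    }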

vendor/github.com/prometheus/client_golang/prometheus/metric.go

@@ -92,6 +92,9 @@ type Opts struct {
     // machine_role metric). See also
     // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
     ConstLabels Labels
+
+    // now is for testing purposes, by default it's time.Now.
+    now func() time.Time
 }
 
 // BuildFQName joins the given three name components by "_". Empty name

vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go

@@ -389,16 +389,13 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
     return true
 }
 
-// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
-// unnecessary allocations on each request.
-var emptyLabels = prometheus.Labels{}
-
 func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
-    if !(code || method) {
-        return emptyLabels
-    }
     labels := prometheus.Labels{}
+    if !(code || method) {
+        return labels
+    }
 
     if code {
         labels["code"] = sanitizeCode(status)
     }

vendor/github.com/prometheus/client_golang/prometheus/registry.go

@@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
                 goroutineBudget--
                 runtime.Gosched()
             }
-            // Once both checkedMetricChan and uncheckdMetricChan are closed
+            // Once both checkedMetricChan and uncheckedMetricChan are closed
             // and drained, the contraption above will nil out cmc and umc,
             // and then we can leave the collect loop here.
             if cmc == nil && umc == nil {
@@ -963,9 +963,9 @@ func checkDescConsistency(
     // Is the desc consistent with the content of the metric?
     lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
     copy(lpsFromDesc, desc.constLabelPairs)
-    for _, l := range desc.variableLabels {
+    for _, l := range desc.variableLabels.names {
         lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
-            Name: proto.String(l.Name),
+            Name: proto.String(l),
         })
     }
     if len(lpsFromDesc) != len(dtoMetric.Label) {

vendor/github.com/prometheus/client_golang/prometheus/summary.go

@@ -26,6 +26,7 @@ import (
     "github.com/beorn7/perks/quantile"
     "google.golang.org/protobuf/proto"
+    "google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // quantileLabel is used for the label that defines the quantile in a
@@ -145,6 +146,9 @@ type SummaryOpts struct {
     // is the internal buffer size of the underlying package
     // "github.com/bmizerany/perks/quantile").
     BufCap uint32
+
+    // now is for testing purposes, by default it's time.Now.
+    now func() time.Time
 }
 
 // SummaryVecOpts bundles the options to create a SummaryVec metric.
@@ -154,7 +158,7 @@ type SummaryVecOpts struct {
     SummaryOpts
 
     // VariableLabels are used to partition the metric vector by the given set
-    // of labels. Each label value will be constrained with the optional Contraint
+    // of labels. Each label value will be constrained with the optional Constraint
     // function, if provided.
     VariableLabels ConstrainableLabels
 }
@@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary {
 }
 
 func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
-    if len(desc.variableLabels) != len(labelValues) {
-        panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
+    if len(desc.variableLabels.names) != len(labelValues) {
+        panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
     }
 
-    for _, n := range desc.variableLabels {
-        if n.Name == quantileLabel {
+    for _, n := range desc.variableLabels.names {
+        if n == quantileLabel {
             panic(errQuantileLabelNotAllowed)
         }
     }
@@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
         opts.BufCap = DefBufCap
     }
 
+    if opts.now == nil {
+        opts.now = time.Now
+    }
     if len(opts.Objectives) == 0 {
         // Use the lock-free implementation of a Summary without objectives.
         s := &noObjectivesSummary{
@@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
             counts: [2]*summaryCounts{{}, {}},
         }
         s.init(s) // Init self-collection.
+        s.createdTs = timestamppb.New(opts.now())
         return s
     }
@@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
         coldBuf:        make([]float64, 0, opts.BufCap),
         streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
     }
-    s.headStreamExpTime = time.Now().Add(s.streamDuration)
+    s.headStreamExpTime = opts.now().Add(s.streamDuration)
     s.hotBufExpTime = s.headStreamExpTime
 
     for i := uint32(0); i < opts.AgeBuckets; i++ {
@@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
     sort.Float64s(s.sortedObjectives)
 
     s.init(s) // Init self-collection.
+    s.createdTs = timestamppb.New(opts.now())
     return s
 }
@@ -286,6 +295,8 @@ type summary struct {
     headStream                       *quantile.Stream
     headStreamIdx                    int
     headStreamExpTime, hotBufExpTime time.Time
+
+    createdTs *timestamppb.Timestamp
 }
 
 func (s *summary) Desc() *Desc {
@@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) {
 }
 
 func (s *summary) Write(out *dto.Metric) error {
-    sum := &dto.Summary{}
+    sum := &dto.Summary{
+        CreatedTimestamp: s.createdTs,
+    }
     qs := make([]*dto.Quantile, 0, len(s.objectives))
 
     s.bufMtx.Lock()
@@ -440,6 +453,8 @@ type noObjectivesSummary struct {
     counts [2]*summaryCounts
 
     labelPairs []*dto.LabelPair
+
+    createdTs *timestamppb.Timestamp
 }
 
 func (s *noObjectivesSummary) Desc() *Desc {
@@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
     }
 
     sum := &dto.Summary{
-        SampleCount: proto.Uint64(count),
-        SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+        SampleCount:      proto.Uint64(count),
+        SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+        CreatedTimestamp: s.createdTs,
     }
 
     out.Summary = sum
@@ -681,6 +697,7 @@ type constSummary struct {
     sum        float64
     quantiles  map[float64]float64
     labelPairs []*dto.LabelPair
+    createdTs  *timestamppb.Timestamp
 }
 
 func (s *constSummary) Desc() *Desc {
@@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc {
 }
 
 func (s *constSummary) Write(out *dto.Metric) error {
-    sum := &dto.Summary{}
+    sum := &dto.Summary{
+        CreatedTimestamp: s.createdTs,
+    }
     qs := make([]*dto.Quantile, 0, len(s.quantiles))
 
     sum.SampleCount = proto.Uint64(s.count)
@@ -737,7 +756,7 @@ func NewConstSummary(
     if desc.err != nil {
         return nil, desc.err
     }
-    if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+    if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
         return nil, err
     }
     return &constSummary{

vendor/github.com/prometheus/client_golang/prometheus/value.go

@ -14,6 +14,7 @@
package prometheus package prometheus
import ( import (
"errors"
"fmt" "fmt"
"sort" "sort"
"time" "time"
@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc {
} }
func (v *valueFunc) Write(out *dto.Metric) error { func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
} }
// NewConstMetric returns a metric with one fixed value that cannot be // NewConstMetric returns a metric with one fixed value that cannot be
@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if desc.err != nil { if desc.err != nil {
return nil, desc.err return nil, desc.err
} }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err return nil, err
} }
metric := &dto.Metric{} metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
return nil, err return nil, err
} }
@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
return m return m
} }
// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
// with created timestamp set and returns an error for other metric types.
func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
switch valueType {
case CounterValue:
break
default:
return nil, errors.New("created timestamps are only supported for counters")
}
metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
return nil, err
}
return &constMetric{
desc: desc,
metric: metric,
}, nil
}
// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
// NewConstMetricWithCreatedTimestamp would have returned an error.
func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
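The two constructors above are the public entry points for back-filling created timestamps on const metrics. A hedged usage sketch with a hypothetical collector and metric name; note that only `CounterValue` is accepted, so other value types make `NewConstMetricWithCreatedTimestamp` return an error and the `Must` variant panic.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// jobCollector is a hypothetical custom collector; the metric name,
// value, and start time below are invented for illustration.
type jobCollector struct {
	desc    *prometheus.Desc
	runs    float64
	started time.Time
}

func (c *jobCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *jobCollector) Collect(ch chan<- prometheus.Metric) {
	// Back-fill the counter's created timestamp from the collector's
	// own notion of when counting began.
	ch <- prometheus.MustNewConstMetricWithCreatedTimestamp(
		c.desc, prometheus.CounterValue, c.runs, c.started,
	)
}

func main() {
	prometheus.MustRegister(&jobCollector{
		desc:    prometheus.NewDesc("job_runs_total", "Completed job runs.", nil, nil),
		runs:    42,
		started: time.Now().Add(-time.Hour),
	})
}
```

The registry calls `Collect` on each scrape, so the timestamp is re-emitted with every exposition.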
type constMetric struct { type constMetric struct {
desc *Desc desc *Desc
metric *dto.Metric metric *dto.Metric
@ -153,11 +191,12 @@ func populateMetric(
labelPairs []*dto.LabelPair, labelPairs []*dto.LabelPair,
e *dto.Exemplar, e *dto.Exemplar,
m *dto.Metric, m *dto.Metric,
ct *timestamppb.Timestamp,
) error { ) error {
m.Label = labelPairs m.Label = labelPairs
switch t { switch t {
case CounterValue: case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue: case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)} m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue: case UntypedValue:
@ -176,19 +215,19 @@ func populateMetric(
// This function is only needed for custom Metric implementations. See MetricVec // This function is only needed for custom Metric implementations. See MetricVec
// example. // example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 { if totalLen == 0 {
// Super fast path. // Super fast path.
return nil return nil
} }
if len(desc.variableLabels) == 0 { if len(desc.variableLabels.names) == 0 {
// Moderately fast path. // Moderately fast path.
return desc.constLabelPairs return desc.constLabelPairs
} }
labelPairs := make([]*dto.LabelPair, 0, totalLen) labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, l := range desc.variableLabels { for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{ labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(l.Name), Name: proto.String(l),
Value: proto.String(labelValues[i]), Value: proto.String(labelValues[i]),
}) })
} }
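`MakeLabelPairs` is the exported helper that custom `Metric` implementations are expected to use. A short sketch of what it produces, with invented descriptor and label names:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One variable label plus one constant label, purely illustrative.
	desc := prometheus.NewDesc(
		"queue_depth", "Current queue depth.",
		[]string{"queue"},
		prometheus.Labels{"shard": "a"},
	)
	// MakeLabelPairs merges the variable label values with the
	// descriptor's constant label pairs in exposition order.
	for _, lp := range prometheus.MakeLabelPairs(desc, []string{"ingest"}) {
		fmt.Printf("%s=%q\n", lp.GetName(), lp.GetValue())
	}
}
```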

View File

@ -20,24 +20,6 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
) )
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func getLabelsFromPool() Labels {
return labelsPool.Get().(Labels)
}
func putLabelsToPool(labels Labels) {
for k := range labels {
delete(labels, k)
}
labelsPool.Put(labels)
}
// MetricVec is a Collector to bundle metrics of the same name that differ in // MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block // their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec, // for implementations of vectors of a given metric type, like GaugeVec,
@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// See also the CounterVec example. // See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
lvs = constrainLabelValues(m.desc, lvs, m.curry) lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return false return false
@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See // This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods. // there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool { func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels) labels, closer := constrainLabels(m.desc, labels)
defer putLabelsToPool(labels) defer closer()
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector. // Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int { func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels) labels, closer := constrainLabels(m.desc, labels)
defer putLabelsToPool(labels) defer closer()
return m.metricMap.deleteByLabels(labels, m.curry) return m.metricMap.deleteByLabels(labels, m.curry)
} }
@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
oldCurry = m.curry oldCurry = m.curry
iCurry int iCurry int
) )
for i, label := range m.desc.variableLabels { for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[label.Name] val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok { if ok {
return nil, fmt.Errorf("label name %q is already curried", label.Name) return nil, fmt.Errorf("label name %q is already curried", labelName)
} }
newCurry = append(newCurry, oldCurry[iCurry]) newCurry = append(newCurry, oldCurry[iCurry])
iCurry++ iCurry++
@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok { if !ok {
continue // Label stays uncurried. continue // Label stays uncurried.
} }
newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) newCurry = append(newCurry, curriedLabelValue{
i,
m.desc.variableLabels.constrain(labelName, val),
})
} }
} }
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation, // around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec. // for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels) labels, closer := constrainLabels(m.desc, labels)
defer putLabelsToPool(labels) defer closer()
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
} }
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err return 0, err
} }
@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
curry = m.curry curry = m.curry
iVals, iCurry int iVals, iCurry int
) )
for i := 0; i < len(m.desc.variableLabels); i++ { for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value) h = m.hashAdd(h, curry[iCurry].value)
iCurry++ iCurry++
@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
} }
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err return 0, err
} }
@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
curry = m.curry curry = m.curry
iCurry int iCurry int
) )
for i, label := range m.desc.variableLabels { for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[label.Name] val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
if ok { if ok {
return 0, fmt.Errorf("label name %q is already curried", label.Name) return 0, fmt.Errorf("label name %q is already curried", labelName)
} }
h = m.hashAdd(h, curry[iCurry].value) h = m.hashAdd(h, curry[iCurry].value)
iCurry++ iCurry++
} else { } else {
if !ok { if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label.Name) return 0, fmt.Errorf("label name %q missing in label map", labelName)
} }
h = m.hashAdd(h, val) h = m.hashAdd(h, val)
} }
@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels { for l, v := range labels {
// Check if the target label exists in our metrics and get the index. // Check if the target label exists in our metrics and get the index.
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
if validLabel { if validLabel {
// Check the value of that label against the target value. // Check the value of that label against the target value.
// We don't consider curried values in partial matches. // We don't consider curried values in partial matches.
@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
return false return false
} }
iCurry := 0 iCurry := 0
for i, k := range desc.variableLabels { for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value { if values[i] != curry[iCurry].value {
return false return false
@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++ iCurry++
continue continue
} }
if values[i] != labels[k.Name] { if values[i] != labels[k] {
return false return false
} }
} }
@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry)) labelValues := make([]string, len(labels)+len(curry))
iCurry := 0 iCurry := 0
for i, k := range desc.variableLabels { for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value labelValues[i] = curry[iCurry].value
iCurry++ iCurry++
continue continue
} }
labelValues[i] = labels[k.Name] labelValues[i] = labels[k]
} }
return labelValues return labelValues
} }
@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
return labelValues return labelValues
} }
func constrainLabels(desc *Desc, labels Labels) Labels { var labelsPool = &sync.Pool{
constrainedLabels := getLabelsFromPool() New: func() interface{} {
for l, v := range labels { return make(Labels)
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { },
v = desc.variableLabels[i].Constrain(v) }
}
constrainedLabels[l] = v func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there are no constraints
return labels, func() {}
} }
return constrainedLabels constrainedLabels := labelsPool.Get().(Labels)
for l, v := range labels {
constrainedLabels[l] = desc.variableLabels.constrain(l, v)
}
return constrainedLabels, func() {
for k := range constrainedLabels {
delete(constrainedLabels, k)
}
labelsPool.Put(constrainedLabels)
}
} }
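The rewrite above folds the old `getLabelsFromPool`/`putLabelsToPool` helpers into a closer returned alongside the constrained labels, so the no-constraints fast path can skip the pool entirely while callers keep a single `defer closer()`. A distilled, standalone sketch of the pattern; all names here are invented for illustration:

```go
package main

import (
	"fmt"
	"sync"
)

// pool mirrors labelsPool above: reusable scratch maps.
var pool = sync.Pool{New: func() interface{} { return map[string]string{} }}

// withScratch hands back a map plus a closer that recycles it. On the
// fast path (no rewriting needed) it returns the input untouched and a
// no-op closer, so callers can unconditionally defer closer().
func withScratch(in map[string]string, needCopy bool) (map[string]string, func()) {
	if !needCopy {
		return in, func() {}
	}
	scratch := pool.Get().(map[string]string)
	for k, v := range in {
		scratch[k] = v
	}
	return scratch, func() {
		for k := range scratch {
			delete(scratch, k)
		}
		pool.Put(scratch)
	}
}

func main() {
	labels, closer := withScratch(map[string]string{"code": "200"}, true)
	defer closer()
	fmt.Println(labels)
}
```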
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there are no constraints
return lvs
}
constrainedValues := make([]string, len(lvs)) constrainedValues := make([]string, len(lvs))
var iCurry, iLVs int var iCurry, iLVs int
for i := 0; i < len(lvs)+len(curry); i++ { for i := 0; i < len(lvs)+len(curry); i++ {
@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [
continue continue
} }
if i < len(desc.variableLabels) { if i < len(desc.variableLabels.names) {
constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) constrainedValues[iLVs] = desc.variableLabels.constrain(
desc.variableLabels.names[i],
lvs[iLVs],
)
} else { } else {
constrainedValues[iLVs] = lvs[iLVs] constrainedValues[iLVs] = lvs[iLVs]
} }

View File

@ -215,8 +215,9 @@ type Counter struct {
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
} }
func (x *Counter) Reset() { func (x *Counter) Reset() {
@ -265,6 +266,13 @@ func (x *Counter) GetExemplar() *Exemplar {
return nil return nil
} }
func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
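These generated getters are nil-safe at every hop, which is what lets consumers chain through a `Metric` to the new field without intermediate checks. A small sketch using hand-built `dto` values, purely for illustration:

```go
package main

import (
	"fmt"
	"time"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	m := &dto.Metric{
		Counter: &dto.Counter{
			Value:            proto.Float64(17),
			CreatedTimestamp: timestamppb.New(time.Now().Add(-time.Minute)),
		},
	}
	// Chained getters never dereference nil.
	fmt.Println(m.GetCounter().GetCreatedTimestamp().AsTime())

	var missing *dto.Metric
	fmt.Println(missing.GetCounter().GetCreatedTimestamp()) // <nil>, no panic
}
```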
type Quantile struct { type Quantile struct {
state protoimpl.MessageState state protoimpl.MessageState
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
@ -325,9 +333,10 @@ type Summary struct {
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
} }
func (x *Summary) Reset() { func (x *Summary) Reset() {
@ -383,6 +392,13 @@ func (x *Summary) GetQuantile() []*Quantile {
return nil return nil
} }
func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
type Untyped struct { type Untyped struct {
state protoimpl.MessageState state protoimpl.MessageState
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
@ -439,7 +455,8 @@ type Histogram struct {
SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram. // Buckets for the conventional histogram.
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets. // then each power of two is divided into 2^n logarithmic buckets.
@ -525,6 +542,13 @@ func (x *Histogram) GetBucket() []*Bucket {
return nil return nil
} }
func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
func (x *Histogram) GetSchema() int32 { func (x *Histogram) GetSchema() int32 {
if x != nil && x.Schema != nil { if x != nil && x.Schema != nil {
return *x.Schema return *x.Schema
@ -972,137 +996,151 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
// (regenerated raw descriptor bytes elided: the mechanically generated byte dump changes only to encode the new created_timestamp fields on Counter, Summary, and Histogram)
} }
var ( var (
@ -1137,26 +1175,29 @@ var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
} }
var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
16, // [16:16] is the sub-list for method output_type 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
16, // [16:16] is the sub-list for method input_type 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
16, // [16:16] is the sub-list for extension type_name 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
16, // [16:16] is the sub-list for extension extendee 19, // [19:19] is the sub-list for method output_type
0, // [0:16] is the sub-list for field type_name 19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
} }
func init() { file_io_prometheus_client_metrics_proto_init() } func init() { file_io_prometheus_client_metrics_proto_init() }

View File

@ -2,6 +2,7 @@
linters: linters:
enable: enable:
- godot - godot
- misspell
- revive - revive
linter-settings: linter-settings:
@ -10,3 +11,5 @@ linter-settings:
exclude: exclude:
# Ignore "See: URL" # Ignore "See: URL"
- 'See:' - 'See:'
misspell:
locale: US

View File

@ -49,19 +49,19 @@ endif
GOTEST := $(GO) test GOTEST := $(GO) test
GOTEST_DIR := GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),) ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),) ifneq ($(shell command -v gotestsum > /dev/null),)
GOTEST_DIR := test-results GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif endif
endif endif
PROMU_VERSION ?= 0.14.0 PROMU_VERSION ?= 0.15.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT := SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.51.2 GOLANGCI_LINT_VERSION ?= v1.53.3
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -178,7 +178,7 @@ endif
.PHONY: common-yamllint .PHONY: common-yamllint
common-yamllint: common-yamllint:
@echo ">> running yamllint on all YAML files in the repository" @echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint)) ifeq (, $(shell command -v yamllint > /dev/null))
@echo "yamllint not installed so skipping" @echo "yamllint not installed so skipping"
else else
yamllint . yamllint .

View File

@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory
extracting the ttar file using `make fixtures/.unpacked` or just `make test`. extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash ```bash
rm -rf fixtures rm -rf testdata/fixtures
make test make test
``` ```
Next, make the required changes to the extracted files in the `fixtures` directory. When Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using based on the updated `fixtures` directory. And finally, verify the changes using
`git diff fixtures.ttar`. `git diff testdata/fixtures.ttar`.

View File

@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) { func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp")) data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
} }
return parseARPEntries(data) return parseARPEntries(data)
@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth { } else if width == expectedDataWidth {
entry, err := parseARPEntry(columns) entry, err := parseARPEntry(columns)
if err != nil { if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
} }
entries = append(entries, entry) entries = append(entries, entry)
} else { } else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
} }
} }
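Several of the new messages wrap a package sentinel (`ErrFileRead`, `ErrFileParse`) with `%w`, which lets callers classify failures with `errors.Is` instead of matching message text. A minimal standalone sketch of that pattern, using a stand-in sentinel rather than the real procfs ones:

```go
package main

import (
	"errors"
	"fmt"
)

// errFileParse stands in for the procfs package sentinel ErrFileParse.
var errFileParse = errors.New("error parsing file")

func checkWidth(width, want int) error {
	if width != want {
		return fmt.Errorf("%w: %d columns found, but expected %d", errFileParse, width, want)
	}
	return nil
}

func main() {
	err := checkWidth(3, 4)
	// %w wraps the sentinel, so callers classify by identity, not text.
	fmt.Println(errors.Is(err, errFileParse)) // true
}
```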

View File

@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
parts := strings.Fields(line) parts := strings.Fields(line)
if len(parts) < 4 { if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
} }
node := strings.TrimRight(parts[1], ",") node := strings.TrimRight(parts[1], ",")
@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
bucketCount = arraySize bucketCount = arraySize
} else { } else {
if bucketCount != arraySize { if bucketCount != arraySize {
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
} }
} }
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ { for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64) sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
} }
} }

View File

@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line // find the first "processor" line
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32) v, err := strconv.ParseUint(field[1], 0, 32)
@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info)) scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") { if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{} cpuinfo := []CPUInfo{}
@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{} cpuinfo := []CPUInfo{}
@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") { if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 { if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
cpu := commonCPUInfo cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32) v, err := strconv.ParseUint(match[1], 0, 32)
@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line // find the first "processor" line
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{} cpuinfo := []CPUInfo{}
@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32) v, err := strconv.ParseUint(field[1], 0, 32)
@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner) firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
} }
field := strings.SplitN(firstLine, ": ", 2) field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32) v, err := strconv.ParseUint(field[1], 0, 32)

View File

@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto") path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path) b, err := util.ReadFileNoStat(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading crypto %q: %w", path, err) return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
} }
crypto, err := parseCrypto(bytes.NewReader(b)) crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
} }
return crypto, nil return crypto, nil
@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":") kv := strings.Split(text, ":")
if len(kv) != 2 { if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text) return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
} }
k := strings.TrimSpace(kv[0]) k := strings.TrimSpace(kv[0])

View File

@ -20,8 +20,8 @@ import (
// FS represents the pseudo-filesystem sys, which provides an interface to // FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures. // kernel data structures.
type FS struct { type FS struct {
proc fs.FS proc fs.FS
real bool isReal bool
} }
// DefaultMountPoint is the common mount point of the proc filesystem. // DefaultMountPoint is the common mount point of the proc filesystem.
@ -41,10 +41,10 @@ func NewFS(mountPoint string) (FS, error) {
return FS{}, err return FS{}, err
} }
real, err := isRealProc(mountPoint) isReal, err := isRealProc(mountPoint)
if err != nil { if err != nil {
return FS{}, err return FS{}, err
} }
return FS{fs, real}, nil return FS{fs, isReal}, nil
} }

View File

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build netbsd || openbsd || solaris || windows //go:build netbsd || openbsd || solaris || windows || nostatfs
// +build netbsd openbsd solaris windows // +build netbsd openbsd solaris windows nostatfs
package procfs package procfs

View File

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows //go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs
// +build !netbsd,!openbsd,!solaris,!windows // +build !netbsd,!openbsd,!solaris,!windows,!nostatfs
package procfs package procfs
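
This pair of build-tag changes introduces a `nostatfs` tag that lets consumers disable the statfs-backed mount-point detection at compile time, for platforms or filesystems where statfs is unavailable or unreliable. A minimal sketch of opting out, assuming only the standard `go build -tags` flag and the `NewFS`/`DefaultMountPoint` API that appears elsewhere in this diff:

    // main.go (build with: go build -tags nostatfs)
    package main

    import (
    	"fmt"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	// With nostatfs set, the non-statfs variant of the file pair above
    	// is compiled in, so NewFS skips the statfs-backed check.
    	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    	if err != nil {
    		fmt.Println("procfs unavailable:", err)
    		return
    	}
    	_ = fs
    	fmt.Println("procfs mounted at", procfs.DefaultMountPoint)
    }
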

View File

@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b)) m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil { if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
} }
return *m, nil return *m, nil
@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error { func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error var err error
if len(fields) < len(setFields) { if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
} }
for i := range setFields { for i := range setFields {
@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
for s.Scan() { for s.Scan() {
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) < 2 { if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
} }
switch fields[0] { switch fields[0] {

View File

@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46: case 46:
ip = net.ParseIP(s[1:40]) ip = net.ParseIP(s[1:40])
if ip == nil { if ip == nil {
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
} }
default: default:
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
} }
portString := s[len(s)-4:] portString := s[len(s)-4:]
if len(portString) != 4 { if len(portString) != 4 {
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
} }
port, err := strconv.ParseUint(portString, 16, 16) port, err := strconv.ParseUint(portString, 16, 16)
if err != nil { if err != nil {

View File

@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3) loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes)) parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 { if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
} }
var err error var err error
for i, load := range parts[0:3] { for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64) loads[i], err = strconv.ParseFloat(load, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not parse load %q: %w", load, err) return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
} }
} }
return &LoadAvg{ return &LoadAvg{

View File

@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
} }
mdstat, err := parseMDStat(data) mdstat, err := parseMDStat(data)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
} }
return mdstat, nil return mdstat, nil
} }
@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line) deviceFields := strings.Fields(line)
if len(deviceFields) < 3 { if len(deviceFields) < 3 {
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
} }
mdName := deviceFields[0] // mdx mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive state := deviceFields[2] // active or inactive
if len(lines) <= i+3 { if len(lines) <= i+3 {
return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
} }
// Failed disks have the suffix (F) & Spare disks have the suffix (S). // Failed disks have the suffix (F) & Spare disks have the suffix (S).
@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %w", err) return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
} }
syncLineIdx := i + 2 syncLineIdx := i + 2
@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else { } else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
} }
} }
} }
@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine) statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 { if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
} }
sizeStr := statusFields[0] sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64) size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
} }
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine) matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 { if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
} }
total, err = strconv.ParseInt(matches[2], 10, 64) total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
} }
active, err = strconv.ParseInt(matches[3], 10, 64) active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
} }
down = int64(strings.Count(matches[4], "_")) down = int64(strings.Count(matches[4], "_"))
@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
} }
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
} }
// Get percentage complete // Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
} }
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil { if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
} }
// Get time expected left to complete // Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
} }
finish, err = strconv.ParseFloat(matches[1], 64) finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil { if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
} }
// Get recovery speed // Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
} }
speed, err = strconv.ParseFloat(matches[1], 64) speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil { if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
} }
return syncedBlocks, pct, finish, speed, nil return syncedBlocks, pct, finish, speed, nil

View File

@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b)) m, err := parseMemInfo(bytes.NewReader(b))
if err != nil { if err != nil {
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
} }
return *m, nil return *m, nil
@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
// Each line has at least a name and value; we ignore the unit. // Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) < 2 { if len(fields) < 2 {
return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
} }
v, err := strconv.ParseUint(fields[1], 0, 64) v, err := strconv.ParseUint(fields[1], 0, 64)

View File

@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mountInfo := strings.Split(mountString, " ") mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo) mountInfoLength := len(mountInfo)
if mountInfoLength < 10 { if mountInfoLength < 10 {
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
} }
if mountInfo[mountInfoLength-4] != "-" { if mountInfo[mountInfoLength-4] != "-" {
return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
} }
mount := &MountInfo{ mount := &MountInfo{
@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mount.MountID, err = strconv.Atoi(mountInfo[0]) mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse mount ID") return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
} }
mount.ParentID, err = strconv.Atoi(mountInfo[1]) mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse parent ID") return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
} }
// Has optional fields, which is a space separated list of values. // Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7 // Example: shared:2 master:7
if mountInfo[6] != "" { if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
} }
} }
return mount, nil return mount, nil

View File

@ -266,7 +266,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
if len(ss) > deviceEntryLen { if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics // Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type { if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
} }
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@ -290,7 +290,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
// device [device] mounted on [mount] with fstype [type] // device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) { func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen { if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss) return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
} }
// Check for specific words appearing at specific indices to ensure // Check for specific words appearing at specific indices to ensure
@ -308,7 +308,7 @@ func parseMount(ss []string) (*Mount, error) {
for _, f := range format { for _, f := range format {
if ss[f.i] != f.s { if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss) return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
} }
} }
@ -345,7 +345,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
switch ss[0] { switch ss[0] {
case fieldOpts: case fieldOpts:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
} }
if stats.Opts == nil { if stats.Opts == nil {
stats.Opts = map[string]string{} stats.Opts = map[string]string{}
@ -360,7 +360,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
} }
case fieldAge: case fieldAge:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
} }
// Age integer is in seconds // Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s") d, err := time.ParseDuration(ss[1] + "s")
@ -371,7 +371,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d stats.Age = d
case fieldBytes: case fieldBytes:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
} }
bstats, err := parseNFSBytesStats(ss[1:]) bstats, err := parseNFSBytesStats(ss[1:])
if err != nil { if err != nil {
@ -381,7 +381,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats stats.Bytes = *bstats
case fieldEvents: case fieldEvents:
if len(ss) < 2 { if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
} }
estats, err := parseNFSEventsStats(ss[1:]) estats, err := parseNFSEventsStats(ss[1:])
if err != nil { if err != nil {
@ -391,7 +391,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Events = *estats stats.Events = *estats
case fieldTransport: case fieldTransport:
if len(ss) < 3 { if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
} }
tstats, err := parseNFSTransportStats(ss[1:], statVersion) tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@ -430,7 +430,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
// integer fields. // integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen { if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
} }
ns := make([]uint64, 0, fieldBytesLen) ns := make([]uint64, 0, fieldBytesLen)
@ -459,7 +459,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
// integer fields. // integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen { if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss) return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
} }
ns := make([]uint64, 0, fieldEventsLen) ns := make([]uint64, 0, fieldEventsLen)
@ -523,7 +523,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
} }
if len(ss) < minFields { if len(ss) < minFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
} }
// Skip string operation name for integers // Skip string operation name for integers
@ -576,10 +576,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} else if protocol == "udp" { } else if protocol == "udp" {
expectedLength = fieldTransport10UDPLen expectedLength = fieldTransport10UDPLen
} else { } else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
} }
if len(ss) != expectedLength { if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
} }
case statVersion11: case statVersion11:
var expectedLength int var expectedLength int
@ -588,13 +588,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} else if protocol == "udp" { } else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen expectedLength = fieldTransport11UDPLen
} else { } else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
} }
if len(ss) != expectedLength { if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss)
} }
default: default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion)
} }
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay

View File

@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b)) stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
} }
return stat, nil return stat, nil
@ -86,11 +86,12 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
entries, err := util.ParseHexUint64s(fields) entries, err := util.ParseHexUint64s(fields)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err) return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
} }
numEntries := len(entries) numEntries := len(entries)
if numEntries < 16 || numEntries > 17 { if numEntries < 16 || numEntries > 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries) return nil,
fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
} }
stats := &ConntrackStatEntry{ stats := &ConntrackStatEntry{

View File

@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte var byteIP []byte
byteIP, err := hex.DecodeString(hexIP) byteIP, err := hex.DecodeString(hexIP)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
} }
switch len(byteIP) { switch len(byteIP) {
case 4: case 4:
@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) {
} }
return i, nil return i, nil
default: default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP) return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
} }
} }
@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{} line := &netIPSocketLine{}
if len(fields) < 10 { if len(fields) < 10 {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 10 columns %q", "%w: Less than 10 columns found %q",
ErrFileParse,
strings.Join(fields, " "), strings.Join(fields, " "),
) )
} }
@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
// sl // sl
s := strings.Split(fields[0], ":") s := strings.Split(fields[0], ":")
if len(s) != 2 { if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
} }
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
} }
// local_address // local_address
l := strings.Split(fields[1], ":") l := strings.Split(fields[1], ":")
if len(l) != 2 { if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
} }
if line.LocalAddr, err = parseIP(l[0]); err != nil { if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err return nil, err
} }
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
} }
// remote_address // remote_address
r := strings.Split(fields[2], ":") r := strings.Split(fields[2], ":")
if len(r) != 2 { if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
} }
if line.RemAddr, err = parseIP(r[0]); err != nil { if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err return nil, err
} }
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
} }
// st // st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
} }
// tx_queue and rx_queue // tx_queue and rx_queue
q := strings.Split(fields[4], ":") q := strings.Split(fields[4], ":")
if len(q) != 2 { if len(q) != 2 {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q", "%w: Missing colon for tx/rx queues in socket line %q",
ErrFileParse,
fields[4], fields[4],
) )
} }
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
} }
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
} }
// uid // uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
} }
// inode // inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
} }
return line, nil return line, nil

View File

@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
} else if fields[6] == disabled { } else if fields[6] == disabled {
line.Slab = false line.Slab = false
} else { } else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
} }
line.ModuleName = fields[7] line.ModuleName = fields[7]
@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
} else if capabilities[i] == "n" { } else if capabilities[i] == "n" {
*capabilityFields[i] = false *capabilityFields[i] = false
} else { } else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i) return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
} }
} }
return nil return nil

143
vendor/github.com/prometheus/procfs/net_route.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
const (
blackholeRepresentation string = "*"
blackholeIfaceName string = "blackhole"
routeLineColumns int = 11
)
// A NetRouteLine represents one line from net/route.
type NetRouteLine struct {
Iface string
Destination uint32
Gateway uint32
Flags uint32
RefCnt uint32
Use uint32
Metric uint32
Mask uint32
MTU uint32
Window uint32
IRTT uint32
}
func (fs FS) NetRoute() ([]NetRouteLine, error) {
return readNetRoute(fs.proc.Path("net", "route"))
}
func readNetRoute(path string) ([]NetRouteLine, error) {
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
routelines, err := parseNetRoute(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
}
return routelines, nil
}
func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
var routelines []NetRouteLine
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
routeline, err := parseNetRouteLine(fields)
if err != nil {
return nil, err
}
routelines = append(routelines, *routeline)
}
return routelines, nil
}
func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
if len(fields) != routeLineColumns {
return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
}
iface := fields[0]
if iface == blackholeRepresentation {
iface = blackholeIfaceName
}
destination, err := strconv.ParseUint(fields[1], 16, 32)
if err != nil {
return nil, err
}
gateway, err := strconv.ParseUint(fields[2], 16, 32)
if err != nil {
return nil, err
}
flags, err := strconv.ParseUint(fields[3], 10, 32)
if err != nil {
return nil, err
}
refcnt, err := strconv.ParseUint(fields[4], 10, 32)
if err != nil {
return nil, err
}
use, err := strconv.ParseUint(fields[5], 10, 32)
if err != nil {
return nil, err
}
metric, err := strconv.ParseUint(fields[6], 10, 32)
if err != nil {
return nil, err
}
mask, err := strconv.ParseUint(fields[7], 16, 32)
if err != nil {
return nil, err
}
mtu, err := strconv.ParseUint(fields[8], 10, 32)
if err != nil {
return nil, err
}
window, err := strconv.ParseUint(fields[9], 10, 32)
if err != nil {
return nil, err
}
irtt, err := strconv.ParseUint(fields[10], 10, 32)
if err != nil {
return nil, err
}
routeline := &NetRouteLine{
Iface: iface,
Destination: uint32(destination),
Gateway: uint32(gateway),
Flags: uint32(flags),
RefCnt: uint32(refcnt),
Use: uint32(use),
Metric: uint32(metric),
Mask: uint32(mask),
MTU: uint32(mtu),
Window: uint32(window),
IRTT: uint32(irtt),
}
return routeline, nil
}
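
The new net_route.go adds a public parser for /proc/net/route. A short usage sketch of the API defined above; the output formatting is illustrative only, and the numeric fields are the raw hex/decimal words of the kernel route table as parsed by parseNetRouteLine:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    	if err != nil {
    		log.Fatal(err)
    	}
    	routes, err := fs.NetRoute()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, r := range routes {
    		// Destination, Gateway and Mask come from hex columns; Metric is decimal.
    		fmt.Printf("%-8s dst=%08x gw=%08x mask=%08x metric=%d\n",
    			r.Iface, r.Destination, r.Gateway, r.Mask, r.Metric)
    	}
    }
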

View File

@ -16,7 +16,6 @@ package procfs
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"strings" "strings"
@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b)) stat, err := parseSockstat(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
} }
return stat, nil return stat, nil
@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// Expect a minimum of a protocol and one key/value pair. // Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ") fields := strings.Split(s.Text(), " ")
if len(fields) < 3 { if len(fields) < 3 {
return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
} }
// The remaining fields are key/value pairs. // The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:]) kvs, err := parseSockstatKVs(fields[1:])
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
} }
// The first field is the protocol. We must trim its colon suffix. // The first field is the protocol. We must trim its colon suffix.
@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// parseSockstatKVs parses a string slice into a map of key/value pairs. // parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) { func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 { if len(kvs)%2 != 0 {
return nil, errors.New("odd number of fields in key/value pairs") return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
} }
// Iterate two values at a time to gather key/value pairs. // Iterate two values at a time to gather key/value pairs.

View File

@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b)) entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
} }
return entries, nil return entries, nil
@ -83,7 +83,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
softnetStat := SoftnetStat{} softnetStat := SoftnetStat{}
if width < minColumns { if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
} }
// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347

View File

@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text() line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields) item, err := nu.parseLine(line, hasInode, minFields)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
} }
nu.Rows = append(nu.Rows, item) nu.Rows = append(nu.Rows, item)
} }
if err := s.Err(); err != nil { if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
} }
return &nu, nil return &nu, nil
@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
l := len(fields) l := len(fields)
if l < min { if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
} }
// Field offsets are as follows: // Field offsets are as follows:
@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1]) users, err := u.parseUsers(fields[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
} }
flags, err := u.parseFlags(fields[3]) flags, err := u.parseFlags(fields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
} }
typ, err := u.parseType(fields[4]) typ, err := u.parseType(fields[4])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
} }
state, err := u.parseState(fields[5]) state, err := u.parseState(fields[5])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
} }
var inode uint64 var inode uint64
if hasInode { if hasInode {
inode, err = u.parseInode(fields[6]) inode, err = u.parseInode(fields[6])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
} }
} }

View File

@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
m, err := parseWireless(bytes.NewReader(b)) m, err := parseWireless(bytes.NewReader(b))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse wireless: %w", err) return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
} }
return m, nil return m, nil
@ -97,64 +97,64 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
parts := strings.Split(line, ":") parts := strings.Split(line, ":")
if len(parts) != 2 { if len(parts) != 2 {
return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line) return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
} }
name := strings.TrimSpace(parts[0]) name := strings.TrimSpace(parts[0])
stats := strings.Fields(parts[1]) stats := strings.Fields(parts[1])
if len(stats) < 10 { if len(stats) < 10 {
return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line) return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
} }
status, err := strconv.ParseUint(stats[0], 16, 16) status, err := strconv.ParseUint(stats[0], 16, 16)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid status in line %d: %q", n, line) return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
} }
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err) return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
} }
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err) return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
} }
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err) return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
} }
dnwid, err := strconv.Atoi(stats[4]) dnwid, err := strconv.Atoi(stats[4])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err) return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
} }
dcrypt, err := strconv.Atoi(stats[5]) dcrypt, err := strconv.Atoi(stats[5])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err) return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
} }
dfrag, err := strconv.Atoi(stats[6]) dfrag, err := strconv.Atoi(stats[6])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err) return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
} }
dretry, err := strconv.Atoi(stats[7]) dretry, err := strconv.Atoi(stats[7])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err) return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
} }
dmisc, err := strconv.Atoi(stats[8]) dmisc, err := strconv.Atoi(stats[8])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err) return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
} }
mbeacon, err := strconv.Atoi(stats[9]) mbeacon, err := strconv.Atoi(stats[9])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err) return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
} }
w := &Wireless{ w := &Wireless{
@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
} }
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err) return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
} }
return interfaces, nil return interfaces, nil

View File

@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text()) fields := strings.Fields(s.Text())
if len(fields) != 2 { if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
} }
name := fields[0] name := fields[0]

View File

@ -15,6 +15,7 @@ package procfs
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -35,6 +36,12 @@ type Proc struct {
// Procs represents a list of Proc structs. // Procs represents a list of Proc structs.
type Procs []Proc type Procs []Proc
var (
ErrFileParse = errors.New("Error Parsing File")
ErrFileRead = errors.New("Error Reading File")
ErrMountPoint = errors.New("Error Accessing Mount point")
)
func (p Procs) Len() int { return len(p) } func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@ -42,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
// Self returns a process for the current process read via /proc/self. // Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) { func Self() (Proc, error) {
fs, err := NewFS(DefaultMountPoint) fs, err := NewFS(DefaultMountPoint)
if err != nil { if err != nil || errors.Unwrap(err) == ErrMountPoint {
return Proc{}, err return Proc{}, err
} }
return fs.Self() return fs.Self()
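
The newly exported sentinels (ErrFileParse, ErrFileRead, ErrMountPoint) make failures classifiable with errors.Is. A sketch of that pattern, with one caveat grounded in the hunks above: matching only works along chains where a call site wrapped the sentinel with %w; the sites that format it with %s embed its text without wrapping:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    	if err != nil {
    		fmt.Println("cannot access /proc:", err)
    		return
    	}
    	if _, err := fs.Meminfo(); err != nil {
    		switch {
    		case errors.Is(err, procfs.ErrFileParse):
    			fmt.Println("malformed meminfo:", err)
    		case errors.Is(err, procfs.ErrFileRead):
    			fmt.Println("unreadable meminfo:", err)
    		default:
    			fmt.Println(err)
    		}
    	}
    }
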
@ -104,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
} }
p := Procs{} p := Procs{}
@ -205,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names { for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32) fd, err := strconv.ParseInt(n, 10, 32)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not parse fd %q: %w", n, err) return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
} }
fds[i] = uintptr(fd) fds[i] = uintptr(fd)
} }
@ -237,7 +244,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// a process. // a process.
func (p Proc) FileDescriptorsLen() (int, error) { func (p Proc) FileDescriptorsLen() (int, error) {
// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
if p.fs.real { if p.fs.isReal {
stat, err := os.Stat(p.path("fd")) stat, err := os.Stat(p.path("fd"))
if err != nil { if err != nil {
return 0, err return 0, err
@ -290,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1) names, err := d.Readdirnames(-1)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
} }
return names, nil return names, nil

View File

@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
fields := strings.SplitN(cgroupStr, ":", 3) fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 { if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
} }
cgroup := &Cgroup{ cgroup := &Cgroup{
@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
} }
cgroup.HierarchyID, err = strconv.Atoi(fields[0]) cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID") return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
} }
if fields[1] != "" { if fields[1] != "" {
ssNames := strings.Split(fields[1], ",") ssNames := strings.Split(fields[1], ",")

View File

@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
fields := strings.Fields(CgroupSummaryStr) fields := strings.Fields(CgroupSummaryStr)
// require at least 4 fields // require at least 4 fields
if len(fields) < 4 { if len(fields) < 4 {
return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
} }
CgroupSummary := &CgroupSummary{ CgroupSummary := &CgroupSummary{
@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
} }
CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID") return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
} }
CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Cgroup Num") return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
} }
CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse Enabled") return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
} }
return CgroupSummary, nil return CgroupSummary, nil
} }

View File

@ -111,7 +111,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
} }
return i, nil return i, nil
} }
return nil, fmt.Errorf("invalid inode entry: %q", line) return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
} }
// ProcFDInfos represents a list of ProcFDInfo structs. // ProcFDInfos represents a list of ProcFDInfo structs.

View File

@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) {
continue continue
} }
if len(parts) < 2 { if len(parts) < 2 {
return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
} }
intName := parts[0][:len(parts[0])-1] // remove trailing : intName := parts[0][:len(parts[0])-1] // remove trailing :


@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) {
 		//fields := limitsMatch.Split(s.Text(), limitsFields)
 		fields := limitsMatch.FindStringSubmatch(s.Text())
 		if len(fields) != limitsFields {
-			return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
+			return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
 		}
 		switch fields[1] {
@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
 	}
 	i, err := strconv.ParseUint(s, 10, 64)
 	if err != nil {
-		return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
+		return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
 	}
 	return i, nil
 }
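
Reviewer note: upstream is not consistent in these rewrites — some messages pass the sentinel through `%w` (which wraps it), while others, like the `parseUint` change above, use `%s` (which only prints its text). Only the `%w` form is matchable with `errors.Is`. A self-contained sketch of the difference:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Stand-in for the procfs sentinel of the same name.
    var ErrFileParse = errors.New("Error Parsing File")

    func main() {
    	wrapped := fmt.Errorf("%w: couldn't parse value %q", ErrFileParse, "x")
    	printed := fmt.Errorf("%s: couldn't parse value %q", ErrFileParse, "x")

    	fmt.Println(errors.Is(wrapped, ErrFileParse)) // true
    	fmt.Println(errors.Is(printed, ErrFileParse)) // false: %s does not wrap
    }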


@ -65,7 +65,7 @@ type ProcMap struct {
 func parseDevice(s string) (uint64, error) {
 	toks := strings.Split(s, ":")
 	if len(toks) < 2 {
-		return 0, fmt.Errorf("unexpected number of fields")
+		return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks))
 	}
 	major, err := strconv.ParseUint(toks[0], 16, 0)
@ -95,7 +95,7 @@ func parseAddress(s string) (uintptr, error) {
 func parseAddresses(s string) (uintptr, uintptr, error) {
 	toks := strings.Split(s, "-")
 	if len(toks) < 2 {
-		return 0, 0, fmt.Errorf("invalid address")
+		return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse)
 	}
 	saddr, err := parseAddress(toks[0])
@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) {
 // parsePermissions parses a token and returns any that are set.
 func parsePermissions(s string) (*ProcMapPermissions, error) {
 	if len(s) < 4 {
-		return nil, fmt.Errorf("invalid permissions token")
+		return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
 	}
 	perms := ProcMapPermissions{}
@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) {
 func parseProcMap(text string) (*ProcMap, error) {
 	fields := strings.Fields(text)
 	if len(fields) < 5 {
-		return nil, fmt.Errorf("truncated procmap entry")
+		return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
 	}
 	saddr, eaddr, err := parseAddresses(fields[0])


@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
 		// Remove trailing :.
 		protocol := strings.TrimSuffix(nameParts[0], ":")
 		if len(nameParts) != len(valueParts) {
-			return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s",
-				fileName, protocol)
+			return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
 		}
 		for i := 1; i < len(nameParts); i++ {
 			value, err := strconv.ParseFloat(valueParts[i], 64)


@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
+		return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
 	}
 	ns := make(Namespaces, len(names))
@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
 		fields := strings.SplitN(target, ":", 2)
 		if len(fields) != 2 {
-			return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
+			return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
 		}
 		typ := fields[0]
 		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
+			return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
 		}
 		ns[name] = Namespace{typ, uint32(inode)}


@ -61,14 +61,14 @@ type PSIStats struct {
 func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
 	data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
 	if err != nil {
-		return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
+		return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
 	}
-	return parsePSIStats(resource, bytes.NewReader(data))
+	return parsePSIStats(bytes.NewReader(data))
 }

 // parsePSIStats parses the specified file for pressure stall information.
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+func parsePSIStats(r io.Reader) (PSIStats, error) {
 	psiStats := PSIStats{}
 	scanner := bufio.NewScanner(r)
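
Reviewer note: dropping the unused `resource` parameter from `parsePSIStats` is an unexported change, so nothing outside the package has to adapt. A minimal sketch of the caller-visible API, assuming the vendored procfs v0.11.1 (hypothetical example, not part of this diff):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	fs, err := procfs.NewDefaultFS()
    	if err != nil {
    		panic(err)
    	}
    	// Valid resources are "cpu", "memory" and "io"; the signature of
    	// PSIStatsForResource is unchanged by this bump.
    	stats, err := fs.PSIStatsForResource("cpu")
    	if err != nil {
    		panic(err)
    	}
    	if stats.Some != nil {
    		fmt.Printf("cpu some avg10=%.2f\n", stats.Some.Avg10)
    	}
    }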


@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
 	}
 	vBytes := vKBytes * 1024
-	s.addValue(k, v, vKBytes, vBytes)
+	s.addValue(k, vBytes)
 	return nil
 }

-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
 	switch k {
 	case "Rss":
 		s.Rss += vUintBytes


@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
 		// Remove trailing :.
 		protocol := strings.TrimSuffix(nameParts[0], ":")
 		if len(nameParts) != len(valueParts) {
-			return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s",
-				fileName, protocol)
+			return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
 		}
 		for i := 1; i < len(nameParts); i++ {
 			value, err := strconv.ParseFloat(valueParts[i], 64)


@ -138,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) {
 	)
 	if l < 0 || r < 0 {
-		return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
+		return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
 	}
 	s.Comm = string(data[l+1 : r])


@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
 		vp := util.NewValueParser(f)
 		values[i] = vp.Int()
 		if err := vp.Err(); err != nil {
-			return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err)
+			return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
 		}
 	}
 	return values, nil


@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) {
 	l := slabSpace.ReplaceAllString(line, " ")
 	s := strings.Split(l, " ")
 	if len(s) != 16 {
-		return nil, fmt.Errorf("unable to parse: %q", line)
+		return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
 	}
 	var err error
 	i := &Slab{Name: s[0]}


@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 	)
 	if !scanner.Scan() {
-		return Softirqs{}, fmt.Errorf("softirqs empty")
+		return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
 	}
 	for scanner.Scan() {
@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.Hi = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "TIMER:":
@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.Timer = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "NET_TX:":
@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.NetTx = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "NET_RX:":
@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.NetRx = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "BLOCK:":
@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.Block = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "IRQ_POLL:":
@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.IRQPoll = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "TASKLET:":
@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.Tasklet = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "SCHED:":
@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.Sched = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "HRTIMER:":
@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.HRTimer = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "RCU:":
@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 			softirqs.RCU = make([]uint64, len(perCPU))
 			for i, count := range perCPU {
 				if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err)
+					return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		}
 	}
 	if err := scanner.Err(); err != nil {
-		return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err)
+		return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
 	}
 	return softirqs, scanner.Err()
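
Reviewer note: the error-wrapping churn above leaves the exported softirq API untouched. A quick usage sketch against the vendored package (hypothetical, assumes procfs v0.11.x):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	fs, err := procfs.NewDefaultFS()
    	if err != nil {
    		panic(err)
    	}
    	// Softirqs parses /proc/softirqs into per-CPU counter slices.
    	s, err := fs.Softirqs()
    	if err != nil {
    		panic(err)
    	}
    	if len(s.RCU) > 0 {
    		fmt.Println("RCU softirqs on CPU0:", s.RCU[0])
    	}
    }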


@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
 		&cpuStat.Guest, &cpuStat.GuestNice)
 	if err != nil && err != io.EOF {
-		return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
+		return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
 	}
 	if count == 0 {
-		return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
 	}
 	cpuStat.User /= userHZ
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
 	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
 	if err != nil {
-		return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
+		return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
 	}
 	return cpuStat, cpuID, nil
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
 		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
 	if err != nil {
-		return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
+		return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
 	}
 	return softIRQStat, total, nil
@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
 		err error
 	)
+	// Increase default scanner buffer to handle very long `intr` lines.
+	buf := make([]byte, 0, 8*1024)
+	scanner.Buffer(buf, 1024*1024)
+
 	for scanner.Scan() {
 		line := scanner.Text()
 		parts := strings.Fields(scanner.Text())
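
Reviewer note: the three added lines raise `bufio.Scanner`'s default 64 KiB token limit to 1 MiB; on machines with many CPUs and IRQ sources the `intr` line of /proc/stat can exceed the default, which would make `Scan()` fail with `bufio.ErrTooLong`. A standalone sketch of the same stdlib pattern:

    package main

    import (
    	"bufio"
    	"fmt"
    	"strings"
    )

    func main() {
    	// A single line longer than bufio.MaxScanTokenSize (64 KiB) fails
    	// under the default buffer; Buffer() raises the cap to 1 MiB.
    	long := "intr " + strings.Repeat("0 ", 100000)

    	scanner := bufio.NewScanner(strings.NewReader(long))
    	buf := make([]byte, 0, 8*1024)
    	scanner.Buffer(buf, 1024*1024)

    	for scanner.Scan() {
    		fmt.Println("line length:", len(scanner.Text()))
    	}
    	if err := scanner.Err(); err != nil {
    		fmt.Println("scan error:", err)
    	}
    }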
@ -197,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
 		switch {
 		case parts[0] == "btime":
 			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "intr":
 			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
 			}
 			numberedIRQs := parts[2:]
 			stat.IRQ = make([]uint64, len(numberedIRQs))
 			for i, count := range numberedIRQs {
 				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
-					return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
+					return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
 				}
 			}
 		case parts[0] == "ctxt":
 			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "processes":
 			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "procs_running":
 			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "procs_blocked":
 			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
-				return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
+				return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
 			}
 		case parts[0] == "softirq":
 			softIRQStats, total, err := parseSoftIRQStat(line)
@ -247,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
 	}
 	if err := scanner.Err(); err != nil {
-		return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
+		return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
 	}
 	return stat, nil


@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) {
 	swapFields := strings.Fields(swapString)
 	swapLength := len(swapFields)
 	if swapLength < 5 {
-		return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
+		return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
 	}
 	swap := &Swap{
@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
 	swap.Size, err = strconv.Atoi(swapFields[2])
 	if err != nil {
-		return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
+		return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
 	}
 	swap.Used, err = strconv.Atoi(swapFields[3])
 	if err != nil {
-		return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
+		return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
 	}
 	swap.Priority, err = strconv.Atoi(swapFields[4])
 	if err != nil {
-		return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
+		return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
 	}
 	return swap, nil


@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
 	names, err := d.Readdirnames(-1)
 	if err != nil {
-		return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+		return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
 	}
 	t := Procs{}
@ -55,7 +55,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
 			continue
 		}
-		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
+		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
 	}
 	return t, nil
@ -67,12 +67,12 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
 	if _, err := os.Stat(taskPath); err != nil {
 		return Proc{}, err
 	}
-	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
+	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
 }

 // Thread returns a process for a given TID of Proc.
 func (proc Proc) Thread(tid int) (Proc, error) {
-	tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
+	tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
 	if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
 		return Proc{}, err
 	}
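
Reviewer note: `fs.real` becoming `fs.isReal` is an unexported field rename; exported entry points such as `AllThreads` and `Thread` keep their signatures. A quick usage sketch (hypothetical, not part of this diff):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/prometheus/procfs"
    )

    func main() {
    	fs, err := procfs.NewDefaultFS()
    	if err != nil {
    		panic(err)
    	}
    	// Enumerate the threads of this process via /proc/<pid>/task.
    	threads, err := fs.AllThreads(os.Getpid())
    	if err != nil {
    		panic(err)
    	}
    	for _, t := range threads {
    		comm, _ := t.Comm()
    		fmt.Println(t.PID, comm)
    	}
    }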


@ -86,7 +86,7 @@ func (fs FS) VM() (*VM, error) {
 		return nil, err
 	}
 	if !file.Mode().IsDir() {
-		return nil, fmt.Errorf("%s is not a directory", path)
+		return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
 	}
 	files, err := os.ReadDir(path)


@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
 func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
 	data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
 	if err != nil {
-		return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+		return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
 	}
 	zoneinfo, err := parseZoneinfo(data)
 	if err != nil {
-		return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+		return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
 	}
 	return zoneinfo, nil
 }

10
vendor/modules.txt

@ -20,7 +20,7 @@ github.com/armon/go-metrics
 # github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
 ## explicit
 github.com/asaskevich/govalidator
-# github.com/aws/aws-sdk-go v1.45.16
+# github.com/aws/aws-sdk-go v1.45.18
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/auth/bearer
@ -512,15 +512,15 @@ github.com/pkg/xattr
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.16.0
-## explicit; go 1.17
+# github.com/prometheus/client_golang v1.17.0
+## explicit; go 1.19
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
 github.com/prometheus/client_golang/prometheus/testutil
 github.com/prometheus/client_golang/prometheus/testutil/promlint
-# github.com/prometheus/client_model v0.4.0
+# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16
 ## explicit; go 1.18
 github.com/prometheus/client_model/go
 # github.com/prometheus/common v0.44.0
@ -528,7 +528,7 @@ github.com/prometheus/client_model/go
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.10.1
+# github.com/prometheus/procfs v0.11.1
 ## explicit; go 1.19
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs