Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-09 16:00:22 +00:00)
rebase: bump github.com/prometheus/client_golang from 1.14.0 to 1.15.1
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.14.0 to 1.15.1.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.14.0...v1.15.1)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent 8265abc2c9
commit d0626d42e0
go.mod: 10 changed lines

@@ -25,7 +25,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.9.2
 	github.com/onsi/gomega v1.27.6
 	github.com/pkg/xattr v0.4.9
-	github.com/prometheus/client_golang v1.14.0
+	github.com/prometheus/client_golang v1.15.1
 	github.com/stretchr/testify v1.8.2
 	golang.org/x/crypto v0.8.0
 	golang.org/x/net v0.9.0
@@ -103,7 +103,7 @@ require (
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.16 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -119,8 +119,8 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.37.0 // indirect
-	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/prometheus/common v0.42.0 // indirect
+	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/ryanuber/go-glob v1.0.0 // indirect
 	github.com/spf13/cobra v1.6.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
@@ -136,7 +136,7 @@ require (
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.24.0 // indirect
-	golang.org/x/oauth2 v0.4.0 // indirect
+	golang.org/x/oauth2 v0.5.0 // indirect
 	golang.org/x/term v0.7.0 // indirect
 	golang.org/x/text v0.9.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
go.sum: 18 changed lines

@@ -759,7 +759,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -805,8 +805,9 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
 github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
 github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
@@ -952,8 +953,9 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
 github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -971,8 +973,9 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
+github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -983,8 +986,9 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -1301,8 +1305,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
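The go.mod and go.sum hunks above are the module-metadata half of the bump; the remaining hunks update the vendored copy of client_golang itself. As a quick sanity check after such an update, a binary built from this tree can report which module version it actually carries. This is a minimal sketch for illustration only, not part of the commit:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded in this binary")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "github.com/prometheus/client_golang" {
			// After this bump the reported version should be v1.15.1.
			fmt.Println(dep.Path, dep.Version)
		}
	}
}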
@@ -28,6 +28,8 @@ var (
 	MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")}
 	// MetricsGC allows only GC metrics to be collected from Go runtime.
 	// e.g. go_gc_cycles_automatic_gc_cycles_total
+	// NOTE: This does not include new class of "/cpu/classes/gc/..." metrics.
+	// Use custom metric rule to access those.
 	MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)}
 	// MetricsMemory allows only memory metrics to be collected from Go runtime.
 	// e.g. go_memory_classes_heap_free_bytes
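The hunk above (whose file header was not captured in this view) documents the vendored Go runtime metrics rules: MetricsGC matches only ^/gc/.* and deliberately excludes the newer /cpu/classes/gc/... metrics. For reference, a hedged sketch of wiring a Go collector restricted to GC metrics through the collectors package; the registry setup, metric names and port are illustrative assumptions:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Collect only GC-related runtime metrics (rule ^/gc/.*); per the note in
	// the hunk, /cpu/classes/gc/... metrics would need a custom rule instead.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsGC),
	))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}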
vendor/github.com/prometheus/client_golang/prometheus/counter.go (generated, vendored): 26 changed lines

@@ -59,6 +59,18 @@ type ExemplarAdder interface {
 // CounterOpts is an alias for Opts. See there for doc comments.
 type CounterOpts Opts
 
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewCounter creates a new Counter based on the provided CounterOpts.
 //
 // The returned implementation also implements ExemplarAdder. It is safe to
@@ -174,16 +186,24 @@ type CounterVec struct {
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
 // partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
-	desc := NewDesc(
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &CounterVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
 			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
 			}
 			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
 			result.init(result) // Init self-collection.
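As the counter.go hunks show, the classic NewCounterVec constructor now delegates to the experimental V2.NewCounterVec with UnconstrainedLabels, so existing call sites keep compiling and behaving as before. A minimal sketch of the two equivalent construction paths, assuming the exported prometheus.V2 entry point of this release; metric names are made up:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Existing call sites are untouched by the bump:
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "demo_requests_total",
		Help: "Requests by method.",
	}, []string{"method"})

	// The same vector expressed through the new V2 options struct:
	requestsV2 := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_requests_v2_total",
			Help: "Requests by method.",
		},
		VariableLabels: prometheus.UnconstrainedLabels([]string{"method"}),
	})

	requests.WithLabelValues("GET").Inc()
	requestsV2.WithLabelValues("GET").Inc()
}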
vendor/github.com/prometheus/client_golang/prometheus/desc.go (generated, vendored): 46 changed lines

@@ -14,20 +14,16 @@
 package prometheus
 
 import (
-	"errors"
 	"fmt"
 	"sort"
 	"strings"
 
-	"github.com/cespare/xxhash/v2"
-
 	"github.com/prometheus/client_golang/prometheus/internal"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"
-
+	"github.com/cespare/xxhash/v2"
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
 )
 
 // Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -54,9 +50,9 @@ type Desc struct {
 	// constLabelPairs contains precalculated DTO label pairs based on
 	// the constant labels.
 	constLabelPairs []*dto.LabelPair
-	// variableLabels contains names of labels for which the metric
-	// maintains variable values.
-	variableLabels []string
+	// variableLabels contains names of labels and normalization function for
+	// which the metric maintains variable values.
+	variableLabels ConstrainedLabels
 	// id is a hash of the values of the ConstLabels and fqName. This
 	// must be unique among all registered descriptors and can therefore be
 	// used as an identifier of the descriptor.
@@ -80,10 +76,24 @@ type Desc struct {
 // For constLabels, the label values are constant. Therefore, they are fully
 // specified in the Desc. See the Collector example for a usage pattern.
 func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
 	d := &Desc{
 		fqName: fqName,
 		help:   help,
-		variableLabels: variableLabels,
+		variableLabels: variableLabels.constrainedLabels(),
 	}
 	if !model.IsValidMetricName(model.LabelValue(fqName)) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	// their sorted label names) plus the fqName (at position 0).
 	labelValues := make([]string, 1, len(constLabels)+1)
 	labelValues[0] = fqName
-	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
 	labelNameSet := map[string]struct{}{}
 	// First add only the const label names and sort them...
 	for labelName := range constLabels {
@@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	// Now add the variable label names, but prefix them with something that
 	// cannot be in a regular label name. That prevents matching the label
 	// dimension with a different mix between preset and variable labels.
-	for _, labelName := range variableLabels {
-		if !checkLabelName(labelName) {
-			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+	for _, label := range d.variableLabels {
+		if !checkLabelName(label.Name) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
 			return d
 		}
-		labelNames = append(labelNames, "$"+labelName)
-		labelNameSet[labelName] = struct{}{}
+		labelNames = append(labelNames, "$"+label.Name)
+		labelNameSet[label.Name] = struct{}{}
 	}
 	if len(labelNames) != len(labelNameSet) {
-		d.err = errors.New("duplicate label names")
+		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
 		return d
 	}
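One user-visible effect of the desc.go changes is the more specific error for clashing label names; the Desc records the error and it surfaces at registration time. A small sketch of the failure mode the new message describes, with a hypothetical metric name and the expected wording hedged in the comment:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// "tenant" appears both as a constant and as a variable label, which is invalid.
	bad := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name:        "demo_jobs_total",
		Help:        "Jobs processed.",
		ConstLabels: prometheus.Labels{"tenant": "acme"},
	}, []string{"tenant"})

	if err := reg.Register(bad); err != nil {
		// With v1.15.1 the error should name the offending metric, along the lines of:
		// duplicate label names in constant and variable labels for metric "demo_jobs_total"
		fmt.Println(err)
	}
}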
vendor/github.com/prometheus/client_golang/prometheus/gauge.go (generated, vendored): 26 changed lines

@@ -55,6 +55,18 @@ type Gauge interface {
 // GaugeOpts is an alias for Opts. See there for doc comments.
 type GaugeOpts Opts
 
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+	GaugeOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewGauge creates a new Gauge based on the provided GaugeOpts.
 //
 // The returned implementation is optimized for a fast Set method. If you have a
@@ -138,16 +150,24 @@ type GaugeVec struct {
 // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
 // partitioned by the given label names.
 func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
-	desc := NewDesc(
+	return V2.NewGaugeVec(GaugeVecOpts{
+		GaugeOpts:      opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &GaugeVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
 			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
 			}
 			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
 			result.init(result) // Init self-collection.
vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go (generated, vendored): 7 changed lines

@@ -23,11 +23,10 @@ import (
 	"strings"
 	"sync"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	dto "github.com/prometheus/client_model/go"
-
 	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
 )
 
 const (
vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored): 51 changed lines

@@ -22,10 +22,9 @@ import (
 	"sync/atomic"
 	"time"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-
 	dto "github.com/prometheus/client_model/go"
+
+	"google.golang.org/protobuf/proto"
 )
 
 // nativeHistogramBounds for the frac of observed values. Only relevant for
@@ -469,6 +468,18 @@ type HistogramOpts struct {
 	NativeHistogramMaxZeroThreshold float64
 }
 
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+	HistogramOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
 // panics if the buckets in HistogramOpts are not in strictly increasing order.
 //
@@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
 
 func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
 	if len(desc.variableLabels) != len(labelValues) {
-		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
 	}
 
 	for _, n := range desc.variableLabels {
-		if n == bucketLabel {
+		if n.Name == bucketLabel {
 			panic(errBucketLabelNotAllowed)
 		}
 	}
@@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// for both counts as well as exemplars:
-	h.counts[0] = &histogramCounts{
-		buckets: make([]uint64, len(h.upperBounds)),
-		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
-		nativeHistogramSchema: h.nativeHistogramSchema,
-	}
-	h.counts[1] = &histogramCounts{
-		buckets: make([]uint64, len(h.upperBounds)),
-		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
-		nativeHistogramSchema: h.nativeHistogramSchema,
-	}
+	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
 	h.init(h) // Init self-collection.
@@ -1034,15 +1041,23 @@ type HistogramVec struct {
 // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
 // partitioned by the given label names.
 func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
-	desc := NewDesc(
+	return V2.NewHistogramVec(HistogramVecOpts{
+		HistogramOpts:  opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &HistogramVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			return newHistogram(desc, opts, lvs...)
+			return newHistogram(desc, opts.HistogramOpts, lvs...)
 		}),
 	}
 }
vendor/github.com/prometheus/client_golang/prometheus/labels.go (generated, vendored): 72 changed lines

@@ -32,6 +32,78 @@ import (
 // create a Desc.
 type Labels map[string]string
 
+// ConstrainedLabels represents a label name and its constrain function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+	Name       string
+	Constraint func(string) string
+}
+
+func (cl ConstrainedLabel) Constrain(v string) string {
+	if cl.Constraint == nil {
+		return v
+	}
+	return cl.Constraint(v)
+}
+
+// ConstrainableLabels is an interface that allows creating of labels that can
+// be optionally constrained.
+//
+//	prometheus.V2().NewCounterVec(CounterVecOpts{
+//	   CounterOpts: {...}, // Usual CounterOpts fields
+//	   VariableLabels: []ConstrainedLabels{
+//	      {Name: "A"},
+//	      {Name: "B", Constraint: func(v string) string { ... }},
+//	   },
+//	})
+type ConstrainableLabels interface {
+	constrainedLabels() ConstrainedLabels
+	labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constrain function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
+	return cls
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+	names := make([]string, len(cls))
+	for i, label := range cls {
+		names[i] = label.Name
+	}
+	return names
+}
+
+// UnconstrainedLabels represents collection of label without any constraint on
+// their value. Thus, it is simply a collection of label names.
+//
+//	UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+//	ConstrainedLabels {
+//	   { Name: "A" },
+//	   { Name: "B" },
+//	}
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
+	constrainedLabels := make([]ConstrainedLabel, len(uls))
+	for i, l := range uls {
+		constrainedLabels[i] = ConstrainedLabel{Name: l}
+	}
+	return constrainedLabels
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+	return uls
+}
+
 // reservedLabelPrefix is a prefix which is not legal in user-supplied
 // label names.
 const reservedLabelPrefix = "__"
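The labels.go hunk introduces the constrained-label machinery the new V2 constructors build on: a ConstrainedLabel pairs a label name with an optional normalization function, and UnconstrainedLabels keeps the old plain-string behaviour. A minimal sketch of the intended usage under the assumption that the constraint is applied when label values are resolved; metric name is illustrative:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The Constraint normalizes every observed value of the "method" label,
	// so "get" and "GET" are intended to land in the same time series.
	httpRequests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_http_requests_total",
			Help: "HTTP requests by method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper},
		},
	})

	httpRequests.WithLabelValues("get").Inc()
	httpRequests.WithLabelValues("GET").Inc()
}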
vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored): 6 changed lines

@@ -20,11 +20,9 @@ import (
 	"strings"
 	"time"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"
-
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
 )
 
 var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (generated, vendored): 26 changed lines

@@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 		o.apply(rtOpts)
 	}
 
-	code, method := checkLabels(counter)
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
 
 	return func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			addWithExemplar(
-				counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
-				1,
-				rtOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
 		}
 		return resp, err
 	}
@@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		o.apply(rtOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
 
 	return func(r *http.Request) (*http.Response, error) {
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
-				time.Since(start).Seconds(),
-				rtOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
 		}
 		return resp, err
 	}
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go (generated, vendored): 101 changed lines

@@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	if code {
 		return func(w http.ResponseWriter, r *http.Request) {
@@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-				time.Since(now).Seconds(),
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 		}
 	}
 
 	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		next.ServeHTTP(w, r)
-		observeWithExemplar(
-			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
-			time.Since(now).Seconds(),
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 	}
 }
@@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
 		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(counter)
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	if code {
 		return func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			addWithExemplar(
-				counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-				1,
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
 		}
 	}
 
 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
-		addWithExemplar(
-			counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
-			1,
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
 	}
 }
@@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
 		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		d := newDelegator(w, func(status int) {
-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
-				time.Since(now).Seconds(),
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 		})
 		next.ServeHTTP(d, r)
 	}
@@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
 		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	if code {
 		return func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 			size := computeApproximateRequestSize(r)
-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-				float64(size),
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
 		}
 	}
 
 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
 		size := computeApproximateRequestSize(r)
-		observeWithExemplar(
-			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
-			float64(size),
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
 	}
 }
@@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
 		o.apply(hOpts)
 	}
 
-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		d := newDelegator(w, nil)
 		next.ServeHTTP(d, r)
-		observeWithExemplar(
-			obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-			float64(d.Written()),
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
 	})
 }
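All of the promhttp middleware above now curries any dynamic labels into the vector before validating the remaining labels, but callers who do not use the new options see no behavioural change. For orientation, the standard way these wrappers are chained around a handler; registry defaults, metric names and the port are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	requests := promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "demo_http_requests_total",
		Help: "HTTP requests by status code and method.",
	}, []string{"code", "method"})

	duration := promauto.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "demo_http_request_duration_seconds",
		Help:    "Request latency.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Duration observation wraps the counter, which wraps the actual handler.
	instrumented := promhttp.InstrumentHandlerDuration(duration,
		promhttp.InstrumentHandlerCounter(requests, hello),
	)

	http.Handle("/hello", instrumented)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}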
vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go (generated, vendored): 34 changed lines

@@ -24,14 +24,32 @@ type Option interface {
 	apply(*options)
 }
 
+// LabelValueFromCtx are used to compute the label value from request context.
+// Context can be filled with values from request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
 // options store options for both a handler or round tripper.
 type options struct {
 	extraMethods []string
 	getExemplarFn func(requestCtx context.Context) prometheus.Labels
+	extraLabelsFromCtx map[string]LabelValueFromCtx
 }
 
 func defaultOptions() *options {
-	return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
+	return &options{
+		getExemplarFn:      func(ctx context.Context) prometheus.Labels { return nil },
+		extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+	}
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+	labels := prometheus.Labels{}
+
+	for label := range o.extraLabelsFromCtx {
+		labels[label] = ""
+	}
+
+	return labels
 }
 
 type optionApplyFunc func(*options)
@@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
 	})
 }
 
-// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
-// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
-// will get instrumented without exemplar.
+// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics.
+// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
+// metric will continue to observe/increment.
 func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
 	return optionApplyFunc(func(o *options) {
 		o.getExemplarFn = getExemplarFn
 	})
 }
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+	return optionApplyFunc(func(o *options) {
+		o.extraLabelsFromCtx[name] = valueFn
+	})
+}
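WithLabelFromCtx is the new option that feeds the dynamic labels used by the curried middleware above: the vector must declare the label, and the registered resolver fills its value from the request context on every observation. A hedged sketch of how it might be wired up; the tenant key, middleware, metric name and port are assumptions for illustration:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKey struct{}

func main() {
	// The vector carries the dynamic "tenant" label in addition to "code".
	requests := promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "demo_requests_by_tenant_total",
		Help: "Requests by status code and tenant.",
	}, []string{"code", "tenant"})

	handler := promhttp.InstrumentHandlerCounter(requests,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if v, ok := ctx.Value(tenantKey{}).(string); ok {
				return v
			}
			return "unknown"
		}),
	)

	// A middleware stores the tenant in the request context before the
	// instrumented handler runs.
	withTenant := func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := context.WithValue(r.Context(), tenantKey{}, "acme")
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}

	http.Handle("/", withTenant(handler))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}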
vendor/github.com/prometheus/client_golang/prometheus/registry.go (generated, vendored): 19 changed lines

@@ -21,18 +21,17 @@ import (
 	"path/filepath"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"unicode/utf8"
 
-	"github.com/cespare/xxhash/v2"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/expfmt"
-
-	dto "github.com/prometheus/client_model/go"
-
 	"github.com/prometheus/client_golang/prometheus/internal"
+
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"google.golang.org/protobuf/proto"
 )
 
 const (
@@ -933,6 +932,10 @@ func checkMetricConsistency(
 		h.WriteString(lp.GetValue())
 		h.Write(separatorByteSlice)
 	}
+	if dtoMetric.TimestampMs != nil {
+		h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+		h.Write(separatorByteSlice)
+	}
 	hSum := h.Sum64()
 	if _, exists := metricHashes[hSum]; exists {
 		return fmt.Errorf(
@@ -962,7 +965,7 @@ func checkDescConsistency(
 	copy(lpsFromDesc, desc.constLabelPairs)
 	for _, l := range desc.variableLabels {
 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
-			Name: proto.String(l),
+			Name: proto.String(l.Name),
 		})
 	}
 	if len(lpsFromDesc) != len(dtoMetric.Label) {
|
39  vendor/github.com/prometheus/client_golang/prometheus/summary.go  (generated, vendored)

@@ -22,11 +22,10 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/beorn7/perks/quantile"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/beorn7/perks/quantile"
+	"google.golang.org/protobuf/proto"
 )
 
 // quantileLabel is used for the label that defines the quantile in a
@@ -148,6 +147,18 @@ type SummaryOpts struct {
 	BufCap uint32
 }
 
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+	SummaryOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // Problem with the sliding-window decay algorithm... The Merge method of
 // perk/quantile is actually not working as advertised - and it might be
 // unfixable, as the underlying algorithm is apparently not capable of merging
@@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary {
 
 func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 	if len(desc.variableLabels) != len(labelValues) {
-		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
 	}
 
 	for _, n := range desc.variableLabels {
-		if n == quantileLabel {
+		if n.Name == quantileLabel {
 			panic(errQuantileLabelNotAllowed)
 		}
 	}
@@ -530,20 +541,28 @@ type SummaryVec struct {
 // it is handled by the Prometheus server internally, “quantile” is an illegal
 // label name. NewSummaryVec will panic if this label name is used.
 func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
-	for _, ln := range labelNames {
+	return V2.NewSummaryVec(SummaryVecOpts{
+		SummaryOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+	for _, ln := range opts.VariableLabels.labelNames() {
 		if ln == quantileLabel {
 			panic(errQuantileLabelNotAllowed)
 		}
 	}
-	desc := NewDesc(
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &SummaryVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			return newSummary(desc, opts, lvs...)
+			return newSummary(desc, opts.SummaryOpts, lvs...)
 		}),
 	}
 }
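NewSummaryVec is now a thin wrapper over the new SummaryVecOpts path reached through the experimental V2 namespace. A minimal sketch of constructing a SummaryVec via V2; the metric name, objectives, and label names are illustrative.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
		SummaryOpts: prometheus.SummaryOpts{
			Name:       "rpc_duration_seconds",
			Help:       "RPC latency distribution.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		// UnconstrainedLabels keeps the old []string behaviour; constrainable
		// labels can normalise values before they are used.
		VariableLabels: prometheus.UnconstrainedLabels([]string{"service", "method"}),
	})
	prometheus.MustRegister(latency)
	latency.WithLabelValues("billing", "Charge").Observe(0.042)
}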
1  vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go  (generated, vendored)

@@ -238,6 +238,7 @@ func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error)
 func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error {
 	if metricNames != nil {
 		got = filterMetrics(got, metricNames)
+		expected = filterMetrics(expected, metricNames)
 	}
 
 	return compare(got, expected)
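With this fix, passing metric names to the testutil helpers filters the expected input as well, so an expectation containing extra families no longer causes spurious mismatches. A small example using CollectAndCompare; the counter and expectation text are illustrative.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total", Help: "Processed jobs.",
	})
	c.Add(3)

	expected := `
# HELP jobs_processed_total Processed jobs.
# TYPE jobs_processed_total counter
jobs_processed_total 3
`
	// Only the named family is compared; unrelated families in either input are ignored.
	if err := testutil.CollectAndCompare(c, strings.NewReader(expected), "jobs_processed_total"); err != nil {
		fmt.Println("unexpected metrics:", err)
	}
}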
28  vendor/github.com/prometheus/client_golang/prometheus/timer.go  (generated, vendored)

@@ -23,7 +23,9 @@ type Timer struct {
 }
 
 // NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar
+// later on will be also supported.
+// Timer is usually used to time a function call in the
 // following way:
 //
 //	func TimeMe() {
@@ -31,6 +33,14 @@ type Timer struct {
 //		defer timer.ObserveDuration()
 //		// Do actual work.
 //	}
+//
+// or
+//
+//	func TimeMeWithExemplar() {
+//		timer := NewTimer(myHistogram)
+//		defer timer.ObserveDurationWithExemplar(exemplar)
+//		// Do actual work.
+//	}
 func NewTimer(o Observer) *Timer {
 	return &Timer{
 		begin: time.Now(),
@@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
 	}
 	return d
 }
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe exemplar with the duration unless exemplar is nil or provided Observer can't
+// be casted to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+	d := time.Since(t.begin)
+	eo, ok := t.observer.(ExemplarObserver)
+	if ok && exemplar != nil {
+		eo.ObserveWithExemplar(d.Seconds(), exemplar)
+		return d
+	}
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
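ObserveDurationWithExemplar attaches an exemplar to the timed observation when the underlying Observer supports it, and falls back to a plain observation otherwise. A minimal sketch; the histogram name, trace header, and handler are illustrative assumptions.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "handler_duration_seconds",
	Help: "Handler latency.",
})

func handle(w http.ResponseWriter, r *http.Request) {
	timer := prometheus.NewTimer(requestDuration)
	// Attach the trace ID as an exemplar; histograms implement ExemplarObserver,
	// so the exemplar is recorded alongside the duration.
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": r.Header.Get("X-Trace-Id")})
	// ... do actual work ...
}

func main() {
	prometheus.MustRegister(requestDuration)
	http.HandleFunc("/work", handle)
	http.ListenAndServe(":8080", nil)
}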
10  vendor/github.com/prometheus/client_golang/prometheus/value.go  (generated, vendored)

@@ -19,13 +19,11 @@ import (
 	"time"
 	"unicode/utf8"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"google.golang.org/protobuf/types/known/timestamppb"
-
 	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 // ValueType is an enumeration of metric types that represent a simple value.
@@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
 		return desc.constLabelPairs
 	}
 	labelPairs := make([]*dto.LabelPair, 0, totalLen)
-	for i, n := range desc.variableLabels {
+	for i, l := range desc.variableLabels {
 		labelPairs = append(labelPairs, &dto.LabelPair{
-			Name:  proto.String(n),
+			Name:  proto.String(l.Name),
 			Value: proto.String(labelValues[i]),
 		})
 	}
54  vendor/github.com/prometheus/client_golang/prometheus/vec.go  (generated, vendored)

@@ -72,6 +72,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
 // with a performance overhead (for creating and processing the Labels map).
 // See also the CounterVec example.
 func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
 	h, err := m.hashLabelValues(lvs)
 	if err != nil {
 		return false
@@ -91,6 +92,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
 // This method is used for the same purpose as DeleteLabelValues(...string). See
 // there for pros and cons of the two methods.
 func (m *MetricVec) Delete(labels Labels) bool {
+	labels = constrainLabels(m.desc, labels)
 	h, err := m.hashLabels(labels)
 	if err != nil {
 		return false
@@ -106,6 +108,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
 // Note that curried labels will never be matched if deleting from the curried vector.
 // To match curried labels with DeletePartialMatch, it must be called on the base vector.
 func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+	labels = constrainLabels(m.desc, labels)
 	return m.metricMap.deleteByLabels(labels, m.curry)
 }
 
@@ -145,10 +148,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
 		iCurry int
 	)
 	for i, label := range m.desc.variableLabels {
-		val, ok := labels[label]
+		val, ok := labels[label.Name]
 		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
 			if ok {
-				return nil, fmt.Errorf("label name %q is already curried", label)
+				return nil, fmt.Errorf("label name %q is already curried", label.Name)
 			}
 			newCurry = append(newCurry, oldCurry[iCurry])
 			iCurry++
@@ -156,7 +159,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
 			if !ok {
 				continue // Label stays uncurried.
 			}
-			newCurry = append(newCurry, curriedLabelValue{i, val})
+			newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
 		}
 	}
 	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@@ -199,6 +202,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
 // a wrapper around MetricVec, implementing a vector for a specific Metric
 // implementation, for example GaugeVec.
 func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
 	h, err := m.hashLabelValues(lvs)
 	if err != nil {
 		return nil, err
@@ -224,6 +228,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
 // around MetricVec, implementing a vector for a specific Metric implementation,
 // for example GaugeVec.
 func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+	labels = constrainLabels(m.desc, labels)
 	h, err := m.hashLabels(labels)
 	if err != nil {
 		return nil, err
@@ -266,16 +271,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
 		iCurry int
 	)
 	for i, label := range m.desc.variableLabels {
-		val, ok := labels[label]
+		val, ok := labels[label.Name]
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			if ok {
-				return 0, fmt.Errorf("label name %q is already curried", label)
+				return 0, fmt.Errorf("label name %q is already curried", label.Name)
 			}
 			h = m.hashAdd(h, curry[iCurry].value)
 			iCurry++
 		} else {
 			if !ok {
-				return 0, fmt.Errorf("label name %q missing in label map", label)
+				return 0, fmt.Errorf("label name %q missing in label map", label.Name)
 			}
 			h = m.hashAdd(h, val)
 		}
@@ -453,7 +458,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
 func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
 	for l, v := range labels {
 		// Check if the target label exists in our metrics and get the index.
-		varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
+		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
 		if validLabel {
 			// Check the value of that label against the target value.
 			// We don't consider curried values in partial matches.
@@ -605,7 +610,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
 			iCurry++
 			continue
 		}
-		if values[i] != labels[k] {
+		if values[i] != labels[k.Name] {
 			return false
 		}
 	}
@@ -621,7 +626,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []
 			iCurry++
 			continue
 		}
-		labelValues[i] = labels[k]
+		labelValues[i] = labels[k.Name]
 	}
 	return labelValues
 }
@@ -640,3 +645,34 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
 	}
 	return labelValues
 }
+
+func constrainLabels(desc *Desc, labels Labels) Labels {
+	constrainedValues := make(Labels, len(labels))
+	for l, v := range labels {
+		if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
+			constrainedValues[l] = desc.variableLabels[i].Constrain(v)
+			continue
+		}
+		constrainedValues[l] = v
+	}
+	return constrainedValues
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+	constrainedValues := make([]string, len(lvs))
+	var iCurry, iLVs int
+	for i := 0; i < len(lvs)+len(curry); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			iCurry++
+			continue
+		}
+
+		if i < len(desc.variableLabels) {
+			constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
+		} else {
+			constrainedValues[iLVs] = lvs[iLVs]
+		}
+		iLVs++
+	}
+	return constrainedValues
+}
23  vendor/github.com/prometheus/client_golang/prometheus/vnext.go  (generated, vendored, new file; all lines added)

@@ -0,0 +1,23 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

type v2 struct{}

// V2 is a struct that can be referenced to access experimental API that might
// be present in v2 of client golang someday. It offers extended functionality
// of v1 with slightly changed API. It is acceptable to use some pieces from v1
// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc`
// in the same codebase.
var V2 = v2{}
8  vendor/github.com/prometheus/client_golang/prometheus/wrap.go  (generated, vendored)

@@ -17,12 +17,10 @@ import (
 	"fmt"
 	"sort"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
-
-	"github.com/prometheus/client_golang/prometheus/internal"
+	"google.golang.org/protobuf/proto"
 )
 
 // WrapRegistererWith returns a Registerer wrapping the provided
@@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
 		constLabels[ln] = lv
 	}
 	// NewDesc will do remaining validations.
-	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
 	// Propagate errors if there was any. This will override any errer
 	// created by NewDesc above, i.e. earlier errors get precedence.
 	if desc.err != nil {
34  vendor/github.com/prometheus/common/expfmt/decode.go  (generated, vendored)

@@ -115,33 +115,29 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
 // textDecoder implements the Decoder interface for the text protocol.
 type textDecoder struct {
 	r    io.Reader
-	p    TextParser
-	fams []*dto.MetricFamily
+	fams map[string]*dto.MetricFamily
+	err  error
 }
 
 // Decode implements the Decoder interface.
 func (d *textDecoder) Decode(v *dto.MetricFamily) error {
-	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
-	if len(d.fams) == 0 {
-		// No cached metric families, read everything and parse metrics.
-		fams, err := d.p.TextToMetricFamilies(d.r)
-		if err != nil {
-			return err
-		}
-		if len(fams) == 0 {
-			return io.EOF
-		}
-		d.fams = make([]*dto.MetricFamily, 0, len(fams))
-		for _, f := range fams {
-			d.fams = append(d.fams, f)
+	if d.err == nil {
+		// Read all metrics in one shot.
+		var p TextParser
+		d.fams, d.err = p.TextToMetricFamilies(d.r)
+		// If we don't get an error, store io.EOF for the end.
+		if d.err == nil {
+			d.err = io.EOF
 		}
 	}
-
-	*v = *d.fams[0]
-	d.fams = d.fams[1:]
-
-	return nil
+	// Pick off one MetricFamily per Decode until there's nothing left.
+	for key, fam := range d.fams {
+		*v = *fam
+		delete(d.fams, key)
+		return nil
+	}
+	return d.err
 }
 
 // SampleDecoder wraps a Decoder to extract samples from the metric families
 // decoded by the wrapped Decoder.
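The rewritten textDecoder parses the whole input once and then hands out one MetricFamily per Decode call, finishing with io.EOF (or the original parse error). A small sketch of driving it through expfmt.NewDecoder; the input text is illustrative.

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader(`# TYPE jobs_processed_total counter
jobs_processed_total 3
`)
	dec := expfmt.NewDecoder(in, expfmt.FmtText)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err != io.EOF {
				fmt.Println("decode error:", err)
			}
			break
		}
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}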
3  vendor/github.com/prometheus/common/expfmt/text_create.go  (generated, vendored)

@@ -17,7 +17,6 @@ import (
 	"bufio"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math"
 	"strconv"
 	"strings"
@@ -44,7 +43,7 @@ const (
 var (
 	bufPool = sync.Pool{
 		New: func() interface{} {
-			return bufio.NewWriter(ioutil.Discard)
+			return bufio.NewWriter(io.Discard)
 		},
 	}
 	numBufPool = sync.Pool{
8  vendor/github.com/prometheus/common/expfmt/text_parse.go  (generated, vendored)

@@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) {
 func (p *TextParser) startOfLine() stateFn {
 	p.lineCount++
 	if p.skipBlankTab(); p.err != nil {
-		// End of input reached. This is the only case where
-		// that is not an error but a signal that we are done.
+		// This is the only place that we expect to see io.EOF,
+		// which is not an error but the signal that we are done.
+		// Any other error that happens to align with the start of
+		// a line is still an error.
+		if p.err == io.EOF {
 			p.err = nil
+		}
 		return nil
 	}
 	switch p.currentByte {
2  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go  (generated, vendored)

@@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
 */
 package goautoneg
 
91  vendor/github.com/prometheus/common/model/time.go  (generated, vendored)

@@ -18,7 +18,6 @@ import (
 	"errors"
 	"fmt"
 	"math"
-	"regexp"
 	"strconv"
 	"strings"
 	"time"
@@ -183,54 +182,78 @@ func (d *Duration) Type() string {
 	return "duration"
 }
 
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+	pos  int
+	mult uint64
+}{
+	"ms": {7, uint64(time.Millisecond)},
+	"s":  {6, uint64(time.Second)},
+	"m":  {5, uint64(time.Minute)},
+	"h":  {4, uint64(time.Hour)},
+	"d":  {3, uint64(24 * time.Hour)},
+	"w":  {2, uint64(7 * 24 * time.Hour)},
+	"y":  {1, uint64(365 * 24 * time.Hour)},
+}
 
 // ParseDuration parses a string into a time.Duration, assuming that a year
 // always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
-	switch durationStr {
+func ParseDuration(s string) (Duration, error) {
+	switch s {
 	case "0":
 		// Allow 0 without a unit.
 		return 0, nil
 	case "":
 		return 0, errors.New("empty duration string")
 	}
-	matches := durationRE.FindStringSubmatch(durationStr)
-	if matches == nil {
-		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
-	}
-	var dur time.Duration
-
-	// Parse the match at pos `pos` in the regex and use `mult` to turn that
-	// into ms, then add that value to the total parsed duration.
-	var overflowErr error
-	m := func(pos int, mult time.Duration) {
-		if matches[pos] == "" {
-			return
-		}
-		n, _ := strconv.Atoi(matches[pos])
-
-		// Check if the provided duration overflows time.Duration (> ~ 290years).
-		if n > int((1<<63-1)/mult/time.Millisecond) {
-			overflowErr = errors.New("duration out of range")
-		}
-		d := time.Duration(n) * time.Millisecond
-		dur += d * mult
-
-		if dur < 0 {
-			overflowErr = errors.New("duration out of range")
-		}
-	}
-
-	m(2, 1000*60*60*24*365) // y
-	m(4, 1000*60*60*24*7)   // w
-	m(6, 1000*60*60*24)     // d
-	m(8, 1000*60*60)        // h
-	m(10, 1000*60)          // m
-	m(12, 1000)             // s
-	m(14, 1)                // ms
-
-	return Duration(dur), overflowErr
+
+	orig := s
+	var dur uint64
+	lastUnitPos := 0
+
+	for s != "" {
+		if !isdigit(s[0]) {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		// Consume [0-9]*
+		i := 0
+		for ; i < len(s) && isdigit(s[i]); i++ {
+		}
+		v, err := strconv.ParseUint(s[:i], 10, 0)
+		if err != nil {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		s = s[i:]
+
+		// Consume unit.
+		for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+		}
+		if i == 0 {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+		}
+		if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		lastUnitPos = unit.pos
+
+		// Check if the provided duration overflows time.Duration (> ~ 290years).
+		if v > 1<<63/unit.mult {
+			return 0, errors.New("duration out of range")
+		}
+		dur += v * unit.mult
+		if dur > 1<<63-1 {
+			return 0, errors.New("duration out of range")
+		}
+	}
+	return Duration(dur), nil
 }
 
 func (d Duration) String() string {
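ParseDuration now scans the string directly instead of using a regular expression, and enforces that units appear from biggest to smallest. A short usage sketch:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Prometheus-style durations: units must go from biggest to smallest.
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>

	// "1m1h" is rejected because the units are out of order.
	_, err = model.ParseDuration("1m1h")
	fmt.Println(err)
}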
226  vendor/github.com/prometheus/common/model/value.go  (generated, vendored)

@@ -16,20 +16,12 @@ package model
 import (
 	"encoding/json"
 	"fmt"
-	"math"
 	"sort"
 	"strconv"
 	"strings"
 )
 
 var (
-	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
-	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
-	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
-	// of 0, which is possible to appear in a real SamplePair and thus not
-	// suitable to signal a non-existing SamplePair.
-	ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
 	// ZeroSample is the pseudo zero-value of Sample used to signal a
 	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
 	// and metric nil. Note that the natural zero value of Sample has a timestamp
@@ -38,82 +30,14 @@ var (
 	ZeroSample = Sample{Timestamp: Earliest}
 )
 
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-… (the SampleValue and SamplePair definitions and their MarshalJSON/UnmarshalJSON/Equal/String
-   methods are removed here; they reappear unchanged in the new file model/value_float.go below) …
-
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
 type Sample struct {
 	Metric    Metric      `json:"metric"`
 	Value     SampleValue `json:"value"`
 	Timestamp Time        `json:"timestamp"`
+	Histogram *SampleHistogram `json:"histogram"`
 }
 
 // Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool {
 	if !s.Timestamp.Equal(o.Timestamp) {
 		return false
 	}
+	if s.Histogram != nil {
+		return s.Histogram.Equal(o.Histogram)
+	}
 	return s.Value.Equal(o.Value)
 }
 
 func (s Sample) String() string {
+	if s.Histogram != nil {
+		return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		})
+	}
 	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
 		Timestamp: s.Timestamp,
 		Value:     s.Value,
@@ -142,6 +74,19 @@ func (s Sample) String() string {
 
 // MarshalJSON implements json.Marshaler.
 func (s Sample) MarshalJSON() ([]byte, error) {
+	if s.Histogram != nil {
+		v := struct {
+			Metric    Metric              `json:"metric"`
+			Histogram SampleHistogramPair `json:"histogram"`
+		}{
+			Metric: s.Metric,
+			Histogram: SampleHistogramPair{
+				Timestamp: s.Timestamp,
+				Histogram: s.Histogram,
+			},
+		}
+		return json.Marshal(&v)
+	}
 	v := struct {
 		Metric Metric     `json:"metric"`
 		Value  SamplePair `json:"value"`
@@ -152,7 +97,6 @@ func (s Sample) MarshalJSON() ([]byte, error) {
 			Value:     s.Value,
 		},
 	}
-
 	return json.Marshal(&v)
 }
 
@@ -161,12 +105,17 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
 	v := struct {
 		Metric Metric     `json:"metric"`
 		Value  SamplePair `json:"value"`
+		Histogram SampleHistogramPair `json:"histogram"`
 	}{
 		Metric: s.Metric,
 		Value: SamplePair{
 			Timestamp: s.Timestamp,
 			Value:     s.Value,
 		},
+		Histogram: SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		},
 	}
 
 	if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
 	}
 
 	s.Metric = v.Metric
+	if v.Histogram.Histogram != nil {
+		s.Timestamp = v.Histogram.Timestamp
+		s.Histogram = v.Histogram.Histogram
+	} else {
 	s.Timestamp = v.Value.Timestamp
 	s.Value = v.Value.Value
+	}
 
 	return nil
 }
@@ -223,78 +177,74 @@ func (s Samples) Equal(o Samples) bool {
 type SampleStream struct {
 	Metric Metric       `json:"metric"`
 	Values []SamplePair `json:"values"`
+	Histograms []SampleHistogramPair `json:"histograms"`
 }
 
 func (ss SampleStream) String() string {
-	vals := make([]string, len(ss.Values))
+	valuesLength := len(ss.Values)
+	vals := make([]string, valuesLength+len(ss.Histograms))
 	for i, v := range ss.Values {
 		vals[i] = v.String()
 	}
+	for i, v := range ss.Histograms {
+		vals[i+valuesLength] = v.String()
+	}
 	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
 }
 
-… (the Value interface, the ValueType enumeration and its MarshalJSON/UnmarshalJSON/String
-   methods are removed here; they reappear unchanged in the new file model/value_type.go below) …
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+	if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Values     []SamplePair          `json:"values"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Values:     ss.Values,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	} else if len(ss.Histograms) > 0 {
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	} else {
+		v := struct {
+			Metric Metric       `json:"metric"`
+			Values []SamplePair `json:"values"`
+		}{
+			Metric: ss.Metric,
+			Values: ss.Values,
+		}
+		return json.Marshal(&v)
+	}
+}
+
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric     Metric                `json:"metric"`
+		Values     []SamplePair          `json:"values"`
+		Histograms []SampleHistogramPair `json:"histograms"`
+	}{
+		Metric:     ss.Metric,
+		Values:     ss.Values,
+		Histograms: ss.Histograms,
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	ss.Metric = v.Metric
+	ss.Values = v.Values
+	ss.Histograms = v.Histograms
+
+	return nil
+}
 
 // Scalar is a scalar value evaluated at the set timestamp.
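model.Sample can now carry a native-histogram payload instead of a float value; JSON (un)marshalling picks whichever side is set. A minimal sketch building and serialising such a sample; the metric name and bucket values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// A sample carries either a float Value or a native-histogram payload.
	s := model.Sample{
		Metric:    model.Metric{"__name__": "request_latency_seconds"},
		Timestamp: model.Now(),
		Histogram: &model.SampleHistogram{
			Count: 10,
			Sum:   4.2,
			Buckets: model.HistogramBuckets{
				{Boundaries: 0, Lower: 0, Upper: 1, Count: 7},
				{Boundaries: 0, Lower: 1, Upper: 2, Count: 3},
			},
		},
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // serialises the "histogram" field instead of "value"
}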
100  vendor/github.com/prometheus/common/model/value_float.go  (generated, vendored, new file; all lines added)

@@ -0,0 +1,100 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"encoding/json"
	"fmt"
	"math"
	"strconv"
)

var (
	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
	// of 0, which is possible to appear in a real SamplePair and thus not
	// suitable to signal a non-existing SamplePair.
	ZeroSamplePair = SamplePair{Timestamp: Earliest}
)

// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64

// MarshalJSON implements json.Marshaler.
func (v SampleValue) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}

// UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
		return fmt.Errorf("sample value must be a quoted string")
	}
	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
	if err != nil {
		return err
	}
	*v = SampleValue(f)
	return nil
}

// Equal returns true if the value of v and o is equal or if both are NaN. Note
// that v==o is false if both are NaN. If you want the conventional float
// behavior, use == to compare two SampleValues.
func (v SampleValue) Equal(o SampleValue) bool {
	if v == o {
		return true
	}
	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
}

func (v SampleValue) String() string {
	return strconv.FormatFloat(float64(v), 'f', -1, 64)
}

// SamplePair pairs a SampleValue with a Timestamp.
type SamplePair struct {
	Timestamp Time
	Value     SampleValue
}

func (s SamplePair) MarshalJSON() ([]byte, error) {
	t, err := json.Marshal(s.Timestamp)
	if err != nil {
		return nil, err
	}
	v, err := json.Marshal(s.Value)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (s *SamplePair) UnmarshalJSON(b []byte) error {
	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
	return json.Unmarshal(b, &v)
}

// Equal returns true if this SamplePair and o have equal Values and equal
// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}

func (s SamplePair) String() string {
	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
}
178  vendor/github.com/prometheus/common/model/value_histogram.go  (generated, vendored, new file; all lines added)

@@ -0,0 +1,178 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

type FloatString float64

func (v FloatString) String() string {
	return strconv.FormatFloat(float64(v), 'f', -1, 64)
}

func (v FloatString) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}

func (v *FloatString) UnmarshalJSON(b []byte) error {
	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
		return fmt.Errorf("float value must be a quoted string")
	}
	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
	if err != nil {
		return err
	}
	*v = FloatString(f)
	return nil
}

type HistogramBucket struct {
	Boundaries int32
	Lower      FloatString
	Upper      FloatString
	Count      FloatString
}

func (s HistogramBucket) MarshalJSON() ([]byte, error) {
	b, err := json.Marshal(s.Boundaries)
	if err != nil {
		return nil, err
	}
	l, err := json.Marshal(s.Lower)
	if err != nil {
		return nil, err
	}
	u, err := json.Marshal(s.Upper)
	if err != nil {
		return nil, err
	}
	c, err := json.Marshal(s.Count)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
}

func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
	tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
	wantLen := len(tmp)
	if err := json.Unmarshal(buf, &tmp); err != nil {
		return err
	}
	if gotLen := len(tmp); gotLen != wantLen {
		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
	}
	return nil
}

func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
	return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
}

func (b HistogramBucket) String() string {
	var sb strings.Builder
	lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
	upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
	if lowerInclusive {
		sb.WriteRune('[')
	} else {
		sb.WriteRune('(')
	}
	fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
	if upperInclusive {
		sb.WriteRune(']')
	} else {
		sb.WriteRune(')')
	}
	fmt.Fprintf(&sb, ":%v", b.Count)
	return sb.String()
}

type HistogramBuckets []*HistogramBucket

func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
	if len(s) != len(o) {
		return false
	}

	for i, bucket := range s {
		if !bucket.Equal(o[i]) {
			return false
		}
	}
	return true
}

type SampleHistogram struct {
	Count   FloatString      `json:"count"`
	Sum     FloatString      `json:"sum"`
	Buckets HistogramBuckets `json:"buckets"`
}

func (s SampleHistogram) String() string {
	return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
}

func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
	return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
}

type SampleHistogramPair struct {
	Timestamp Time
	// Histogram should never be nil, it's only stored as pointer for efficiency.
	Histogram *SampleHistogram
}

func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
	if s.Histogram == nil {
		return nil, fmt.Errorf("histogram is nil")
	}
	t, err := json.Marshal(s.Timestamp)
	if err != nil {
		return nil, err
	}
	v, err := json.Marshal(s.Histogram)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}

func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
	tmp := []interface{}{&s.Timestamp, &s.Histogram}
	wantLen := len(tmp)
	if err := json.Unmarshal(buf, &tmp); err != nil {
		return err
	}
	if gotLen := len(tmp); gotLen != wantLen {
		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
	}
	if s.Histogram == nil {
		return fmt.Errorf("histogram is null")
	}
	return nil
}

func (s SampleHistogramPair) String() string {
	return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
}

func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
	return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
}
83
vendor/github.com/prometheus/common/model/value_type.go
generated
vendored
Normal file
83
vendor/github.com/prometheus/common/model/value_type.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

import (
	"encoding/json"
	"fmt"
)

// Value is a generic interface for values resulting from a query evaluation.
type Value interface {
	Type() ValueType
	String() string
}

func (Matrix) Type() ValueType  { return ValMatrix }
func (Vector) Type() ValueType  { return ValVector }
func (*Scalar) Type() ValueType { return ValScalar }
func (*String) Type() ValueType { return ValString }

type ValueType int

const (
	ValNone ValueType = iota
	ValScalar
	ValVector
	ValMatrix
	ValString
)

// MarshalJSON implements json.Marshaler.
func (et ValueType) MarshalJSON() ([]byte, error) {
	return json.Marshal(et.String())
}

func (et *ValueType) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	switch s {
	case "<ValNone>":
		*et = ValNone
	case "scalar":
		*et = ValScalar
	case "vector":
		*et = ValVector
	case "matrix":
		*et = ValMatrix
	case "string":
		*et = ValString
	default:
		return fmt.Errorf("unknown value type %q", s)
	}
	return nil
}

func (e ValueType) String() string {
	switch e {
	case ValNone:
		return "<ValNone>"
	case ValScalar:
		return "scalar"
	case ValVector:
		return "vector"
	case ValMatrix:
		return "matrix"
	case ValString:
		return "string"
	}
	panic("ValueType.String: unhandled value type")
}
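Illustrative sketch (not part of the vendored diff): the new ValueType above serialises to and from JSON via its String form. The import path is the real package path from this file; the main wrapper is only for demonstration.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// MarshalJSON delegates to String(), so ValVector encodes as the string "vector".
	b, _ := json.Marshal(model.ValVector)
	fmt.Println(string(b)) // "vector"

	// UnmarshalJSON accepts the same string forms and maps them back to constants.
	var vt model.ValueType
	_ = json.Unmarshal([]byte(`"matrix"`), &vt)
	fmt.Println(vt == model.ValMatrix) // true
}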
vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored; 9 changed lines)
@@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),)
 endif
 endif

-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.14.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

+SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.45.2
+GOLANGCI_LINT_VERSION ?= v1.49.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
 	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
 		# If we're in CI and there is an Actions file, that means the linter
 		# is being run in Actions, so we don't need to run it here.
-		ifeq (,$(CIRCLE_JOB))
+		ifneq (,$(SKIP_GOLANGCI_LINT))
+			GOLANGCI_LINT :=
+		else ifeq (,$(CIRCLE_JOB))
 			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
 		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
 			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
vendor/github.com/prometheus/procfs/cpuinfo.go (generated, vendored; 36 changed lines)
@@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
 	return cpuinfo, nil
 }

+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "CPU Family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "Model Name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
 func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
 	scanner := bufio.NewScanner(bytes.NewReader(info))
vendor/github.com/prometheus/procfs/cpuinfo_loong64.go (generated, vendored, new file; 19 lines)
@@ -0,0 +1,19 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package procfs

var parseCPUInfo = parseCPUInfoLoong
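Illustrative sketch (not part of the vendored diff): on loong64 builds the package-level parseCPUInfo hook now resolves to the new parseCPUInfoLoong parser, while callers keep using the exported API. NewFS and FS.CPUInfo are the package's existing exported helpers; error handling is trimmed for brevity.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, _ := procfs.NewFS("/proc")
	// On a LoongArch host this dispatches to parseCPUInfoLoong via the build-tagged hook.
	infos, _ := fs.CPUInfo()
	for _, ci := range infos {
		fmt.Println(ci.Processor, ci.VendorID, ci.ModelName)
	}
}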
vendor/github.com/prometheus/procfs/cpuinfo_others.go (generated, vendored; 4 changed lines)
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x

 package procfs
vendor/github.com/prometheus/procfs/doc.go (generated, vendored; 1 changed line)
@@ -41,5 +41,4 @@
 // fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
 // fmt.Printf("rss: %dB\n", stat.ResidentMemory())
 // }
-//
 package procfs
vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored; 1 changed line)
@@ -284,6 +284,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
 }

 // parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//
 //	device [device] mounted on [mount] with fstype [type]
 func parseMount(ss []string) (*Mount, error) {
 	if len(ss) < deviceEntryLen {
vendor/github.com/prometheus/procfs/net_softnet.go (generated, vendored; 66 changed lines)
@@ -27,8 +27,9 @@ import (
 // For the proc file format details,
 // See:
 // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169

 // SoftnetStat contains a single row of data from /proc/net/softnet_stat.
 type SoftnetStat struct {
@@ -38,6 +39,18 @@ type SoftnetStat struct {
 	Dropped uint32
 	// Number of times processing packets ran out of quota.
 	TimeSqueezed uint32
+	// Number of collision occur while obtaining device lock while transmitting.
+	CPUCollision uint32
+	// Number of times cpu woken up received_rps.
+	ReceivedRps uint32
+	// number of times flow limit has been reached.
+	FlowLimitCount uint32
+	// Softnet backlog status.
+	SoftnetBacklogLen uint32
+	// CPU id owning this softnet_data.
+	Index uint32
+	// softnet_data's Width.
+	Width int
 }

 var softNetProcFile = "net/softnet_stat"
@@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
 	for s.Scan() {
 		columns := strings.Fields(s.Text())
 		width := len(columns)
+		softnetStat := SoftnetStat{}

 		if width < minColumns {
 			return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
 		}

-		// We only parse the first three columns at the moment.
-		us, err := parseHexUint32s(columns[0:3])
-		if err != nil {
-			return nil, err
-		}
+		// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+		if width >= minColumns {
+			us, err := parseHexUint32s(columns[0:9])
+			if err != nil {
+				return nil, err
+			}

-		stats = append(stats, SoftnetStat{
-			Processed:    us[0],
-			Dropped:      us[1],
-			TimeSqueezed: us[2],
-		})
+			softnetStat.Processed = us[0]
+			softnetStat.Dropped = us[1]
+			softnetStat.TimeSqueezed = us[2]
+			softnetStat.CPUCollision = us[8]
+		}
+
+		// Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+		if width >= 10 {
+			us, err := parseHexUint32s(columns[9:10])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.ReceivedRps = us[0]
+		}
+
+		// Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+		if width >= 11 {
+			us, err := parseHexUint32s(columns[10:11])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.FlowLimitCount = us[0]
+		}
+
+		// Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+		if width >= 13 {
+			us, err := parseHexUint32s(columns[11:13])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.SoftnetBacklogLen = us[0]
+			softnetStat.Index = us[1]
+		}
+
+		softnetStat.Width = width
+		stats = append(stats, softnetStat)
 	}

 	return stats, nil
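Illustrative sketch (not part of the vendored diff): reading the extended softnet counters that the new fields and width handling above expose. NewFS and FS.NetSoftnetStat are assumed to be the package's exported entry points for /proc/net/softnet_stat; error handling is trimmed.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, _ := procfs.NewFS("/proc")
	stats, _ := fs.NetSoftnetStat()
	for _, s := range stats {
		// Index, FlowLimitCount and SoftnetBacklogLen are only populated on kernels
		// new enough to emit the extra columns; Width records how many columns were present.
		fmt.Printf("cpu=%d processed=%d dropped=%d backlog=%d width=%d\n",
			s.Index, s.Processed, s.Dropped, s.SoftnetBacklogLen, s.Width)
	}
}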
vendor/github.com/prometheus/procfs/netstat.go (generated, vendored; 33 changed lines)
@@ -15,6 +15,7 @@ package procfs
 import (
 	"bufio"
+	"io"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -42,12 +43,29 @@ func (fs FS) NetStat() ([]NetStat, error) {
 			return nil, err
 		}

-		netStatFile := NetStat{
-			Filename: filepath.Base(filePath),
-			Stats:    make(map[string][]uint64),
-		}
-		scanner := bufio.NewScanner(file)
+		procNetstat, err := parseNetstat(file)
+		if err != nil {
+			return nil, err
+		}
+		procNetstat.Filename = filepath.Base(filePath)
+
+		netStatsTotal = append(netStatsTotal, procNetstat)
+	}
+	return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(r io.Reader) (NetStat, error) {
+	var (
+		scanner = bufio.NewScanner(r)
+		netStat = NetStat{
+			Stats: make(map[string][]uint64),
+		}
+	)

 	scanner.Scan()

 	// First string is always a header for stats
 	var headers []string
 	headers = append(headers, strings.Fields(scanner.Text())...)
@@ -57,12 +75,11 @@ func (fs FS) NetStat() ([]NetStat, error) {
 		for num, counter := range strings.Fields(scanner.Text()) {
 			value, err := strconv.ParseUint(counter, 16, 64)
 			if err != nil {
-				return nil, err
+				return NetStat{}, err
 			}
-			netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+			netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
 		}
 	}
-	netStatsTotal = append(netStatsTotal, netStatFile)
-	}
-	return netStatsTotal, nil
+
+	return netStat, nil
 }
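Illustrative sketch (not part of the vendored diff): the refactor above moves the per-file parsing into parseNetstat, but the exported FS.NetStat API is unchanged. NewFS is the package's existing constructor; error handling is trimmed.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, _ := procfs.NewFS("/proc")
	// One NetStat per file under /proc/net/stat/, each holding per-CPU counters keyed by header.
	stats, _ := fs.NetStat()
	for _, ns := range stats {
		for header, perCPU := range ns.Stats {
			fmt.Println(ns.Filename, header, perCPU)
		}
	}
}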
vendor/github.com/prometheus/procfs/proc_cgroup.go (generated, vendored; 2 changed lines)
@@ -23,7 +23,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )

-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
 // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
 // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
 // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
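Illustrative sketch (not part of the vendored diff, and only loosely tied to the comment fix above): how a caller typically reads /proc/[pid]/cgroup through this package. Proc.Cgroups and the HierarchyID/Controllers/Path fields are assumed from the surrounding proc_cgroup.go; error handling is trimmed.

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, _ := procfs.NewFS("/proc")
	p, _ := fs.Proc(os.Getpid())
	// One Cgroup per line of /proc/<pid>/cgroup: one per hierarchy on v1, a single unified entry on v2.
	cgroups, _ := p.Cgroups()
	for _, cg := range cgroups {
		fmt.Println(cg.HierarchyID, cg.Controllers, cg.Path)
	}
}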
vendor/github.com/prometheus/procfs/proc_interrupts.go (generated, vendored, new file; 98 lines)
@@ -0,0 +1,98 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// Interrupt represents a single interrupt line.
type Interrupt struct {
	// Info is the type of interrupt.
	Info string
	// Devices is the name of the device that is located at that IRQ
	Devices string
	// Values is the number of interrupts per CPU.
	Values []string
}

// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
type Interrupts map[string]Interrupt

// Interrupts creates a new instance from a given Proc instance.
func (p Proc) Interrupts() (Interrupts, error) {
	data, err := util.ReadFileNoStat(p.path("interrupts"))
	if err != nil {
		return nil, err
	}
	return parseInterrupts(bytes.NewReader(data))
}

func parseInterrupts(r io.Reader) (Interrupts, error) {
	var (
		interrupts = Interrupts{}
		scanner    = bufio.NewScanner(r)
	)

	if !scanner.Scan() {
		return nil, errors.New("interrupts empty")
	}
	cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu

	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) == 0 { // skip empty lines
			continue
		}
		if len(parts) < 2 {
			return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts)
		}
		intName := parts[0][:len(parts[0])-1] // remove trailing :

		if len(parts) == 2 {
			interrupts[intName] = Interrupt{
				Info:    "",
				Devices: "",
				Values: []string{
					parts[1],
				},
			}
			continue
		}

		intr := Interrupt{
			Values: parts[1 : cpuNum+1],
		}

		if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
			intr.Info = parts[cpuNum+1]
			intr.Devices = strings.Join(parts[cpuNum+2:], " ")
		} else {
			intr.Info = strings.Join(parts[cpuNum+1:], " ")
		}

		interrupts[intName] = intr
	}

	return interrupts, scanner.Err()
}
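Illustrative sketch (not part of the vendored diff): using the new Proc.Interrupts accessor defined above. NewFS and FS.Proc are the package's existing exported helpers; error handling is trimmed.

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, _ := procfs.NewFS("/proc")
	p, _ := fs.Proc(os.Getpid())
	// Keys are IRQ numbers or named sources such as NMI and LOC; Values holds
	// one counter string per CPU column from the /proc/interrupts header.
	interrupts, _ := p.Interrupts()
	for irq, intr := range interrupts {
		fmt.Println(irq, intr.Info, intr.Devices, len(intr.Values))
	}
}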
vendor/github.com/prometheus/procfs/proc_netstat.go (generated, vendored; 491 changed lines)
@@ -33,139 +33,140 @@ type ProcNetstat struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type TcpExt struct { // nolint:revive
|
type TcpExt struct { // nolint:revive
|
||||||
SyncookiesSent float64
|
SyncookiesSent *float64
|
||||||
SyncookiesRecv float64
|
SyncookiesRecv *float64
|
||||||
SyncookiesFailed float64
|
SyncookiesFailed *float64
|
||||||
EmbryonicRsts float64
|
EmbryonicRsts *float64
|
||||||
PruneCalled float64
|
PruneCalled *float64
|
||||||
RcvPruned float64
|
RcvPruned *float64
|
||||||
OfoPruned float64
|
OfoPruned *float64
|
||||||
OutOfWindowIcmps float64
|
OutOfWindowIcmps *float64
|
||||||
LockDroppedIcmps float64
|
LockDroppedIcmps *float64
|
||||||
ArpFilter float64
|
ArpFilter *float64
|
||||||
TW float64
|
TW *float64
|
||||||
TWRecycled float64
|
TWRecycled *float64
|
||||||
TWKilled float64
|
TWKilled *float64
|
||||||
PAWSActive float64
|
PAWSActive *float64
|
||||||
PAWSEstab float64
|
PAWSEstab *float64
|
||||||
DelayedACKs float64
|
DelayedACKs *float64
|
||||||
DelayedACKLocked float64
|
DelayedACKLocked *float64
|
||||||
DelayedACKLost float64
|
DelayedACKLost *float64
|
||||||
ListenOverflows float64
|
ListenOverflows *float64
|
||||||
ListenDrops float64
|
ListenDrops *float64
|
||||||
TCPHPHits float64
|
TCPHPHits *float64
|
||||||
TCPPureAcks float64
|
TCPPureAcks *float64
|
||||||
TCPHPAcks float64
|
TCPHPAcks *float64
|
||||||
TCPRenoRecovery float64
|
TCPRenoRecovery *float64
|
||||||
TCPSackRecovery float64
|
TCPSackRecovery *float64
|
||||||
TCPSACKReneging float64
|
TCPSACKReneging *float64
|
||||||
TCPSACKReorder float64
|
TCPSACKReorder *float64
|
||||||
TCPRenoReorder float64
|
TCPRenoReorder *float64
|
||||||
TCPTSReorder float64
|
TCPTSReorder *float64
|
||||||
TCPFullUndo float64
|
TCPFullUndo *float64
|
||||||
TCPPartialUndo float64
|
TCPPartialUndo *float64
|
||||||
TCPDSACKUndo float64
|
TCPDSACKUndo *float64
|
||||||
TCPLossUndo float64
|
TCPLossUndo *float64
|
||||||
TCPLostRetransmit float64
|
TCPLostRetransmit *float64
|
||||||
TCPRenoFailures float64
|
TCPRenoFailures *float64
|
||||||
TCPSackFailures float64
|
TCPSackFailures *float64
|
||||||
TCPLossFailures float64
|
TCPLossFailures *float64
|
||||||
TCPFastRetrans float64
|
TCPFastRetrans *float64
|
||||||
TCPSlowStartRetrans float64
|
TCPSlowStartRetrans *float64
|
||||||
TCPTimeouts float64
|
TCPTimeouts *float64
|
||||||
TCPLossProbes float64
|
TCPLossProbes *float64
|
||||||
TCPLossProbeRecovery float64
|
TCPLossProbeRecovery *float64
|
||||||
TCPRenoRecoveryFail float64
|
TCPRenoRecoveryFail *float64
|
||||||
TCPSackRecoveryFail float64
|
TCPSackRecoveryFail *float64
|
||||||
TCPRcvCollapsed float64
|
TCPRcvCollapsed *float64
|
||||||
TCPDSACKOldSent float64
|
TCPDSACKOldSent *float64
|
||||||
TCPDSACKOfoSent float64
|
TCPDSACKOfoSent *float64
|
||||||
TCPDSACKRecv float64
|
TCPDSACKRecv *float64
|
||||||
TCPDSACKOfoRecv float64
|
TCPDSACKOfoRecv *float64
|
||||||
TCPAbortOnData float64
|
TCPAbortOnData *float64
|
||||||
TCPAbortOnClose float64
|
TCPAbortOnClose *float64
|
||||||
TCPAbortOnMemory float64
|
TCPAbortOnMemory *float64
|
||||||
TCPAbortOnTimeout float64
|
TCPAbortOnTimeout *float64
|
||||||
TCPAbortOnLinger float64
|
TCPAbortOnLinger *float64
|
||||||
TCPAbortFailed float64
|
TCPAbortFailed *float64
|
||||||
TCPMemoryPressures float64
|
TCPMemoryPressures *float64
|
||||||
TCPMemoryPressuresChrono float64
|
TCPMemoryPressuresChrono *float64
|
||||||
TCPSACKDiscard float64
|
TCPSACKDiscard *float64
|
||||||
TCPDSACKIgnoredOld float64
|
TCPDSACKIgnoredOld *float64
|
||||||
TCPDSACKIgnoredNoUndo float64
|
TCPDSACKIgnoredNoUndo *float64
|
||||||
TCPSpuriousRTOs float64
|
TCPSpuriousRTOs *float64
|
||||||
TCPMD5NotFound float64
|
TCPMD5NotFound *float64
|
||||||
TCPMD5Unexpected float64
|
TCPMD5Unexpected *float64
|
||||||
TCPMD5Failure float64
|
TCPMD5Failure *float64
|
||||||
TCPSackShifted float64
|
TCPSackShifted *float64
|
||||||
TCPSackMerged float64
|
TCPSackMerged *float64
|
||||||
TCPSackShiftFallback float64
|
TCPSackShiftFallback *float64
|
||||||
TCPBacklogDrop float64
|
TCPBacklogDrop *float64
|
||||||
PFMemallocDrop float64
|
PFMemallocDrop *float64
|
||||||
TCPMinTTLDrop float64
|
TCPMinTTLDrop *float64
|
||||||
TCPDeferAcceptDrop float64
|
TCPDeferAcceptDrop *float64
|
||||||
IPReversePathFilter float64
|
IPReversePathFilter *float64
|
||||||
TCPTimeWaitOverflow float64
|
TCPTimeWaitOverflow *float64
|
||||||
TCPReqQFullDoCookies float64
|
TCPReqQFullDoCookies *float64
|
||||||
TCPReqQFullDrop float64
|
TCPReqQFullDrop *float64
|
||||||
TCPRetransFail float64
|
TCPRetransFail *float64
|
||||||
TCPRcvCoalesce float64
|
TCPRcvCoalesce *float64
|
||||||
TCPOFOQueue float64
|
TCPRcvQDrop *float64
|
||||||
TCPOFODrop float64
|
TCPOFOQueue *float64
|
||||||
TCPOFOMerge float64
|
TCPOFODrop *float64
|
||||||
TCPChallengeACK float64
|
TCPOFOMerge *float64
|
||||||
TCPSYNChallenge float64
|
TCPChallengeACK *float64
|
||||||
TCPFastOpenActive float64
|
TCPSYNChallenge *float64
|
||||||
TCPFastOpenActiveFail float64
|
TCPFastOpenActive *float64
|
||||||
TCPFastOpenPassive float64
|
TCPFastOpenActiveFail *float64
|
||||||
TCPFastOpenPassiveFail float64
|
TCPFastOpenPassive *float64
|
||||||
TCPFastOpenListenOverflow float64
|
TCPFastOpenPassiveFail *float64
|
||||||
TCPFastOpenCookieReqd float64
|
TCPFastOpenListenOverflow *float64
|
||||||
TCPFastOpenBlackhole float64
|
TCPFastOpenCookieReqd *float64
|
||||||
TCPSpuriousRtxHostQueues float64
|
TCPFastOpenBlackhole *float64
|
||||||
BusyPollRxPackets float64
|
TCPSpuriousRtxHostQueues *float64
|
||||||
TCPAutoCorking float64
|
BusyPollRxPackets *float64
|
||||||
TCPFromZeroWindowAdv float64
|
TCPAutoCorking *float64
|
||||||
TCPToZeroWindowAdv float64
|
TCPFromZeroWindowAdv *float64
|
||||||
TCPWantZeroWindowAdv float64
|
TCPToZeroWindowAdv *float64
|
||||||
TCPSynRetrans float64
|
TCPWantZeroWindowAdv *float64
|
||||||
TCPOrigDataSent float64
|
TCPSynRetrans *float64
|
||||||
TCPHystartTrainDetect float64
|
TCPOrigDataSent *float64
|
||||||
TCPHystartTrainCwnd float64
|
TCPHystartTrainDetect *float64
|
||||||
TCPHystartDelayDetect float64
|
TCPHystartTrainCwnd *float64
|
||||||
TCPHystartDelayCwnd float64
|
TCPHystartDelayDetect *float64
|
||||||
TCPACKSkippedSynRecv float64
|
TCPHystartDelayCwnd *float64
|
||||||
TCPACKSkippedPAWS float64
|
TCPACKSkippedSynRecv *float64
|
||||||
TCPACKSkippedSeq float64
|
TCPACKSkippedPAWS *float64
|
||||||
TCPACKSkippedFinWait2 float64
|
TCPACKSkippedSeq *float64
|
||||||
TCPACKSkippedTimeWait float64
|
TCPACKSkippedFinWait2 *float64
|
||||||
TCPACKSkippedChallenge float64
|
TCPACKSkippedTimeWait *float64
|
||||||
TCPWinProbe float64
|
TCPACKSkippedChallenge *float64
|
||||||
TCPKeepAlive float64
|
TCPWinProbe *float64
|
||||||
TCPMTUPFail float64
|
TCPKeepAlive *float64
|
||||||
TCPMTUPSuccess float64
|
TCPMTUPFail *float64
|
||||||
TCPWqueueTooBig float64
|
TCPMTUPSuccess *float64
|
||||||
|
TCPWqueueTooBig *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
type IpExt struct { // nolint:revive
|
type IpExt struct { // nolint:revive
|
||||||
InNoRoutes float64
|
InNoRoutes *float64
|
||||||
InTruncatedPkts float64
|
InTruncatedPkts *float64
|
||||||
InMcastPkts float64
|
InMcastPkts *float64
|
||||||
OutMcastPkts float64
|
OutMcastPkts *float64
|
||||||
InBcastPkts float64
|
InBcastPkts *float64
|
||||||
OutBcastPkts float64
|
OutBcastPkts *float64
|
||||||
InOctets float64
|
InOctets *float64
|
||||||
OutOctets float64
|
OutOctets *float64
|
||||||
InMcastOctets float64
|
InMcastOctets *float64
|
||||||
OutMcastOctets float64
|
OutMcastOctets *float64
|
||||||
InBcastOctets float64
|
InBcastOctets *float64
|
||||||
OutBcastOctets float64
|
OutBcastOctets *float64
|
||||||
InCsumErrors float64
|
InCsumErrors *float64
|
||||||
InNoECTPkts float64
|
InNoECTPkts *float64
|
||||||
InECT1Pkts float64
|
InECT1Pkts *float64
|
||||||
InECT0Pkts float64
|
InECT0Pkts *float64
|
||||||
InCEPkts float64
|
InCEPkts *float64
|
||||||
ReasmOverlaps float64
|
ReasmOverlaps *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p Proc) Netstat() (ProcNetstat, error) {
|
func (p Proc) Netstat() (ProcNetstat, error) {
|
||||||
@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcNetstat{PID: p.PID}, err
|
return ProcNetstat{PID: p.PID}, err
|
||||||
}
|
}
|
||||||
procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
|
procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
|
||||||
procNetstat.PID = p.PID
|
procNetstat.PID = p.PID
|
||||||
return procNetstat, err
|
return procNetstat, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseNetstat parses the metrics from proc/<pid>/net/netstat file
|
// parseProcNetstat parses the metrics from proc/<pid>/net/netstat file
|
||||||
// and returns a ProcNetstat structure.
|
// and returns a ProcNetstat structure.
|
||||||
func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
|
func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
|
||||||
var (
|
var (
|
||||||
scanner = bufio.NewScanner(r)
|
scanner = bufio.NewScanner(r)
|
||||||
procNetstat = ProcNetstat{}
|
procNetstat = ProcNetstat{}
|
||||||
@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
|
|||||||
case "TcpExt":
|
case "TcpExt":
|
||||||
switch key {
|
switch key {
|
||||||
case "SyncookiesSent":
|
case "SyncookiesSent":
|
||||||
procNetstat.TcpExt.SyncookiesSent = value
|
procNetstat.TcpExt.SyncookiesSent = &value
|
||||||
case "SyncookiesRecv":
|
case "SyncookiesRecv":
|
||||||
procNetstat.TcpExt.SyncookiesRecv = value
|
procNetstat.TcpExt.SyncookiesRecv = &value
|
||||||
case "SyncookiesFailed":
|
case "SyncookiesFailed":
|
||||||
procNetstat.TcpExt.SyncookiesFailed = value
|
procNetstat.TcpExt.SyncookiesFailed = &value
|
||||||
case "EmbryonicRsts":
|
case "EmbryonicRsts":
|
||||||
procNetstat.TcpExt.EmbryonicRsts = value
|
procNetstat.TcpExt.EmbryonicRsts = &value
|
||||||
case "PruneCalled":
|
case "PruneCalled":
|
||||||
procNetstat.TcpExt.PruneCalled = value
|
procNetstat.TcpExt.PruneCalled = &value
|
||||||
case "RcvPruned":
|
case "RcvPruned":
|
||||||
procNetstat.TcpExt.RcvPruned = value
|
procNetstat.TcpExt.RcvPruned = &value
|
||||||
case "OfoPruned":
|
case "OfoPruned":
|
||||||
procNetstat.TcpExt.OfoPruned = value
|
procNetstat.TcpExt.OfoPruned = &value
|
||||||
case "OutOfWindowIcmps":
|
case "OutOfWindowIcmps":
|
||||||
procNetstat.TcpExt.OutOfWindowIcmps = value
|
procNetstat.TcpExt.OutOfWindowIcmps = &value
|
||||||
case "LockDroppedIcmps":
|
case "LockDroppedIcmps":
|
||||||
procNetstat.TcpExt.LockDroppedIcmps = value
|
procNetstat.TcpExt.LockDroppedIcmps = &value
|
||||||
case "ArpFilter":
|
case "ArpFilter":
|
||||||
procNetstat.TcpExt.ArpFilter = value
|
procNetstat.TcpExt.ArpFilter = &value
|
||||||
case "TW":
|
case "TW":
|
||||||
procNetstat.TcpExt.TW = value
|
procNetstat.TcpExt.TW = &value
|
||||||
case "TWRecycled":
|
case "TWRecycled":
|
||||||
procNetstat.TcpExt.TWRecycled = value
|
procNetstat.TcpExt.TWRecycled = &value
|
||||||
case "TWKilled":
|
case "TWKilled":
|
||||||
procNetstat.TcpExt.TWKilled = value
|
procNetstat.TcpExt.TWKilled = &value
|
||||||
case "PAWSActive":
|
case "PAWSActive":
|
||||||
procNetstat.TcpExt.PAWSActive = value
|
procNetstat.TcpExt.PAWSActive = &value
|
||||||
case "PAWSEstab":
|
case "PAWSEstab":
|
||||||
procNetstat.TcpExt.PAWSEstab = value
|
procNetstat.TcpExt.PAWSEstab = &value
|
||||||
case "DelayedACKs":
|
case "DelayedACKs":
|
||||||
procNetstat.TcpExt.DelayedACKs = value
|
procNetstat.TcpExt.DelayedACKs = &value
|
||||||
case "DelayedACKLocked":
|
case "DelayedACKLocked":
|
||||||
procNetstat.TcpExt.DelayedACKLocked = value
|
procNetstat.TcpExt.DelayedACKLocked = &value
|
||||||
case "DelayedACKLost":
|
case "DelayedACKLost":
|
||||||
procNetstat.TcpExt.DelayedACKLost = value
|
procNetstat.TcpExt.DelayedACKLost = &value
|
||||||
case "ListenOverflows":
|
case "ListenOverflows":
|
||||||
procNetstat.TcpExt.ListenOverflows = value
|
procNetstat.TcpExt.ListenOverflows = &value
|
||||||
case "ListenDrops":
|
case "ListenDrops":
|
||||||
procNetstat.TcpExt.ListenDrops = value
|
procNetstat.TcpExt.ListenDrops = &value
|
||||||
case "TCPHPHits":
|
case "TCPHPHits":
|
||||||
procNetstat.TcpExt.TCPHPHits = value
|
procNetstat.TcpExt.TCPHPHits = &value
|
||||||
case "TCPPureAcks":
|
case "TCPPureAcks":
|
||||||
procNetstat.TcpExt.TCPPureAcks = value
|
procNetstat.TcpExt.TCPPureAcks = &value
|
||||||
case "TCPHPAcks":
|
case "TCPHPAcks":
|
||||||
procNetstat.TcpExt.TCPHPAcks = value
|
procNetstat.TcpExt.TCPHPAcks = &value
|
||||||
case "TCPRenoRecovery":
|
case "TCPRenoRecovery":
|
||||||
procNetstat.TcpExt.TCPRenoRecovery = value
|
procNetstat.TcpExt.TCPRenoRecovery = &value
|
||||||
case "TCPSackRecovery":
|
case "TCPSackRecovery":
|
||||||
procNetstat.TcpExt.TCPSackRecovery = value
|
procNetstat.TcpExt.TCPSackRecovery = &value
|
||||||
case "TCPSACKReneging":
|
case "TCPSACKReneging":
|
||||||
procNetstat.TcpExt.TCPSACKReneging = value
|
procNetstat.TcpExt.TCPSACKReneging = &value
|
||||||
case "TCPSACKReorder":
|
case "TCPSACKReorder":
|
||||||
procNetstat.TcpExt.TCPSACKReorder = value
|
procNetstat.TcpExt.TCPSACKReorder = &value
|
||||||
case "TCPRenoReorder":
|
case "TCPRenoReorder":
|
||||||
procNetstat.TcpExt.TCPRenoReorder = value
|
procNetstat.TcpExt.TCPRenoReorder = &value
|
||||||
case "TCPTSReorder":
|
case "TCPTSReorder":
|
||||||
procNetstat.TcpExt.TCPTSReorder = value
|
procNetstat.TcpExt.TCPTSReorder = &value
|
||||||
case "TCPFullUndo":
|
case "TCPFullUndo":
|
||||||
procNetstat.TcpExt.TCPFullUndo = value
|
procNetstat.TcpExt.TCPFullUndo = &value
|
||||||
case "TCPPartialUndo":
|
case "TCPPartialUndo":
|
||||||
procNetstat.TcpExt.TCPPartialUndo = value
|
procNetstat.TcpExt.TCPPartialUndo = &value
|
||||||
case "TCPDSACKUndo":
|
case "TCPDSACKUndo":
|
||||||
procNetstat.TcpExt.TCPDSACKUndo = value
|
procNetstat.TcpExt.TCPDSACKUndo = &value
|
||||||
case "TCPLossUndo":
|
case "TCPLossUndo":
|
||||||
procNetstat.TcpExt.TCPLossUndo = value
|
procNetstat.TcpExt.TCPLossUndo = &value
|
||||||
case "TCPLostRetransmit":
|
case "TCPLostRetransmit":
|
||||||
procNetstat.TcpExt.TCPLostRetransmit = value
|
procNetstat.TcpExt.TCPLostRetransmit = &value
|
||||||
case "TCPRenoFailures":
|
case "TCPRenoFailures":
|
||||||
procNetstat.TcpExt.TCPRenoFailures = value
|
procNetstat.TcpExt.TCPRenoFailures = &value
|
||||||
case "TCPSackFailures":
|
case "TCPSackFailures":
|
||||||
procNetstat.TcpExt.TCPSackFailures = value
|
procNetstat.TcpExt.TCPSackFailures = &value
|
||||||
case "TCPLossFailures":
|
case "TCPLossFailures":
|
||||||
procNetstat.TcpExt.TCPLossFailures = value
|
procNetstat.TcpExt.TCPLossFailures = &value
|
||||||
case "TCPFastRetrans":
|
case "TCPFastRetrans":
|
||||||
procNetstat.TcpExt.TCPFastRetrans = value
|
procNetstat.TcpExt.TCPFastRetrans = &value
|
||||||
case "TCPSlowStartRetrans":
|
case "TCPSlowStartRetrans":
|
||||||
procNetstat.TcpExt.TCPSlowStartRetrans = value
|
procNetstat.TcpExt.TCPSlowStartRetrans = &value
|
||||||
case "TCPTimeouts":
|
case "TCPTimeouts":
|
||||||
procNetstat.TcpExt.TCPTimeouts = value
|
procNetstat.TcpExt.TCPTimeouts = &value
|
||||||
case "TCPLossProbes":
|
case "TCPLossProbes":
|
||||||
procNetstat.TcpExt.TCPLossProbes = value
|
procNetstat.TcpExt.TCPLossProbes = &value
|
||||||
case "TCPLossProbeRecovery":
|
case "TCPLossProbeRecovery":
|
||||||
procNetstat.TcpExt.TCPLossProbeRecovery = value
|
procNetstat.TcpExt.TCPLossProbeRecovery = &value
|
||||||
case "TCPRenoRecoveryFail":
|
case "TCPRenoRecoveryFail":
|
||||||
procNetstat.TcpExt.TCPRenoRecoveryFail = value
|
procNetstat.TcpExt.TCPRenoRecoveryFail = &value
|
||||||
case "TCPSackRecoveryFail":
|
case "TCPSackRecoveryFail":
|
||||||
procNetstat.TcpExt.TCPSackRecoveryFail = value
|
procNetstat.TcpExt.TCPSackRecoveryFail = &value
|
||||||
case "TCPRcvCollapsed":
|
case "TCPRcvCollapsed":
|
||||||
procNetstat.TcpExt.TCPRcvCollapsed = value
|
procNetstat.TcpExt.TCPRcvCollapsed = &value
|
||||||
case "TCPDSACKOldSent":
|
case "TCPDSACKOldSent":
|
||||||
procNetstat.TcpExt.TCPDSACKOldSent = value
|
procNetstat.TcpExt.TCPDSACKOldSent = &value
|
||||||
case "TCPDSACKOfoSent":
|
case "TCPDSACKOfoSent":
|
||||||
procNetstat.TcpExt.TCPDSACKOfoSent = value
|
procNetstat.TcpExt.TCPDSACKOfoSent = &value
|
||||||
case "TCPDSACKRecv":
|
case "TCPDSACKRecv":
|
||||||
procNetstat.TcpExt.TCPDSACKRecv = value
|
procNetstat.TcpExt.TCPDSACKRecv = &value
|
||||||
case "TCPDSACKOfoRecv":
|
case "TCPDSACKOfoRecv":
|
||||||
procNetstat.TcpExt.TCPDSACKOfoRecv = value
|
procNetstat.TcpExt.TCPDSACKOfoRecv = &value
|
||||||
case "TCPAbortOnData":
|
case "TCPAbortOnData":
|
||||||
procNetstat.TcpExt.TCPAbortOnData = value
|
procNetstat.TcpExt.TCPAbortOnData = &value
|
||||||
case "TCPAbortOnClose":
|
case "TCPAbortOnClose":
|
||||||
procNetstat.TcpExt.TCPAbortOnClose = value
|
procNetstat.TcpExt.TCPAbortOnClose = &value
|
||||||
case "TCPDeferAcceptDrop":
|
case "TCPDeferAcceptDrop":
|
||||||
procNetstat.TcpExt.TCPDeferAcceptDrop = value
|
procNetstat.TcpExt.TCPDeferAcceptDrop = &value
|
||||||
case "IPReversePathFilter":
|
case "IPReversePathFilter":
|
||||||
procNetstat.TcpExt.IPReversePathFilter = value
|
procNetstat.TcpExt.IPReversePathFilter = &value
|
||||||
case "TCPTimeWaitOverflow":
|
case "TCPTimeWaitOverflow":
|
||||||
procNetstat.TcpExt.TCPTimeWaitOverflow = value
|
procNetstat.TcpExt.TCPTimeWaitOverflow = &value
|
||||||
case "TCPReqQFullDoCookies":
|
case "TCPReqQFullDoCookies":
|
||||||
procNetstat.TcpExt.TCPReqQFullDoCookies = value
|
procNetstat.TcpExt.TCPReqQFullDoCookies = &value
|
||||||
case "TCPReqQFullDrop":
|
case "TCPReqQFullDrop":
|
||||||
procNetstat.TcpExt.TCPReqQFullDrop = value
|
procNetstat.TcpExt.TCPReqQFullDrop = &value
|
||||||
case "TCPRetransFail":
|
case "TCPRetransFail":
|
||||||
procNetstat.TcpExt.TCPRetransFail = value
|
procNetstat.TcpExt.TCPRetransFail = &value
|
||||||
case "TCPRcvCoalesce":
|
case "TCPRcvCoalesce":
|
||||||
procNetstat.TcpExt.TCPRcvCoalesce = value
|
procNetstat.TcpExt.TCPRcvCoalesce = &value
|
||||||
|
case "TCPRcvQDrop":
|
||||||
|
procNetstat.TcpExt.TCPRcvQDrop = &value
|
||||||
case "TCPOFOQueue":
|
case "TCPOFOQueue":
|
||||||
procNetstat.TcpExt.TCPOFOQueue = value
|
procNetstat.TcpExt.TCPOFOQueue = &value
|
||||||
case "TCPOFODrop":
|
case "TCPOFODrop":
|
||||||
procNetstat.TcpExt.TCPOFODrop = value
|
procNetstat.TcpExt.TCPOFODrop = &value
|
||||||
case "TCPOFOMerge":
|
case "TCPOFOMerge":
|
||||||
procNetstat.TcpExt.TCPOFOMerge = value
|
procNetstat.TcpExt.TCPOFOMerge = &value
|
||||||
case "TCPChallengeACK":
|
case "TCPChallengeACK":
|
||||||
procNetstat.TcpExt.TCPChallengeACK = value
|
procNetstat.TcpExt.TCPChallengeACK = &value
|
||||||
case "TCPSYNChallenge":
|
case "TCPSYNChallenge":
|
||||||
procNetstat.TcpExt.TCPSYNChallenge = value
|
procNetstat.TcpExt.TCPSYNChallenge = &value
|
||||||
case "TCPFastOpenActive":
|
case "TCPFastOpenActive":
|
||||||
procNetstat.TcpExt.TCPFastOpenActive = value
|
procNetstat.TcpExt.TCPFastOpenActive = &value
|
||||||
case "TCPFastOpenActiveFail":
|
case "TCPFastOpenActiveFail":
|
||||||
procNetstat.TcpExt.TCPFastOpenActiveFail = value
|
procNetstat.TcpExt.TCPFastOpenActiveFail = &value
|
||||||
case "TCPFastOpenPassive":
|
case "TCPFastOpenPassive":
|
||||||
procNetstat.TcpExt.TCPFastOpenPassive = value
|
procNetstat.TcpExt.TCPFastOpenPassive = &value
|
||||||
case "TCPFastOpenPassiveFail":
|
case "TCPFastOpenPassiveFail":
|
||||||
procNetstat.TcpExt.TCPFastOpenPassiveFail = value
|
procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
|
||||||
case "TCPFastOpenListenOverflow":
|
case "TCPFastOpenListenOverflow":
|
||||||
procNetstat.TcpExt.TCPFastOpenListenOverflow = value
|
procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
|
||||||
case "TCPFastOpenCookieReqd":
|
case "TCPFastOpenCookieReqd":
|
||||||
procNetstat.TcpExt.TCPFastOpenCookieReqd = value
|
procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
|
||||||
case "TCPFastOpenBlackhole":
|
case "TCPFastOpenBlackhole":
|
||||||
procNetstat.TcpExt.TCPFastOpenBlackhole = value
|
procNetstat.TcpExt.TCPFastOpenBlackhole = &value
|
||||||
case "TCPSpuriousRtxHostQueues":
|
case "TCPSpuriousRtxHostQueues":
|
||||||
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
|
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
|
||||||
case "BusyPollRxPackets":
|
case "BusyPollRxPackets":
|
||||||
procNetstat.TcpExt.BusyPollRxPackets = value
|
procNetstat.TcpExt.BusyPollRxPackets = &value
|
||||||
case "TCPAutoCorking":
|
case "TCPAutoCorking":
|
||||||
procNetstat.TcpExt.TCPAutoCorking = value
|
procNetstat.TcpExt.TCPAutoCorking = &value
|
||||||
case "TCPFromZeroWindowAdv":
|
case "TCPFromZeroWindowAdv":
|
||||||
procNetstat.TcpExt.TCPFromZeroWindowAdv = value
|
procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
|
||||||
case "TCPToZeroWindowAdv":
|
case "TCPToZeroWindowAdv":
|
||||||
procNetstat.TcpExt.TCPToZeroWindowAdv = value
|
procNetstat.TcpExt.TCPToZeroWindowAdv = &value
|
||||||
case "TCPWantZeroWindowAdv":
|
case "TCPWantZeroWindowAdv":
|
||||||
procNetstat.TcpExt.TCPWantZeroWindowAdv = value
|
procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
|
||||||
case "TCPSynRetrans":
|
case "TCPSynRetrans":
|
||||||
procNetstat.TcpExt.TCPSynRetrans = value
|
procNetstat.TcpExt.TCPSynRetrans = &value
|
||||||
case "TCPOrigDataSent":
|
case "TCPOrigDataSent":
|
||||||
procNetstat.TcpExt.TCPOrigDataSent = value
|
procNetstat.TcpExt.TCPOrigDataSent = &value
|
||||||
case "TCPHystartTrainDetect":
|
case "TCPHystartTrainDetect":
|
||||||
procNetstat.TcpExt.TCPHystartTrainDetect = value
|
procNetstat.TcpExt.TCPHystartTrainDetect = &value
|
||||||
case "TCPHystartTrainCwnd":
|
case "TCPHystartTrainCwnd":
|
||||||
procNetstat.TcpExt.TCPHystartTrainCwnd = value
|
procNetstat.TcpExt.TCPHystartTrainCwnd = &value
|
||||||
case "TCPHystartDelayDetect":
|
case "TCPHystartDelayDetect":
|
||||||
procNetstat.TcpExt.TCPHystartDelayDetect = value
|
procNetstat.TcpExt.TCPHystartDelayDetect = &value
|
||||||
case "TCPHystartDelayCwnd":
|
case "TCPHystartDelayCwnd":
|
||||||
procNetstat.TcpExt.TCPHystartDelayCwnd = value
|
procNetstat.TcpExt.TCPHystartDelayCwnd = &value
|
||||||
case "TCPACKSkippedSynRecv":
|
case "TCPACKSkippedSynRecv":
|
||||||
procNetstat.TcpExt.TCPACKSkippedSynRecv = value
|
procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
|
||||||
case "TCPACKSkippedPAWS":
|
case "TCPACKSkippedPAWS":
|
||||||
procNetstat.TcpExt.TCPACKSkippedPAWS = value
|
procNetstat.TcpExt.TCPACKSkippedPAWS = &value
|
||||||
case "TCPACKSkippedSeq":
|
case "TCPACKSkippedSeq":
|
||||||
procNetstat.TcpExt.TCPACKSkippedSeq = value
|
procNetstat.TcpExt.TCPACKSkippedSeq = &value
|
||||||
case "TCPACKSkippedFinWait2":
|
case "TCPACKSkippedFinWait2":
|
||||||
procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
|
procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
|
||||||
case "TCPACKSkippedTimeWait":
|
case "TCPACKSkippedTimeWait":
|
||||||
procNetstat.TcpExt.TCPACKSkippedTimeWait = value
|
procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
|
||||||
case "TCPACKSkippedChallenge":
|
case "TCPACKSkippedChallenge":
|
||||||
procNetstat.TcpExt.TCPACKSkippedChallenge = value
|
procNetstat.TcpExt.TCPACKSkippedChallenge = &value
|
||||||
case "TCPWinProbe":
|
case "TCPWinProbe":
|
||||||
procNetstat.TcpExt.TCPWinProbe = value
|
procNetstat.TcpExt.TCPWinProbe = &value
|
||||||
case "TCPKeepAlive":
|
case "TCPKeepAlive":
|
||||||
procNetstat.TcpExt.TCPKeepAlive = value
|
procNetstat.TcpExt.TCPKeepAlive = &value
|
||||||
case "TCPMTUPFail":
|
case "TCPMTUPFail":
|
||||||
procNetstat.TcpExt.TCPMTUPFail = value
|
procNetstat.TcpExt.TCPMTUPFail = &value
|
||||||
case "TCPMTUPSuccess":
|
case "TCPMTUPSuccess":
|
||||||
procNetstat.TcpExt.TCPMTUPSuccess = value
|
procNetstat.TcpExt.TCPMTUPSuccess = &value
|
||||||
case "TCPWqueueTooBig":
|
case "TCPWqueueTooBig":
|
||||||
procNetstat.TcpExt.TCPWqueueTooBig = value
|
procNetstat.TcpExt.TCPWqueueTooBig = &value
|
||||||
}
|
}
|
||||||
case "IpExt":
|
case "IpExt":
|
||||||
switch key {
|
switch key {
|
||||||
case "InNoRoutes":
|
case "InNoRoutes":
|
||||||
procNetstat.IpExt.InNoRoutes = value
|
procNetstat.IpExt.InNoRoutes = &value
|
||||||
case "InTruncatedPkts":
|
case "InTruncatedPkts":
|
||||||
procNetstat.IpExt.InTruncatedPkts = value
|
procNetstat.IpExt.InTruncatedPkts = &value
|
||||||
case "InMcastPkts":
|
case "InMcastPkts":
|
||||||
procNetstat.IpExt.InMcastPkts = value
|
procNetstat.IpExt.InMcastPkts = &value
|
||||||
case "OutMcastPkts":
|
case "OutMcastPkts":
|
||||||
procNetstat.IpExt.OutMcastPkts = value
|
procNetstat.IpExt.OutMcastPkts = &value
|
||||||
case "InBcastPkts":
|
case "InBcastPkts":
|
||||||
procNetstat.IpExt.InBcastPkts = value
|
procNetstat.IpExt.InBcastPkts = &value
|
||||||
case "OutBcastPkts":
|
case "OutBcastPkts":
|
||||||
procNetstat.IpExt.OutBcastPkts = value
|
procNetstat.IpExt.OutBcastPkts = &value
|
||||||
case "InOctets":
|
case "InOctets":
|
||||||
procNetstat.IpExt.InOctets = value
|
procNetstat.IpExt.InOctets = &value
|
||||||
case "OutOctets":
|
case "OutOctets":
|
||||||
procNetstat.IpExt.OutOctets = value
|
procNetstat.IpExt.OutOctets = &value
|
||||||
case "InMcastOctets":
|
case "InMcastOctets":
|
||||||
procNetstat.IpExt.InMcastOctets = value
|
procNetstat.IpExt.InMcastOctets = &value
|
||||||
case "OutMcastOctets":
|
case "OutMcastOctets":
|
||||||
procNetstat.IpExt.OutMcastOctets = value
|
procNetstat.IpExt.OutMcastOctets = &value
|
||||||
case "InBcastOctets":
|
case "InBcastOctets":
|
||||||
procNetstat.IpExt.InBcastOctets = value
|
procNetstat.IpExt.InBcastOctets = &value
|
||||||
case "OutBcastOctets":
|
case "OutBcastOctets":
|
||||||
procNetstat.IpExt.OutBcastOctets = value
|
procNetstat.IpExt.OutBcastOctets = &value
|
||||||
case "InCsumErrors":
|
case "InCsumErrors":
|
||||||
procNetstat.IpExt.InCsumErrors = value
|
procNetstat.IpExt.InCsumErrors = &value
|
||||||
case "InNoECTPkts":
|
case "InNoECTPkts":
|
||||||
procNetstat.IpExt.InNoECTPkts = value
|
procNetstat.IpExt.InNoECTPkts = &value
|
||||||
case "InECT1Pkts":
|
case "InECT1Pkts":
|
||||||
procNetstat.IpExt.InECT1Pkts = value
|
procNetstat.IpExt.InECT1Pkts = &value
|
||||||
case "InECT0Pkts":
|
case "InECT0Pkts":
|
||||||
procNetstat.IpExt.InECT0Pkts = value
|
procNetstat.IpExt.InECT0Pkts = &value
|
||||||
case "InCEPkts":
|
case "InCEPkts":
|
||||||
procNetstat.IpExt.InCEPkts = value
|
procNetstat.IpExt.InCEPkts = &value
|
||||||
case "ReasmOverlaps":
|
case "ReasmOverlaps":
|
||||||
procNetstat.IpExt.ReasmOverlaps = value
|
procNetstat.IpExt.ReasmOverlaps = &value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
vendor/github.com/prometheus/procfs/proc_snmp.go (generated, vendored; 318 changed lines)
@@ -37,100 +37,100 @@ type ProcSnmp struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Ip struct { // nolint:revive
|
type Ip struct { // nolint:revive
|
||||||
Forwarding float64
|
Forwarding *float64
|
||||||
DefaultTTL float64
|
DefaultTTL *float64
|
||||||
InReceives float64
|
InReceives *float64
|
||||||
InHdrErrors float64
|
InHdrErrors *float64
|
||||||
InAddrErrors float64
|
InAddrErrors *float64
|
||||||
ForwDatagrams float64
|
ForwDatagrams *float64
|
||||||
InUnknownProtos float64
|
InUnknownProtos *float64
|
||||||
InDiscards float64
|
InDiscards *float64
|
||||||
InDelivers float64
|
InDelivers *float64
|
||||||
OutRequests float64
|
OutRequests *float64
|
||||||
OutDiscards float64
|
OutDiscards *float64
|
||||||
OutNoRoutes float64
|
OutNoRoutes *float64
|
||||||
ReasmTimeout float64
|
ReasmTimeout *float64
|
||||||
ReasmReqds float64
|
ReasmReqds *float64
|
||||||
ReasmOKs float64
|
ReasmOKs *float64
|
||||||
ReasmFails float64
|
ReasmFails *float64
|
||||||
FragOKs float64
|
FragOKs *float64
|
||||||
FragFails float64
|
FragFails *float64
|
||||||
FragCreates float64
|
FragCreates *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
type Icmp struct {
|
type Icmp struct { // nolint:revive
|
||||||
InMsgs float64
|
InMsgs *float64
|
||||||
InErrors float64
|
InErrors *float64
|
||||||
InCsumErrors float64
|
InCsumErrors *float64
|
||||||
InDestUnreachs float64
|
InDestUnreachs *float64
|
||||||
InTimeExcds float64
|
InTimeExcds *float64
|
||||||
InParmProbs float64
|
InParmProbs *float64
|
||||||
InSrcQuenchs float64
|
InSrcQuenchs *float64
|
||||||
InRedirects float64
|
InRedirects *float64
|
||||||
InEchos float64
|
InEchos *float64
|
||||||
InEchoReps float64
|
InEchoReps *float64
|
||||||
InTimestamps float64
|
InTimestamps *float64
|
||||||
InTimestampReps float64
|
InTimestampReps *float64
|
||||||
InAddrMasks float64
|
InAddrMasks *float64
|
||||||
InAddrMaskReps float64
|
InAddrMaskReps *float64
|
||||||
OutMsgs float64
|
OutMsgs *float64
|
||||||
OutErrors float64
|
OutErrors *float64
|
||||||
OutDestUnreachs float64
|
OutDestUnreachs *float64
|
||||||
OutTimeExcds float64
|
OutTimeExcds *float64
|
||||||
OutParmProbs float64
|
OutParmProbs *float64
|
||||||
OutSrcQuenchs float64
|
OutSrcQuenchs *float64
|
||||||
OutRedirects float64
|
OutRedirects *float64
|
||||||
OutEchos float64
|
OutEchos *float64
|
||||||
OutEchoReps float64
|
OutEchoReps *float64
|
||||||
OutTimestamps float64
|
OutTimestamps *float64
|
||||||
OutTimestampReps float64
|
OutTimestampReps *float64
|
||||||
OutAddrMasks float64
|
OutAddrMasks *float64
|
||||||
OutAddrMaskReps float64
|
OutAddrMaskReps *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
type IcmpMsg struct {
|
type IcmpMsg struct {
|
||||||
InType3 float64
|
InType3 *float64
|
||||||
OutType3 float64
|
OutType3 *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
type Tcp struct { // nolint:revive
|
type Tcp struct { // nolint:revive
|
||||||
RtoAlgorithm float64
|
RtoAlgorithm *float64
|
||||||
RtoMin float64
|
RtoMin *float64
|
||||||
RtoMax float64
|
RtoMax *float64
|
||||||
MaxConn float64
|
MaxConn *float64
|
||||||
ActiveOpens float64
|
ActiveOpens *float64
|
||||||
PassiveOpens float64
|
PassiveOpens *float64
|
||||||
AttemptFails float64
|
AttemptFails *float64
|
||||||
EstabResets float64
|
EstabResets *float64
|
||||||
CurrEstab float64
|
CurrEstab *float64
|
||||||
InSegs float64
|
InSegs *float64
|
||||||
OutSegs float64
|
OutSegs *float64
|
||||||
RetransSegs float64
|
RetransSegs *float64
|
||||||
InErrs float64
|
InErrs *float64
|
||||||
OutRsts float64
|
OutRsts *float64
|
||||||
InCsumErrors float64
|
InCsumErrors *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
type Udp struct { // nolint:revive
|
type Udp struct { // nolint:revive
|
||||||
InDatagrams float64
|
InDatagrams *float64
|
||||||
NoPorts float64
|
NoPorts *float64
|
||||||
InErrors float64
|
InErrors *float64
|
||||||
OutDatagrams float64
|
OutDatagrams *float64
|
||||||
RcvbufErrors float64
|
RcvbufErrors *float64
|
||||||
SndbufErrors float64
|
SndbufErrors *float64
|
||||||
InCsumErrors float64
|
InCsumErrors *float64
|
||||||
IgnoredMulti float64
|
IgnoredMulti *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
type UdpLite struct { // nolint:revive
|
type UdpLite struct { // nolint:revive
|
||||||
InDatagrams float64
|
InDatagrams *float64
|
||||||
NoPorts float64
|
NoPorts *float64
|
||||||
InErrors float64
|
InErrors *float64
|
||||||
OutDatagrams float64
|
OutDatagrams *float64
|
||||||
RcvbufErrors float64
|
RcvbufErrors *float64
|
||||||
SndbufErrors float64
|
SndbufErrors *float64
|
||||||
InCsumErrors float64
|
InCsumErrors *float64
|
||||||
IgnoredMulti float64
|
IgnoredMulti *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p Proc) Snmp() (ProcSnmp, error) {
|
func (p Proc) Snmp() (ProcSnmp, error) {
|
||||||
@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
|
|||||||
case "Ip":
|
case "Ip":
|
||||||
switch key {
|
switch key {
|
||||||
case "Forwarding":
|
case "Forwarding":
|
||||||
procSnmp.Ip.Forwarding = value
|
procSnmp.Ip.Forwarding = &value
|
||||||
case "DefaultTTL":
|
case "DefaultTTL":
|
||||||
procSnmp.Ip.DefaultTTL = value
|
procSnmp.Ip.DefaultTTL = &value
|
||||||
case "InReceives":
|
case "InReceives":
|
||||||
procSnmp.Ip.InReceives = value
|
procSnmp.Ip.InReceives = &value
|
||||||
case "InHdrErrors":
|
case "InHdrErrors":
|
||||||
procSnmp.Ip.InHdrErrors = value
|
procSnmp.Ip.InHdrErrors = &value
|
||||||
case "InAddrErrors":
|
case "InAddrErrors":
|
||||||
procSnmp.Ip.InAddrErrors = value
|
procSnmp.Ip.InAddrErrors = &value
|
||||||
case "ForwDatagrams":
|
case "ForwDatagrams":
|
||||||
procSnmp.Ip.ForwDatagrams = value
|
procSnmp.Ip.ForwDatagrams = &value
|
||||||
case "InUnknownProtos":
|
case "InUnknownProtos":
|
||||||
procSnmp.Ip.InUnknownProtos = value
|
procSnmp.Ip.InUnknownProtos = &value
|
||||||
case "InDiscards":
|
case "InDiscards":
|
||||||
procSnmp.Ip.InDiscards = value
|
procSnmp.Ip.InDiscards = &value
|
||||||
case "InDelivers":
|
case "InDelivers":
|
||||||
procSnmp.Ip.InDelivers = value
|
procSnmp.Ip.InDelivers = &value
|
||||||
case "OutRequests":
|
case "OutRequests":
|
||||||
procSnmp.Ip.OutRequests = value
|
procSnmp.Ip.OutRequests = &value
|
||||||
case "OutDiscards":
|
case "OutDiscards":
|
||||||
procSnmp.Ip.OutDiscards = value
|
procSnmp.Ip.OutDiscards = &value
|
||||||
case "OutNoRoutes":
|
case "OutNoRoutes":
|
||||||
procSnmp.Ip.OutNoRoutes = value
|
procSnmp.Ip.OutNoRoutes = &value
|
||||||
case "ReasmTimeout":
|
case "ReasmTimeout":
|
||||||
procSnmp.Ip.ReasmTimeout = value
|
procSnmp.Ip.ReasmTimeout = &value
|
||||||
case "ReasmReqds":
|
case "ReasmReqds":
|
||||||
procSnmp.Ip.ReasmReqds = value
|
procSnmp.Ip.ReasmReqds = &value
|
||||||
case "ReasmOKs":
|
case "ReasmOKs":
|
||||||
-      procSnmp.Ip.ReasmOKs = value
+      procSnmp.Ip.ReasmOKs = &value
     case "ReasmFails":
-      procSnmp.Ip.ReasmFails = value
+      procSnmp.Ip.ReasmFails = &value
     case "FragOKs":
-      procSnmp.Ip.FragOKs = value
+      procSnmp.Ip.FragOKs = &value
     case "FragFails":
-      procSnmp.Ip.FragFails = value
+      procSnmp.Ip.FragFails = &value
     case "FragCreates":
-      procSnmp.Ip.FragCreates = value
+      procSnmp.Ip.FragCreates = &value
     }
   case "Icmp":
     switch key {
     case "InMsgs":
-      procSnmp.Icmp.InMsgs = value
+      procSnmp.Icmp.InMsgs = &value
     case "InErrors":
-      procSnmp.Icmp.InErrors = value
+      procSnmp.Icmp.InErrors = &value
     case "InCsumErrors":
-      procSnmp.Icmp.InCsumErrors = value
+      procSnmp.Icmp.InCsumErrors = &value
     case "InDestUnreachs":
-      procSnmp.Icmp.InDestUnreachs = value
+      procSnmp.Icmp.InDestUnreachs = &value
     case "InTimeExcds":
-      procSnmp.Icmp.InTimeExcds = value
+      procSnmp.Icmp.InTimeExcds = &value
     case "InParmProbs":
-      procSnmp.Icmp.InParmProbs = value
+      procSnmp.Icmp.InParmProbs = &value
     case "InSrcQuenchs":
-      procSnmp.Icmp.InSrcQuenchs = value
+      procSnmp.Icmp.InSrcQuenchs = &value
     case "InRedirects":
-      procSnmp.Icmp.InRedirects = value
+      procSnmp.Icmp.InRedirects = &value
     case "InEchos":
-      procSnmp.Icmp.InEchos = value
+      procSnmp.Icmp.InEchos = &value
     case "InEchoReps":
-      procSnmp.Icmp.InEchoReps = value
+      procSnmp.Icmp.InEchoReps = &value
     case "InTimestamps":
-      procSnmp.Icmp.InTimestamps = value
+      procSnmp.Icmp.InTimestamps = &value
     case "InTimestampReps":
-      procSnmp.Icmp.InTimestampReps = value
+      procSnmp.Icmp.InTimestampReps = &value
     case "InAddrMasks":
-      procSnmp.Icmp.InAddrMasks = value
+      procSnmp.Icmp.InAddrMasks = &value
     case "InAddrMaskReps":
-      procSnmp.Icmp.InAddrMaskReps = value
+      procSnmp.Icmp.InAddrMaskReps = &value
     case "OutMsgs":
-      procSnmp.Icmp.OutMsgs = value
+      procSnmp.Icmp.OutMsgs = &value
     case "OutErrors":
-      procSnmp.Icmp.OutErrors = value
+      procSnmp.Icmp.OutErrors = &value
     case "OutDestUnreachs":
-      procSnmp.Icmp.OutDestUnreachs = value
+      procSnmp.Icmp.OutDestUnreachs = &value
     case "OutTimeExcds":
-      procSnmp.Icmp.OutTimeExcds = value
+      procSnmp.Icmp.OutTimeExcds = &value
     case "OutParmProbs":
-      procSnmp.Icmp.OutParmProbs = value
+      procSnmp.Icmp.OutParmProbs = &value
     case "OutSrcQuenchs":
-      procSnmp.Icmp.OutSrcQuenchs = value
+      procSnmp.Icmp.OutSrcQuenchs = &value
     case "OutRedirects":
-      procSnmp.Icmp.OutRedirects = value
+      procSnmp.Icmp.OutRedirects = &value
     case "OutEchos":
-      procSnmp.Icmp.OutEchos = value
+      procSnmp.Icmp.OutEchos = &value
     case "OutEchoReps":
-      procSnmp.Icmp.OutEchoReps = value
+      procSnmp.Icmp.OutEchoReps = &value
     case "OutTimestamps":
-      procSnmp.Icmp.OutTimestamps = value
+      procSnmp.Icmp.OutTimestamps = &value
     case "OutTimestampReps":
-      procSnmp.Icmp.OutTimestampReps = value
+      procSnmp.Icmp.OutTimestampReps = &value
     case "OutAddrMasks":
-      procSnmp.Icmp.OutAddrMasks = value
+      procSnmp.Icmp.OutAddrMasks = &value
     case "OutAddrMaskReps":
-      procSnmp.Icmp.OutAddrMaskReps = value
+      procSnmp.Icmp.OutAddrMaskReps = &value
     }
   case "IcmpMsg":
     switch key {
     case "InType3":
-      procSnmp.IcmpMsg.InType3 = value
+      procSnmp.IcmpMsg.InType3 = &value
     case "OutType3":
-      procSnmp.IcmpMsg.OutType3 = value
+      procSnmp.IcmpMsg.OutType3 = &value
     }
   case "Tcp":
     switch key {
     case "RtoAlgorithm":
-      procSnmp.Tcp.RtoAlgorithm = value
+      procSnmp.Tcp.RtoAlgorithm = &value
     case "RtoMin":
-      procSnmp.Tcp.RtoMin = value
+      procSnmp.Tcp.RtoMin = &value
     case "RtoMax":
-      procSnmp.Tcp.RtoMax = value
+      procSnmp.Tcp.RtoMax = &value
     case "MaxConn":
-      procSnmp.Tcp.MaxConn = value
+      procSnmp.Tcp.MaxConn = &value
     case "ActiveOpens":
-      procSnmp.Tcp.ActiveOpens = value
+      procSnmp.Tcp.ActiveOpens = &value
     case "PassiveOpens":
-      procSnmp.Tcp.PassiveOpens = value
+      procSnmp.Tcp.PassiveOpens = &value
     case "AttemptFails":
-      procSnmp.Tcp.AttemptFails = value
+      procSnmp.Tcp.AttemptFails = &value
     case "EstabResets":
-      procSnmp.Tcp.EstabResets = value
+      procSnmp.Tcp.EstabResets = &value
     case "CurrEstab":
-      procSnmp.Tcp.CurrEstab = value
+      procSnmp.Tcp.CurrEstab = &value
     case "InSegs":
-      procSnmp.Tcp.InSegs = value
+      procSnmp.Tcp.InSegs = &value
     case "OutSegs":
-      procSnmp.Tcp.OutSegs = value
+      procSnmp.Tcp.OutSegs = &value
     case "RetransSegs":
-      procSnmp.Tcp.RetransSegs = value
+      procSnmp.Tcp.RetransSegs = &value
     case "InErrs":
-      procSnmp.Tcp.InErrs = value
+      procSnmp.Tcp.InErrs = &value
     case "OutRsts":
-      procSnmp.Tcp.OutRsts = value
+      procSnmp.Tcp.OutRsts = &value
     case "InCsumErrors":
-      procSnmp.Tcp.InCsumErrors = value
+      procSnmp.Tcp.InCsumErrors = &value
     }
   case "Udp":
     switch key {
     case "InDatagrams":
-      procSnmp.Udp.InDatagrams = value
+      procSnmp.Udp.InDatagrams = &value
     case "NoPorts":
-      procSnmp.Udp.NoPorts = value
+      procSnmp.Udp.NoPorts = &value
     case "InErrors":
-      procSnmp.Udp.InErrors = value
+      procSnmp.Udp.InErrors = &value
     case "OutDatagrams":
-      procSnmp.Udp.OutDatagrams = value
+      procSnmp.Udp.OutDatagrams = &value
     case "RcvbufErrors":
-      procSnmp.Udp.RcvbufErrors = value
+      procSnmp.Udp.RcvbufErrors = &value
     case "SndbufErrors":
-      procSnmp.Udp.SndbufErrors = value
+      procSnmp.Udp.SndbufErrors = &value
     case "InCsumErrors":
-      procSnmp.Udp.InCsumErrors = value
+      procSnmp.Udp.InCsumErrors = &value
     case "IgnoredMulti":
-      procSnmp.Udp.IgnoredMulti = value
+      procSnmp.Udp.IgnoredMulti = &value
     }
   case "UdpLite":
     switch key {
     case "InDatagrams":
-      procSnmp.UdpLite.InDatagrams = value
+      procSnmp.UdpLite.InDatagrams = &value
     case "NoPorts":
-      procSnmp.UdpLite.NoPorts = value
+      procSnmp.UdpLite.NoPorts = &value
     case "InErrors":
-      procSnmp.UdpLite.InErrors = value
+      procSnmp.UdpLite.InErrors = &value
     case "OutDatagrams":
-      procSnmp.UdpLite.OutDatagrams = value
+      procSnmp.UdpLite.OutDatagrams = &value
     case "RcvbufErrors":
-      procSnmp.UdpLite.RcvbufErrors = value
+      procSnmp.UdpLite.RcvbufErrors = &value
     case "SndbufErrors":
-      procSnmp.UdpLite.SndbufErrors = value
+      procSnmp.UdpLite.SndbufErrors = &value
     case "InCsumErrors":
-      procSnmp.UdpLite.InCsumErrors = value
+      procSnmp.UdpLite.InCsumErrors = &value
     case "IgnoredMulti":
-      procSnmp.UdpLite.IgnoredMulti = value
+      procSnmp.UdpLite.IgnoredMulti = &value
     }
   }
 }
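Every assignment in parseSnmpStats now stores &value, so a counter that is absent from /proc/net/snmp stays nil instead of silently reading as zero. A minimal consumer sketch, not part of this commit, assuming procfs v0.9.0 on a Linux host with /proc mounted:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	p, err := fs.Self()
	if err != nil {
		panic(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		panic(err)
	}
	// Counters are *float64 after this bump: nil means the kernel did not
	// report the field, which is now distinguishable from a real zero.
	if v := snmp.Tcp.RetransSegs; v != nil {
		fmt.Printf("TcpRetransSegs: %f\n", *v)
	} else {
		fmt.Println("TcpRetransSegs not reported")
	}
}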
364 vendor/github.com/prometheus/procfs/proc_snmp6.go (generated, vendored)
@@ -36,106 +36,106 @@ type ProcSnmp6 struct {
 }
 
 type Ip6 struct { // nolint:revive
-  InReceives float64
+  InReceives *float64
-  InHdrErrors float64
+  InHdrErrors *float64
-  InTooBigErrors float64
+  InTooBigErrors *float64
-  InNoRoutes float64
+  InNoRoutes *float64
-  InAddrErrors float64
+  InAddrErrors *float64
-  InUnknownProtos float64
+  InUnknownProtos *float64
-  InTruncatedPkts float64
+  InTruncatedPkts *float64
-  InDiscards float64
+  InDiscards *float64
-  InDelivers float64
+  InDelivers *float64
-  OutForwDatagrams float64
+  OutForwDatagrams *float64
-  OutRequests float64
+  OutRequests *float64
-  OutDiscards float64
+  OutDiscards *float64
-  OutNoRoutes float64
+  OutNoRoutes *float64
-  ReasmTimeout float64
+  ReasmTimeout *float64
-  ReasmReqds float64
+  ReasmReqds *float64
-  ReasmOKs float64
+  ReasmOKs *float64
-  ReasmFails float64
+  ReasmFails *float64
-  FragOKs float64
+  FragOKs *float64
-  FragFails float64
+  FragFails *float64
-  FragCreates float64
+  FragCreates *float64
-  InMcastPkts float64
+  InMcastPkts *float64
-  OutMcastPkts float64
+  OutMcastPkts *float64
-  InOctets float64
+  InOctets *float64
-  OutOctets float64
+  OutOctets *float64
-  InMcastOctets float64
+  InMcastOctets *float64
-  OutMcastOctets float64
+  OutMcastOctets *float64
-  InBcastOctets float64
+  InBcastOctets *float64
-  OutBcastOctets float64
+  OutBcastOctets *float64
-  InNoECTPkts float64
+  InNoECTPkts *float64
-  InECT1Pkts float64
+  InECT1Pkts *float64
-  InECT0Pkts float64
+  InECT0Pkts *float64
-  InCEPkts float64
+  InCEPkts *float64
 }
 
 type Icmp6 struct {
-  InMsgs float64
+  InMsgs *float64
-  InErrors float64
+  InErrors *float64
-  OutMsgs float64
+  OutMsgs *float64
-  OutErrors float64
+  OutErrors *float64
-  InCsumErrors float64
+  InCsumErrors *float64
-  InDestUnreachs float64
+  InDestUnreachs *float64
-  InPktTooBigs float64
+  InPktTooBigs *float64
-  InTimeExcds float64
+  InTimeExcds *float64
-  InParmProblems float64
+  InParmProblems *float64
-  InEchos float64
+  InEchos *float64
-  InEchoReplies float64
+  InEchoReplies *float64
-  InGroupMembQueries float64
+  InGroupMembQueries *float64
-  InGroupMembResponses float64
+  InGroupMembResponses *float64
-  InGroupMembReductions float64
+  InGroupMembReductions *float64
-  InRouterSolicits float64
+  InRouterSolicits *float64
-  InRouterAdvertisements float64
+  InRouterAdvertisements *float64
-  InNeighborSolicits float64
+  InNeighborSolicits *float64
-  InNeighborAdvertisements float64
+  InNeighborAdvertisements *float64
-  InRedirects float64
+  InRedirects *float64
-  InMLDv2Reports float64
+  InMLDv2Reports *float64
-  OutDestUnreachs float64
+  OutDestUnreachs *float64
-  OutPktTooBigs float64
+  OutPktTooBigs *float64
-  OutTimeExcds float64
+  OutTimeExcds *float64
-  OutParmProblems float64
+  OutParmProblems *float64
-  OutEchos float64
+  OutEchos *float64
-  OutEchoReplies float64
+  OutEchoReplies *float64
-  OutGroupMembQueries float64
+  OutGroupMembQueries *float64
-  OutGroupMembResponses float64
+  OutGroupMembResponses *float64
-  OutGroupMembReductions float64
+  OutGroupMembReductions *float64
-  OutRouterSolicits float64
+  OutRouterSolicits *float64
-  OutRouterAdvertisements float64
+  OutRouterAdvertisements *float64
-  OutNeighborSolicits float64
+  OutNeighborSolicits *float64
-  OutNeighborAdvertisements float64
+  OutNeighborAdvertisements *float64
-  OutRedirects float64
+  OutRedirects *float64
-  OutMLDv2Reports float64
+  OutMLDv2Reports *float64
-  InType1 float64
+  InType1 *float64
-  InType134 float64
+  InType134 *float64
-  InType135 float64
+  InType135 *float64
-  InType136 float64
+  InType136 *float64
-  InType143 float64
+  InType143 *float64
-  OutType133 float64
+  OutType133 *float64
-  OutType135 float64
+  OutType135 *float64
-  OutType136 float64
+  OutType136 *float64
-  OutType143 float64
+  OutType143 *float64
 }
 
 type Udp6 struct { // nolint:revive
-  InDatagrams float64
+  InDatagrams *float64
-  NoPorts float64
+  NoPorts *float64
-  InErrors float64
+  InErrors *float64
-  OutDatagrams float64
+  OutDatagrams *float64
-  RcvbufErrors float64
+  RcvbufErrors *float64
-  SndbufErrors float64
+  SndbufErrors *float64
-  InCsumErrors float64
+  InCsumErrors *float64
-  IgnoredMulti float64
+  IgnoredMulti *float64
 }
 
 type UdpLite6 struct { // nolint:revive
-  InDatagrams float64
+  InDatagrams *float64
-  NoPorts float64
+  NoPorts *float64
-  InErrors float64
+  InErrors *float64
-  OutDatagrams float64
+  OutDatagrams *float64
-  RcvbufErrors float64
+  RcvbufErrors *float64
-  SndbufErrors float64
+  SndbufErrors *float64
-  InCsumErrors float64
+  InCsumErrors *float64
 }
 
 func (p Proc) Snmp6() (ProcSnmp6, error) {
@@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
   case "Ip6":
     switch key {
     case "InReceives":
-      procSnmp6.Ip6.InReceives = value
+      procSnmp6.Ip6.InReceives = &value
     case "InHdrErrors":
-      procSnmp6.Ip6.InHdrErrors = value
+      procSnmp6.Ip6.InHdrErrors = &value
     case "InTooBigErrors":
-      procSnmp6.Ip6.InTooBigErrors = value
+      procSnmp6.Ip6.InTooBigErrors = &value
     case "InNoRoutes":
-      procSnmp6.Ip6.InNoRoutes = value
+      procSnmp6.Ip6.InNoRoutes = &value
     case "InAddrErrors":
-      procSnmp6.Ip6.InAddrErrors = value
+      procSnmp6.Ip6.InAddrErrors = &value
     case "InUnknownProtos":
-      procSnmp6.Ip6.InUnknownProtos = value
+      procSnmp6.Ip6.InUnknownProtos = &value
     case "InTruncatedPkts":
-      procSnmp6.Ip6.InTruncatedPkts = value
+      procSnmp6.Ip6.InTruncatedPkts = &value
     case "InDiscards":
-      procSnmp6.Ip6.InDiscards = value
+      procSnmp6.Ip6.InDiscards = &value
     case "InDelivers":
-      procSnmp6.Ip6.InDelivers = value
+      procSnmp6.Ip6.InDelivers = &value
    case "OutForwDatagrams":
-      procSnmp6.Ip6.OutForwDatagrams = value
+      procSnmp6.Ip6.OutForwDatagrams = &value
     case "OutRequests":
-      procSnmp6.Ip6.OutRequests = value
+      procSnmp6.Ip6.OutRequests = &value
     case "OutDiscards":
-      procSnmp6.Ip6.OutDiscards = value
+      procSnmp6.Ip6.OutDiscards = &value
     case "OutNoRoutes":
-      procSnmp6.Ip6.OutNoRoutes = value
+      procSnmp6.Ip6.OutNoRoutes = &value
     case "ReasmTimeout":
-      procSnmp6.Ip6.ReasmTimeout = value
+      procSnmp6.Ip6.ReasmTimeout = &value
     case "ReasmReqds":
-      procSnmp6.Ip6.ReasmReqds = value
+      procSnmp6.Ip6.ReasmReqds = &value
     case "ReasmOKs":
-      procSnmp6.Ip6.ReasmOKs = value
+      procSnmp6.Ip6.ReasmOKs = &value
     case "ReasmFails":
-      procSnmp6.Ip6.ReasmFails = value
+      procSnmp6.Ip6.ReasmFails = &value
     case "FragOKs":
-      procSnmp6.Ip6.FragOKs = value
+      procSnmp6.Ip6.FragOKs = &value
     case "FragFails":
-      procSnmp6.Ip6.FragFails = value
+      procSnmp6.Ip6.FragFails = &value
     case "FragCreates":
-      procSnmp6.Ip6.FragCreates = value
+      procSnmp6.Ip6.FragCreates = &value
     case "InMcastPkts":
-      procSnmp6.Ip6.InMcastPkts = value
+      procSnmp6.Ip6.InMcastPkts = &value
     case "OutMcastPkts":
-      procSnmp6.Ip6.OutMcastPkts = value
+      procSnmp6.Ip6.OutMcastPkts = &value
     case "InOctets":
-      procSnmp6.Ip6.InOctets = value
+      procSnmp6.Ip6.InOctets = &value
     case "OutOctets":
-      procSnmp6.Ip6.OutOctets = value
+      procSnmp6.Ip6.OutOctets = &value
     case "InMcastOctets":
-      procSnmp6.Ip6.InMcastOctets = value
+      procSnmp6.Ip6.InMcastOctets = &value
     case "OutMcastOctets":
-      procSnmp6.Ip6.OutMcastOctets = value
+      procSnmp6.Ip6.OutMcastOctets = &value
     case "InBcastOctets":
-      procSnmp6.Ip6.InBcastOctets = value
+      procSnmp6.Ip6.InBcastOctets = &value
     case "OutBcastOctets":
-      procSnmp6.Ip6.OutBcastOctets = value
+      procSnmp6.Ip6.OutBcastOctets = &value
     case "InNoECTPkts":
-      procSnmp6.Ip6.InNoECTPkts = value
+      procSnmp6.Ip6.InNoECTPkts = &value
     case "InECT1Pkts":
-      procSnmp6.Ip6.InECT1Pkts = value
+      procSnmp6.Ip6.InECT1Pkts = &value
     case "InECT0Pkts":
-      procSnmp6.Ip6.InECT0Pkts = value
+      procSnmp6.Ip6.InECT0Pkts = &value
     case "InCEPkts":
-      procSnmp6.Ip6.InCEPkts = value
+      procSnmp6.Ip6.InCEPkts = &value
 
     }
   case "Icmp6":
     switch key {
     case "InMsgs":
-      procSnmp6.Icmp6.InMsgs = value
+      procSnmp6.Icmp6.InMsgs = &value
     case "InErrors":
-      procSnmp6.Icmp6.InErrors = value
+      procSnmp6.Icmp6.InErrors = &value
     case "OutMsgs":
-      procSnmp6.Icmp6.OutMsgs = value
+      procSnmp6.Icmp6.OutMsgs = &value
     case "OutErrors":
-      procSnmp6.Icmp6.OutErrors = value
+      procSnmp6.Icmp6.OutErrors = &value
     case "InCsumErrors":
-      procSnmp6.Icmp6.InCsumErrors = value
+      procSnmp6.Icmp6.InCsumErrors = &value
     case "InDestUnreachs":
-      procSnmp6.Icmp6.InDestUnreachs = value
+      procSnmp6.Icmp6.InDestUnreachs = &value
     case "InPktTooBigs":
-      procSnmp6.Icmp6.InPktTooBigs = value
+      procSnmp6.Icmp6.InPktTooBigs = &value
     case "InTimeExcds":
-      procSnmp6.Icmp6.InTimeExcds = value
+      procSnmp6.Icmp6.InTimeExcds = &value
     case "InParmProblems":
-      procSnmp6.Icmp6.InParmProblems = value
+      procSnmp6.Icmp6.InParmProblems = &value
     case "InEchos":
-      procSnmp6.Icmp6.InEchos = value
+      procSnmp6.Icmp6.InEchos = &value
     case "InEchoReplies":
-      procSnmp6.Icmp6.InEchoReplies = value
+      procSnmp6.Icmp6.InEchoReplies = &value
     case "InGroupMembQueries":
-      procSnmp6.Icmp6.InGroupMembQueries = value
+      procSnmp6.Icmp6.InGroupMembQueries = &value
     case "InGroupMembResponses":
-      procSnmp6.Icmp6.InGroupMembResponses = value
+      procSnmp6.Icmp6.InGroupMembResponses = &value
     case "InGroupMembReductions":
-      procSnmp6.Icmp6.InGroupMembReductions = value
+      procSnmp6.Icmp6.InGroupMembReductions = &value
     case "InRouterSolicits":
-      procSnmp6.Icmp6.InRouterSolicits = value
+      procSnmp6.Icmp6.InRouterSolicits = &value
     case "InRouterAdvertisements":
-      procSnmp6.Icmp6.InRouterAdvertisements = value
+      procSnmp6.Icmp6.InRouterAdvertisements = &value
     case "InNeighborSolicits":
-      procSnmp6.Icmp6.InNeighborSolicits = value
+      procSnmp6.Icmp6.InNeighborSolicits = &value
     case "InNeighborAdvertisements":
-      procSnmp6.Icmp6.InNeighborAdvertisements = value
+      procSnmp6.Icmp6.InNeighborAdvertisements = &value
     case "InRedirects":
-      procSnmp6.Icmp6.InRedirects = value
+      procSnmp6.Icmp6.InRedirects = &value
     case "InMLDv2Reports":
-      procSnmp6.Icmp6.InMLDv2Reports = value
+      procSnmp6.Icmp6.InMLDv2Reports = &value
     case "OutDestUnreachs":
-      procSnmp6.Icmp6.OutDestUnreachs = value
+      procSnmp6.Icmp6.OutDestUnreachs = &value
     case "OutPktTooBigs":
-      procSnmp6.Icmp6.OutPktTooBigs = value
+      procSnmp6.Icmp6.OutPktTooBigs = &value
     case "OutTimeExcds":
-      procSnmp6.Icmp6.OutTimeExcds = value
+      procSnmp6.Icmp6.OutTimeExcds = &value
     case "OutParmProblems":
-      procSnmp6.Icmp6.OutParmProblems = value
+      procSnmp6.Icmp6.OutParmProblems = &value
     case "OutEchos":
-      procSnmp6.Icmp6.OutEchos = value
+      procSnmp6.Icmp6.OutEchos = &value
     case "OutEchoReplies":
-      procSnmp6.Icmp6.OutEchoReplies = value
+      procSnmp6.Icmp6.OutEchoReplies = &value
     case "OutGroupMembQueries":
-      procSnmp6.Icmp6.OutGroupMembQueries = value
+      procSnmp6.Icmp6.OutGroupMembQueries = &value
     case "OutGroupMembResponses":
-      procSnmp6.Icmp6.OutGroupMembResponses = value
+      procSnmp6.Icmp6.OutGroupMembResponses = &value
     case "OutGroupMembReductions":
-      procSnmp6.Icmp6.OutGroupMembReductions = value
+      procSnmp6.Icmp6.OutGroupMembReductions = &value
     case "OutRouterSolicits":
-      procSnmp6.Icmp6.OutRouterSolicits = value
+      procSnmp6.Icmp6.OutRouterSolicits = &value
     case "OutRouterAdvertisements":
-      procSnmp6.Icmp6.OutRouterAdvertisements = value
+      procSnmp6.Icmp6.OutRouterAdvertisements = &value
     case "OutNeighborSolicits":
-      procSnmp6.Icmp6.OutNeighborSolicits = value
+      procSnmp6.Icmp6.OutNeighborSolicits = &value
     case "OutNeighborAdvertisements":
-      procSnmp6.Icmp6.OutNeighborAdvertisements = value
+      procSnmp6.Icmp6.OutNeighborAdvertisements = &value
     case "OutRedirects":
-      procSnmp6.Icmp6.OutRedirects = value
+      procSnmp6.Icmp6.OutRedirects = &value
     case "OutMLDv2Reports":
-      procSnmp6.Icmp6.OutMLDv2Reports = value
+      procSnmp6.Icmp6.OutMLDv2Reports = &value
     case "InType1":
-      procSnmp6.Icmp6.InType1 = value
+      procSnmp6.Icmp6.InType1 = &value
     case "InType134":
-      procSnmp6.Icmp6.InType134 = value
+      procSnmp6.Icmp6.InType134 = &value
     case "InType135":
-      procSnmp6.Icmp6.InType135 = value
+      procSnmp6.Icmp6.InType135 = &value
     case "InType136":
-      procSnmp6.Icmp6.InType136 = value
+      procSnmp6.Icmp6.InType136 = &value
     case "InType143":
-      procSnmp6.Icmp6.InType143 = value
+      procSnmp6.Icmp6.InType143 = &value
     case "OutType133":
-      procSnmp6.Icmp6.OutType133 = value
+      procSnmp6.Icmp6.OutType133 = &value
     case "OutType135":
-      procSnmp6.Icmp6.OutType135 = value
+      procSnmp6.Icmp6.OutType135 = &value
     case "OutType136":
-      procSnmp6.Icmp6.OutType136 = value
+      procSnmp6.Icmp6.OutType136 = &value
     case "OutType143":
-      procSnmp6.Icmp6.OutType143 = value
+      procSnmp6.Icmp6.OutType143 = &value
     }
   case "Udp6":
     switch key {
     case "InDatagrams":
-      procSnmp6.Udp6.InDatagrams = value
+      procSnmp6.Udp6.InDatagrams = &value
    case "NoPorts":
-      procSnmp6.Udp6.NoPorts = value
+      procSnmp6.Udp6.NoPorts = &value
     case "InErrors":
-      procSnmp6.Udp6.InErrors = value
+      procSnmp6.Udp6.InErrors = &value
     case "OutDatagrams":
-      procSnmp6.Udp6.OutDatagrams = value
+      procSnmp6.Udp6.OutDatagrams = &value
     case "RcvbufErrors":
-      procSnmp6.Udp6.RcvbufErrors = value
+      procSnmp6.Udp6.RcvbufErrors = &value
     case "SndbufErrors":
-      procSnmp6.Udp6.SndbufErrors = value
+      procSnmp6.Udp6.SndbufErrors = &value
     case "InCsumErrors":
-      procSnmp6.Udp6.InCsumErrors = value
+      procSnmp6.Udp6.InCsumErrors = &value
     case "IgnoredMulti":
-      procSnmp6.Udp6.IgnoredMulti = value
+      procSnmp6.Udp6.IgnoredMulti = &value
     }
   case "UdpLite6":
     switch key {
     case "InDatagrams":
-      procSnmp6.UdpLite6.InDatagrams = value
+      procSnmp6.UdpLite6.InDatagrams = &value
     case "NoPorts":
-      procSnmp6.UdpLite6.NoPorts = value
+      procSnmp6.UdpLite6.NoPorts = &value
     case "InErrors":
-      procSnmp6.UdpLite6.InErrors = value
+      procSnmp6.UdpLite6.InErrors = &value
     case "OutDatagrams":
-      procSnmp6.UdpLite6.OutDatagrams = value
+      procSnmp6.UdpLite6.OutDatagrams = &value
     case "RcvbufErrors":
-      procSnmp6.UdpLite6.RcvbufErrors = value
+      procSnmp6.UdpLite6.RcvbufErrors = &value
     case "SndbufErrors":
-      procSnmp6.UdpLite6.SndbufErrors = value
+      procSnmp6.UdpLite6.SndbufErrors = &value
     case "InCsumErrors":
-      procSnmp6.UdpLite6.InCsumErrors = value
+      procSnmp6.UdpLite6.InCsumErrors = &value
     }
   }
 }
4 vendor/github.com/prometheus/procfs/proc_stat.go (generated, vendored)
@@ -102,6 +102,8 @@ type ProcStat struct {
   RSS int
   // Soft limit in bytes on the rss of the process.
   RSSLimit uint64
+  // CPU number last executed on.
+  Processor uint
   // Real-time scheduling priority, a number in the range 1 to 99 for processes
   // scheduled under a real-time policy, or 0, for non-real-time processes.
   RTPriority uint
@@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) {
     &ignoreUint64,
     &ignoreUint64,
     &ignoreInt64,
-    &ignoreInt64,
+    &s.Processor,
     &s.RTPriority,
     &s.Policy,
     &s.DelayAcctBlkIOTicks,
6 vendor/github.com/prometheus/procfs/proc_status.go (generated, vendored)
@@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
   kv := strings.SplitN(line, ":", 2)
 
   // removes spaces
-  k := string(strings.TrimSpace(kv[0]))
+  k := strings.TrimSpace(kv[0])
-  v := string(strings.TrimSpace(kv[1]))
+  v := strings.TrimSpace(kv[1])
   // removes "kB"
-  v = string(bytes.Trim([]byte(v), " kB"))
+  v = strings.TrimSuffix(v, " kB")
 
   // value to int when possible
   // we can skip error check here, 'cause vKBytes is not used when value is a string
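The switch from bytes.Trim to strings.TrimSuffix is a behavior fix rather than a cleanup: Trim strips any run of the cutset characters (space, 'k', 'B') from both ends of the value, while TrimSuffix removes exactly one literal " kB". A tiny illustration, not taken from this commit:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	v := "1024 kB"
	// Old approach: cutset-based Trim can strip more than intended when a
	// value happens to begin or end with 'k' or 'B'.
	fmt.Println(string(bytes.Trim([]byte(v), " kB")))          // "1024"
	fmt.Println(string(bytes.Trim([]byte("kB 8 kB"), " kB")))  // "8"
	// New approach: only the literal " kB" suffix is removed.
	fmt.Println(strings.TrimSuffix(v, " kB"))  // "1024"
	fmt.Println(strings.TrimSuffix("8", " kB")) // "8" (unchanged)
}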
22 vendor/github.com/prometheus/procfs/stat.go (generated, vendored)
@@ -62,7 +62,7 @@ type Stat struct {
   // Summed up cpu statistics.
   CPUTotal CPUStat
   // Per-CPU statistics.
-  CPU []CPUStat
+  CPU map[int64]CPUStat
   // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
   IRQTotal uint64
   // Number of times a numbered IRQ was triggered.
@@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) {
   if err != nil {
     return Stat{}, err
   }
+  procStat, err := parseStat(bytes.NewReader(data), fileName)
+  if err != nil {
+    return Stat{}, err
+  }
+  return procStat, nil
+}
 
-  stat := Stat{}
+// parseStat parses the metrics from /proc/[pid]/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+  var (
+    scanner = bufio.NewScanner(r)
+    stat = Stat{
+      CPU: make(map[int64]CPUStat),
+    }
+    err error
+  )
 
-  scanner := bufio.NewScanner(bytes.NewReader(data))
   for scanner.Scan() {
     line := scanner.Text()
     parts := strings.Fields(scanner.Text())
@@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) {
   if cpuID == -1 {
     stat.CPUTotal = cpuStat
   } else {
-    for int64(len(stat.CPU)) <= cpuID {
-      stat.CPU = append(stat.CPU, CPUStat{})
-    }
     stat.CPU[cpuID] = cpuStat
   }
 }
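Stat.CPU changes from a slice indexed by position to a map keyed by CPU id, so offline or non-contiguous CPUs no longer produce zero-filled placeholder entries. A hedged sketch of how a caller iterates the new map, not part of this commit, assuming procfs v0.9.0:

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/procfs"
)

// printPerCPU prints per-CPU counters in a stable order; with the map-typed
// CPU field the keys must be sorted explicitly if ordered output matters.
func printPerCPU(stat procfs.Stat) {
	ids := make([]int64, 0, len(stat.CPU))
	for id := range stat.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		c := stat.CPU[id]
		fmt.Printf("cpu%d user=%.2f system=%.2f idle=%.2f\n", id, c.User, c.System, c.Idle)
	}
}

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	stat, err := fs.Stat()
	if err != nil {
		panic(err)
	}
	printPerCPU(stat)
}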
79 vendor/github.com/prometheus/procfs/thread.go (generated, vendored, new file)
@@ -0,0 +1,79 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+  "fmt"
+  "os"
+  "strconv"
+
+  fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+  fs, err := NewFS(DefaultMountPoint)
+  if err != nil {
+    return Procs{}, err
+  }
+  return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+  taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+  d, err := os.Open(taskPath)
+  if err != nil {
+    return Procs{}, err
+  }
+  defer d.Close()
+
+  names, err := d.Readdirnames(-1)
+  if err != nil {
+    return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+  }
+
+  t := Procs{}
+  for _, n := range names {
+    tid, err := strconv.ParseInt(n, 10, 64)
+    if err != nil {
+      continue
+    }
+    t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
+  }
+
+  return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+  taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+  if _, err := os.Stat(taskPath); err != nil {
+    return Proc{}, err
+  }
+  return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+  tfs := fsi.FS(proc.path("task"))
+  if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
+    return Proc{}, err
+  }
+  return Proc{PID: tid, fs: tfs}, nil
+}
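The new thread.go exposes per-thread /proc/PID/task/TID data through the existing Proc type. A small usage sketch, not part of this commit, assuming procfs v0.9.0 and a Linux /proc mount:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	// List the threads of the current process and print each TID together
	// with the last-executed CPU, using the Processor field added in
	// proc_stat.go above.
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		panic(err)
	}
	for _, t := range threads {
		st, err := t.Stat()
		if err != nil {
			continue
		}
		fmt.Printf("tid=%d comm=%s processor=%d\n", t.PID, st.Comm, st.Processor)
	}
}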
2 vendor/github.com/prometheus/procfs/vm.go (generated, vendored)
@@ -26,7 +26,9 @@ import (
 )
 
 // The VM interface is described at
+//
 // https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
 // Each setting is exposed as a single file.
 // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
 // and numa_zonelist_order (deprecated) which is a string.
71 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go (generated, vendored)
@@ -1,71 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
-  "context"
-  "io"
-  "net/http"
-  "net/url"
-  "strings"
-)
-
-// Do sends an HTTP request with the provided http.Client and returns
-// an HTTP response.
-//
-// If the client is nil, http.DefaultClient is used.
-//
-// The provided ctx must be non-nil. If it is canceled or times out,
-// ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
-  if client == nil {
-    client = http.DefaultClient
-  }
-  resp, err := client.Do(req.WithContext(ctx))
-  // If we got an error, and the context has been canceled,
-  // the context's error is probably more useful.
-  if err != nil {
-    select {
-    case <-ctx.Done():
-      err = ctx.Err()
-    default:
-    }
-  }
-  return resp, err
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
-  req, err := http.NewRequest("GET", url, nil)
-  if err != nil {
-    return nil, err
-  }
-  return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
-  req, err := http.NewRequest("HEAD", url, nil)
-  if err != nil {
-    return nil, err
-  }
-  return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
-  req, err := http.NewRequest("POST", url, body)
-  if err != nil {
-    return nil, err
-  }
-  req.Header.Set("Content-Type", bodyType)
-  return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
-  return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
4
vendor/golang.org/x/oauth2/internal/token.go
generated
vendored
4
vendor/golang.org/x/oauth2/internal/token.go
generated
vendored
@@ -19,8 +19,6 @@ import (
   "strings"
   "sync"
   "time"
-
-  "golang.org/x/net/context/ctxhttp"
 )
 
 // Token represents the credentials used to authorize
@@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
 }
 
 func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
-  r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
+  r, err := ContextClient(ctx).Do(req.WithContext(ctx))
   if err != nil {
     return nil, err
   }
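With the ctxhttp helper deleted, the oauth2 internals call the client directly. The equivalent context-aware pattern in plain net/http looks roughly like this; a sketch for illustration, not code from this commit:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// doWithContext mirrors what ctxhttp.Do used to wrap: bind the request to a
// context and let the client's transport handle cancellation and deadlines.
func doWithContext(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	return client.Do(req.WithContext(ctx))
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, err := doWithContext(ctx, nil, req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}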
15 vendor/modules.txt (vendored)
@@ -360,7 +360,7 @@ github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.16
 ## explicit; go 1.15
 github.com/mattn/go-isatty
-# github.com/matttproud/golang_protobuf_extensions v1.0.2
+# github.com/matttproud/golang_protobuf_extensions v1.0.4
 ## explicit; go 1.9
 github.com/matttproud/golang_protobuf_extensions/pbutil
 # github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
@@ -437,7 +437,7 @@ github.com/pkg/xattr
 # github.com/pmezard/go-difflib v1.0.0
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.14.0
+# github.com/prometheus/client_golang v1.15.1
 ## explicit; go 1.17
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors
@@ -448,13 +448,13 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint
 # github.com/prometheus/client_model v0.3.0
 ## explicit; go 1.9
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.37.0
+# github.com/prometheus/common v0.42.0
-## explicit; go 1.16
+## explicit; go 1.18
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.8.0
+# github.com/prometheus/procfs v0.9.0
-## explicit; go 1.17
+## explicit; go 1.18
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -560,7 +560,6 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 # golang.org/x/net v0.9.0
 ## explicit; go 1.17
 golang.org/x/net/context
-golang.org/x/net/context/ctxhttp
 golang.org/x/net/html
 golang.org/x/net/html/atom
 golang.org/x/net/html/charset
@@ -572,7 +571,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.4.0
+# golang.org/x/oauth2 v0.5.0
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal