Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-12-18 02:50:30 +00:00
Remove nsenter packages from vendor

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

Commit d4a67c05f3, parent 70d49b4e47

Gopkg.lock (generated), 153 lines changed
@@ -9,8 +9,8 @@
     "pkg/guid",
   ]
   pruneopts = "NUT"
-  revision = "a969fb018bd3439cc50861ce06796c40430e3a65"
-  version = "v0.4.13"
+  revision = "6c72808b55902eae4c5943626030429ff20f3b63"
+  version = "v0.4.14"

 [[projects]]
   digest = "1:0a111edd8693fd977f42a0c4f199a0efb13c20aec9da99ad8830c7bb6a87e8d6"
@@ -74,15 +74,15 @@
   revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85"

 [[projects]]
-  digest = "1:6dd2b549cc43ec65eb4c2a66364a76aa947f20dbf8bf59440b837d4546be9448"
+  digest = "1:8b01feb9e9fe1983f75d8ae8e948d3d62731fbff4ea4ab6a33d25731a3df4247"
   name = "github.com/emicklei/go-restful"
   packages = [
     ".",
     "log",
   ]
   pruneopts = "NUT"
-  revision = "6ac3b8eb89d325e5c750d77f344a6870464d03c3"
-  version = "v2.9.6"
+  revision = "f48aa74d360eda838561e6db9d36c6538398343f"
+  version = "v2.10.0"

 [[projects]]
   digest = "1:ad32dc29f37281bacb5dcedff17c9461dc1739dc8a5f63a71ab491c6e92edf8d"
@@ -117,39 +117,39 @@
   version = "v1.0.0"

 [[projects]]
-  digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441"
+  digest = "1:3758c86e787dfe5792a23430f34636106a16da914446724399c9c12f121a225d"
   name = "github.com/go-openapi/jsonpointer"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "a105a905c5e6ad147f08504784917f3e178e0ba5"
-  version = "v0.19.2"
+  revision = "ed123515f087412cd7ef02e49b0b0a5e6a79a360"
+  version = "v0.19.3"

 [[projects]]
   digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
   name = "github.com/go-openapi/jsonreference"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "2903bfd4bfbaf188694f1edf731f2725a8fa344f"
-  version = "v0.19.2"
+  revision = "82f31475a8f7a12bc26962f6e26ceade8ea6f66a"
+  version = "v0.19.3"

 [[projects]]
-  digest = "1:f04cb7797917efbe9d9efd509a311f28930a9ee7ef1e047d01808d64ded53554"
+  digest = "1:7fc0908f885357d71c0fa50bb2333eb63ff6f9402584e7f8ac13f5cdf6e967d5"
   name = "github.com/go-openapi/spec"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "bdfd7e07daecc404d77868a88b2364d0aed0ee5a"
-  version = "v0.19.2"
+  revision = "2223ab324566e4ace63ab69b9c8fff1b81a40eeb"
+  version = "v0.19.3"

 [[projects]]
-  digest = "1:b2ea9a25f2ca9ebea4e58ec61832a80caee98d85038563e43f774b37ef990ebb"
+  digest = "1:b318f36b1725220c1580b27504b8ee9d33a2a8e2db58f28c176edf6f0d1b7fb2"
   name = "github.com/go-openapi/swag"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "de649ffb9e02183a414820c5b1b4582f7b009792"
-  version = "v0.19.4"
+  revision = "c3d0f7896d589f3babb99eea24bbc7de98108e72"
+  version = "v0.19.5"

 [[projects]]
-  digest = "1:37234906013da82d4c05666262eda5bdec8f736bafa7d4ec1fb3314e965b476f"
+  digest = "1:cba177b91e20ac24abe7fb21265f28aa1d2606d7a56c1726a5f53cb0abcb154c"
   name = "github.com/gogo/protobuf"
   packages = [
     "gogoproto",
@@ -158,8 +158,8 @@
     "sortkeys",
   ]
   pruneopts = "NUT"
-  revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
-  version = "v1.2.1"
+  revision = "0ca988a254f991240804bf9821f3450d87ccbb1b"
+  version = "v1.3.0"

 [[projects]]
   branch = "master"
@@ -195,7 +195,7 @@
   version = "v1.0.0"

 [[projects]]
-  digest = "1:bf40199583e5143d1472fc34d10d6f4b69d97572142acf343b3e43136da40823"
+  digest = "1:1d1cbf539d9ac35eb3148129f96be5537f1a1330cadcc7e3a83b4e72a59672a3"
   name = "github.com/google/go-cmp"
   packages = [
     "cmp",
@@ -205,8 +205,8 @@
     "cmp/internal/value",
   ]
   pruneopts = "NUT"
-  revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b"
-  version = "v0.3.0"
+  revision = "2d0692c2e9617365a95b295612ac0d4415ba4627"
+  version = "v0.3.1"

 [[projects]]
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
@@ -225,7 +225,7 @@
   version = "v1.1.1"

 [[projects]]
-  digest = "1:459a00967aaf06edff3228e128dd243d7c91b0fc11ad2f7ceaa98f094bf66796"
+  digest = "1:5e092394bed250d7fda36cef8b7e1d22bb2d5f71878bbb137be5fc1c2705f965"
   name = "github.com/googleapis/gnostic"
   packages = [
     "OpenAPIv2",
@@ -233,8 +233,8 @@
     "extensions",
   ]
   pruneopts = "NUT"
-  revision = "e73c7ec21d36ddb0711cb36d1502d18363b5c2c9"
-  version = "v0.3.0"
+  revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab"
+  version = "v0.3.1"

 [[projects]]
   branch = "master"
@@ -248,12 +248,12 @@
   revision = "901d90724c7919163f472a9812253fb26761123d"

 [[projects]]
-  digest = "1:1b497700cbc61544430babb4e4fd4313758bb9fec7144ca50c625fa6ce8266c7"
+  digest = "1:432e8b04365b52add2d6256a42de45cebebf6b7a132d4eacb052602fe199a80a"
   name = "github.com/grpc-ecosystem/go-grpc-middleware"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "c250d6563d4d4c20252cd865923440e829844f4e"
-  version = "v1.0.0"
+  revision = "dd15ed025b6054e5253963e355991f3070d4e593"
+  version = "v1.1.0"

 [[projects]]
   digest = "1:5872c7f130f62fc34bfda20babad36be6309c00b5c9207717f7cd2a51536fff4"
@@ -333,11 +333,10 @@
     "pkg/client/clientset/versioned/typed/volumesnapshot/v1alpha1",
   ]
   pruneopts = "UT"
-  revision = "b3f591d85cce516e431c70e5337d5c67611ae2fe"
-  version = "v1.2.0"
+  revision = "34fa9aad47eef93ca8281c2a029f031852cfefa5"
+  version = "v1.2.1"

 [[projects]]
-  branch = "master"
   digest = "1:927762c6729b4e72957ba3310e485ed09cf8451c5a637a52fd016a9fe09e7936"
   name = "github.com/mailru/easyjson"
   packages = [
@@ -346,7 +345,8 @@
     "jwriter",
   ]
   pruneopts = "NUT"
-  revision = "b2ccc519800e761ac8000b95e5d57c80a897ff9e"
+  revision = "1b2b06f5f209fea48ff5922d8bfb2b9ed5d8f00b"
+  version = "v0.7.0"

 [[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@@ -373,7 +373,7 @@
   version = "1.0.1"

 [[projects]]
-  digest = "1:3c46171ee5eee66086897e1efca67b84bf552b1f80039d421068c90684868194"
+  digest = "1:a8ce19ad7ec609c1b417d0719810d603e691941f3dac17fed958eee74b1e3622"
   name = "github.com/onsi/ginkgo"
   packages = [
     ".",
@@ -396,11 +396,11 @@
     "types",
   ]
   pruneopts = "NUT"
-  revision = "eea6ad008b96acdaa524f5b409513bf062b500ad"
-  version = "v1.8.0"
+  revision = "974566c482abc93ffd3df6f4626e79076c7ed290"
+  version = "v1.10.1"

 [[projects]]
-  digest = "1:6bc9eee358de51d995955be7609ed71c946ede28498f1d918456f74c1a1a8953"
+  digest = "1:2892fb9c0ff1df871c8eb4fea05a37920cc3c2c74d7f9dd7177583da3054f38a"
   name = "github.com/onsi/gomega"
   packages = [
     ".",
@@ -417,8 +417,8 @@
     "types",
   ]
   pruneopts = "NUT"
-  revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2"
-  version = "v1.5.0"
+  revision = "bdebf9e0ece900259084cfa4121b97ce1a540939"
+  version = "v1.7.0"

 [[projects]]
   digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0"
@@ -486,10 +486,10 @@
   name = "github.com/prometheus/client_model"
   packages = ["go"]
   pruneopts = "NUT"
-  revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
+  revision = "14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016"

 [[projects]]
-  digest = "1:d03ca24670416dc8fccc78b05d6736ec655416ca7db0a028e8fb92cfdfe3b55e"
+  digest = "1:98278956c7c550efc75a027e528aa51743f06fd0e33613d7ed224432a11e5ecf"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -497,19 +497,20 @@
     "model",
   ]
   pruneopts = "NUT"
-  revision = "31bed53e4047fd6c510e43a941f90cb31be0972a"
-  version = "v0.6.0"
+  revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
+  version = "v0.7.0"

 [[projects]]
-  digest = "1:19305fc369377c111c865a7a01e11c675c57c52a932353bbd4ea360bd5b72d99"
+  digest = "1:bbbacd138cb711e328390a2d4bfaca1a41a8575f3c893450bf2ea1b74acdc7be"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
     "internal/fs",
+    "internal/util",
   ]
   pruneopts = "NUT"
-  revision = "3f98efb27840a48a7a2898ec80be07674d19f9c8"
-  version = "v0.0.3"
+  revision = "499c85531f756d1129edd26485a5f73871eeb308"
+  version = "v0.0.5"

 [[projects]]
   digest = "1:6792bb72ea0e7112157d02e4e175cd421b43d004a853f56316a19beca6e0c074"
@@ -531,27 +532,27 @@
   version = "v0.0.5"

 [[projects]]
-  digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
+  digest = "1:d115708a27806831d1babbc6f28a7478ddf8e32a7095507c816e2515fb03a810"
   name = "github.com/spf13/pflag"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
-  version = "v1.0.3"
+  revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab"
+  version = "v1.0.5"

 [[projects]]
-  digest = "1:85adecf1dbfae5769cc62a8bcea6498f8e9f0e2452e4e6686eb36fa4428a5a6e"
+  digest = "1:7096df4a9258067bb9ff5da62864a645d38dd94cd3f273ff4ab54d0de548a348"
   name = "github.com/stretchr/testify"
   packages = [
     "assert",
     "require",
   ]
   pruneopts = "NUT"
-  revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
-  version = "v1.3.0"
+  revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
+  version = "v1.4.0"

 [[projects]]
   branch = "master"
-  digest = "1:cc7c5d88eaf1f0dda299c0cda5984dbf9fd1eabc127f5b804c79bae6e54d82fe"
+  digest = "1:f90b0e268b7bc48a028c62505b1f00b0262bf47b6abc8a7cf9b7b7d371c8fa20"
   name = "golang.org/x/crypto"
   packages = [
     "curve25519",
@@ -565,11 +566,11 @@
     "ssh/terminal",
   ]
   pruneopts = "NUT"
-  revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
+  revision = "227b76d455e791cb042b03e633e2f7fbcfdf74a5"

 [[projects]]
   branch = "master"
-  digest = "1:b026c2dc7d306ce6bab89cdaa8c72404e9b264d98c8abdd3c967f607d82a3d3e"
+  digest = "1:8dbf67013f9ebde4e58577049b3ec7c145f99b56a85508e0f7f8fa9517f537c3"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -586,7 +587,7 @@
     "websocket",
   ]
   pruneopts = "NUT"
-  revision = "ca1201d0de80cfde86cb01aea620983605dfe99b"
+  revision = "a8b05e9114ab0cb08faec337c959aed24b68bf50"

 [[projects]]
   branch = "master"
@@ -601,7 +602,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:816780136a1ee09b3070cdfed092c244010fa3f2bde27beb1b1f80dfef4338e1"
+  digest = "1:589aebca3b78c9aa5a4fdb64c8f4a8a4d866351675254a8896f6dad403fc8623"
   name = "golang.org/x/sys"
   packages = [
     "cpu",
@@ -609,7 +610,7 @@
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "cbf593c0f2f39034e9104bbf77e2ec7c48c98fc5"
+  revision = "0c1ff786ef13daa914a3351c5e6b0321aed5960e"

 [[projects]]
   digest = "1:0b5dc8c3581fc3ea2b80cc2e360dfb9c2d61dd0cba0d2fe247e8edd3e83f7551"
@@ -670,8 +671,8 @@
     "urlfetch",
   ]
   pruneopts = "NUT"
-  revision = "b2f4a3cf3c67576a2ee09e1fe62656a5086ce880"
-  version = "v1.6.1"
+  revision = "5f2a59506353b8d5ba8cbbcd9f3c1f41f1eaf079"
+  version = "v1.6.2"

 [[projects]]
   branch = "master"
@@ -679,10 +680,10 @@
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   pruneopts = "NUT"
-  revision = "c506a9f9061087022822e8da603a52fc387115a8"
+  revision = "f660b865573183437d2d868f703fe88bb8af0b55"

 [[projects]]
-  digest = "1:b5fb6a9fb9d276fe26f303d502e79a7e93afcaae43d05dc95b4993f5b59c6c6a"
+  digest = "1:53c75e2976d7b405f40b4a6c6104c74b20dc4fdc3a481e5dcbe953c95432ed37"
   name = "google.golang.org/grpc"
   packages = [
     ".",
@@ -720,8 +721,8 @@
     "tap",
   ]
   pruneopts = "NUT"
-  revision = "045159ad57f3781d409358e3ade910a018c16b30"
-  version = "v1.22.1"
+  revision = "39e8a7b072a67ca2a75f57fa2e0d50995f5b22f6"
+  version = "v1.23.1"

 [[projects]]
   digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
@@ -1110,7 +1111,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:dd46fc9a38affca84442cb92686f6d2211cf7a7dfc30a337afcc6f883502ca89"
+  digest = "1:ee858dec8a6688bf0e8286ddbcadf78e4e0fcff82a2489e20306205488a50954"
   name = "k8s.io/cloud-provider"
   packages = [
     ".",
@@ -1119,7 +1120,7 @@
     "volume/helpers",
   ]
   pruneopts = "NUT"
-  revision = "4405817736360488dfda1950e6266d2ff4bcba9e"
+  revision = "3af65fe0d6277c25902a906729467dfa84ff6198"

 [[projects]]
   branch = "master"
@@ -1130,7 +1131,7 @@
     "featuregate",
   ]
   pruneopts = "NUT"
-  revision = "8d609ba1a28a47e7880ecf03fd75faba56797aad"
+  revision = "61bc4cc48c91118b272b37f356826d1a9ff06603"

 [[projects]]
   branch = "master"
@@ -1138,15 +1139,15 @@
   name = "k8s.io/cri-api"
   packages = ["pkg/apis/runtime/v1alpha2"]
   pruneopts = "NUT"
-  revision = "3baa588ab5670ff6ff8a6fcb4a8ec9234a2346c4"
+  revision = "24ae4d4e8b036b885ee1f4930ec2b173eabb28e7"

 [[projects]]
-  digest = "1:43099cc4ed575c40f80277c7ba7168df37d0c663bdc4f541325430bd175cce8a"
+  digest = "1:c0693cb981f43d82a767a3217b7640a4bdb341731d3814b38602f4e5dc4f01b3"
   name = "k8s.io/klog"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "d98d8acdac006fb39831f1b25640813fef9c314f"
-  version = "v0.3.3"
+  revision = "2ca9ad30301bf30a8a6e0fa2110db6b8df699a91"
+  version = "v1.0.0"

 [[projects]]
   branch = "master"
@@ -1157,10 +1158,10 @@
     "pkg/util/proto",
   ]
   pruneopts = "NUT"
-  revision = "5e22f3d471e6f24ca20becfdffdc6206c7cecac8"
+  revision = "0270cf2f1c1d995d34b36019a6f65d58e6e33ad4"

 [[projects]]
-  digest = "1:0d867e47afa8b49c0b89e9d79b9c34729f951b7d3e78d595a15a42ed4069cb40"
+  digest = "1:8b03c9d8a1cc5d810d04a33a36cf02ec9919195e1e04c73b83a1ab30b8489f66"
   name = "k8s.io/kubernetes"
   packages = [
     "pkg/api/legacyscheme",
@@ -1263,7 +1264,6 @@
     "pkg/volume",
     "pkg/volume/util",
     "pkg/volume/util/fs",
-    "pkg/volume/util/nsenter",
     "pkg/volume/util/quota",
     "pkg/volume/util/quota/common",
     "pkg/volume/util/recyclerclient",
@@ -1289,7 +1289,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:9300c13de75a1813f7c386de9aebc7dc49f490bb77369cb9d50af571dd9acf52"
+  digest = "1:770a7cde06154023b8e245e1d6a53deb8f3e87ec11055b395909d67524465d69"
   name = "k8s.io/utils"
   packages = [
     "buffer",
@@ -1305,7 +1305,7 @@
     "trace",
   ]
   pruneopts = "NUT"
-  revision = "581e00157fb1a0435d4fac54a52d1ca1e481d60e"
+  revision = "5008bf6f8cd62f4b52816cfa99163fedb053d0be"

 [[projects]]
   digest = "1:cb422c75bab66a8339a38b64e837f3b28f3d5a8c06abd7b9048f420363baa18a"
@@ -1385,12 +1385,9 @@
     "k8s.io/kubernetes/pkg/client/conditions",
     "k8s.io/kubernetes/pkg/util/mount",
     "k8s.io/kubernetes/pkg/volume",
-    "k8s.io/kubernetes/pkg/volume/util/nsenter",
     "k8s.io/kubernetes/test/e2e/framework",
     "k8s.io/kubernetes/test/e2e/framework/log",
     "k8s.io/kubernetes/test/utils",
-    "k8s.io/utils/exec",
-    "k8s.io/utils/nsenter",
   ]
   solver-name = "gps-cdcl"
   solver-version = 1
vendor/github.com/emicklei/go-restful/curly.go (generated, vendored), 13 lines changed

@@ -47,7 +47,7 @@ func (c CurlyRouter) SelectRoute(
 func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
 	candidates := make(sortableCurlyRoutes, 0, 8)
 	for _, each := range ws.routes {
-		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
 		if matches {
 			candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
 		}
@@ -57,7 +57,7 @@ func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortab
 }

 // matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are.
-func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) {
 	if len(routeTokens) < len(requestTokens) {
 		// proceed in matching only if last routeToken is wildcard
 		count := len(routeTokens)
@@ -72,6 +72,15 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin
 			return false, 0, 0
 		}
 		requestToken := requestTokens[i]
+		if routeHasCustomVerb && hasCustomVerb(routeToken) {
+			if !isMatchCustomVerb(routeToken, requestToken) {
+				return false, 0, 0
+			}
+			staticCount++
+			requestToken = removeCustomVerb(requestToken)
+			routeToken = removeCustomVerb(routeToken)
+		}
+
 		if strings.HasPrefix(routeToken, "{") {
 			paramCount++
 			if colon := strings.Index(routeToken, ":"); colon != -1 {
vendor/github.com/emicklei/go-restful/custom_verb.go (generated, vendored, new file), 29 lines added

@@ -0,0 +1,29 @@
+package restful
+
+import (
+	"fmt"
+	"regexp"
+)
+
+var (
+	customVerbReg = regexp.MustCompile(":([A-Za-z]+)$")
+)
+
+func hasCustomVerb(routeToken string) bool {
+	return customVerbReg.MatchString(routeToken)
+}
+
+func isMatchCustomVerb(routeToken string, pathToken string) bool {
+	rs := customVerbReg.FindStringSubmatch(routeToken)
+	if len(rs) < 2 {
+		return false
+	}
+
+	customVerb := rs[1]
+	specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb))
+	return specificVerbReg.MatchString(pathToken)
+}
+
+func removeCustomVerb(str string) string {
+	return customVerbReg.ReplaceAllString(str, "")
+}
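The new file above is what lets go-restful route Google-style custom verbs, where a path token carries a trailing ":verb" suffix (for example "/users/{id}:activate"). As an illustration only, and not code taken from the vendored library, here is a minimal standalone sketch of the same matching idea, assuming only the ":verb" regular expression shown above:

package main

import (
	"fmt"
	"regexp"
)

// customVerb matches a trailing ":verb" suffix on a path token,
// e.g. "{id}:activate" yields the verb "activate".
var customVerb = regexp.MustCompile(`:([A-Za-z]+)$`)

// matchToken reports whether a request token satisfies a route token,
// treating a ":verb" suffix as a literal that must match exactly.
func matchToken(routeToken, requestToken string) bool {
	if m := customVerb.FindStringSubmatch(routeToken); m != nil {
		// The request must carry the same custom verb as the route.
		if !regexp.MustCompile(":" + m[1] + "$").MatchString(requestToken) {
			return false
		}
		routeToken = customVerb.ReplaceAllString(routeToken, "")
		requestToken = customVerb.ReplaceAllString(requestToken, "")
	}
	// Path parameters ("{id}") match anything; everything else is literal.
	return len(routeToken) > 0 && routeToken[0] == '{' || routeToken == requestToken
}

func main() {
	fmt.Println(matchToken("{id}:activate", "1234:activate")) // true
	fmt.Println(matchToken("{id}:activate", "1234:delete"))   // false
	fmt.Println(matchToken("users", "users"))                 // true
}

A request token such as "1234:activate" then matches the route token "{id}:activate": the verbs must agree literally, and once they are stripped the remaining "{id}" matches like any ordinary path parameter, which is what the curly.go and path_processor.go changes wire in.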
vendor/github.com/emicklei/go-restful/path_processor.go (generated, vendored), 5 lines added

@@ -29,6 +29,11 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath
 		} else {
 			value = urlParts[i]
 		}
+		if r.hasCustomVerb && hasCustomVerb(key) {
+			key = removeCustomVerb(key)
+			value = removeCustomVerb(value)
+		}
+
 		if strings.HasPrefix(key, "{") { // path-parameter
 			if colon := strings.Index(key, ":"); colon != -1 {
 				// extract by regex
vendor/github.com/emicklei/go-restful/route.go (generated, vendored), 4 lines added

@@ -49,11 +49,15 @@ type Route struct {

 	//Overrides the container.contentEncodingEnabled
 	contentEncodingEnabled *bool

+	// indicate route path has custom verb
+	hasCustomVerb bool
 }

 // Initialize for Route
 func (r *Route) postBuild() {
 	r.pathParts = tokenizePath(r.Path)
+	r.hasCustomVerb = hasCustomVerb(r.Path)
 }

 // Create Request and Response from their http versions
vendor/github.com/emicklei/go-restful/web_service.go (generated, vendored), 2 lines changed

@@ -188,7 +188,7 @@ func (w *WebService) RemoveRoute(path, method string) error {
 			continue
 		}
 		newRoutes[current] = w.routes[ix]
-		current = current + 1
+		current++
 	}
 	w.routes = newRoutes
 	return nil
vendor/github.com/go-openapi/jsonpointer/pointer.go (generated, vendored), 2 lines changed

@@ -135,7 +135,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
 		kv := reflect.ValueOf(decodedToken)
 		mv := rValue.MapIndex(kv)

-		if mv.IsValid() && !swag.IsZero(mv) {
+		if mv.IsValid() {
 			return mv.Interface(), kind, nil
 		}
 		return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
vendor/github.com/go-openapi/spec/bindata.go (generated, vendored), 8 lines changed

@@ -21,7 +21,7 @@ import (
 func bindataRead(data []byte, name string) ([]byte, error) {
 	gz, err := gzip.NewReader(bytes.NewBuffer(data))
 	if err != nil {
-		return nil, fmt.Errorf("Read %q: %v", name, err)
+		return nil, fmt.Errorf("read %q: %v", name, err)
 	}

 	var buf bytes.Buffer
@@ -29,7 +29,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
 	clErr := gz.Close()

 	if err != nil {
-		return nil, fmt.Errorf("Read %q: %v", name, err)
+		return nil, fmt.Errorf("read %q: %v", name, err)
 	}
 	if clErr != nil {
 		return nil, err
@@ -85,7 +85,7 @@ func jsonschemaDraft04JSON() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)}
+	info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}}
 	return a, nil
 }
@@ -105,7 +105,7 @@ func v2SchemaJSON() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)}
+	info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}}
 	return a, nil
 }
vendor/github.com/go-openapi/spec/schema_loader.go (generated, vendored), 1 line added

@@ -160,6 +160,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
 	if !fromCache {
 		b, err := r.loadDoc(normalized)
 		if err != nil {
+			debugLog("unable to load the document: %v", err)
 			return nil, url.URL{}, false, err
 		}

vendor/github.com/go-openapi/swag/yaml.go (generated, vendored), 47 lines changed

@@ -143,19 +143,43 @@ func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) {
 }

 func transformData(input interface{}) (out interface{}, err error) {
+	format := func(t interface{}) (string, error) {
+		switch k := t.(type) {
+		case string:
+			return k, nil
+		case uint:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint8:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint16:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint32:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint64:
+			return strconv.FormatUint(k, 10), nil
+		case int:
+			return strconv.Itoa(k), nil
+		case int8:
+			return strconv.FormatInt(int64(k), 10), nil
+		case int16:
+			return strconv.FormatInt(int64(k), 10), nil
+		case int32:
+			return strconv.FormatInt(int64(k), 10), nil
+		case int64:
+			return strconv.FormatInt(k, 10), nil
+		default:
+			return "", fmt.Errorf("unexpected map key type, got: %T", k)
+		}
+	}
+
 	switch in := input.(type) {
 	case yaml.MapSlice:

 		o := make(JSONMapSlice, len(in))
 		for i, mi := range in {
 			var nmi JSONMapItem
-			switch k := mi.Key.(type) {
-			case string:
-				nmi.Key = k
-			case int:
-				nmi.Key = strconv.Itoa(k)
-			default:
-				return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key)
+			if nmi.Key, err = format(mi.Key); err != nil {
+				return nil, err
 			}

 			v, ert := transformData(mi.Value)
@@ -170,13 +194,8 @@ func transformData(input interface{}) (out interface{}, err error) {
 		o := make(JSONMapSlice, 0, len(in))
 		for ke, va := range in {
 			var nmi JSONMapItem
-			switch k := ke.(type) {
-			case string:
-				nmi.Key = k
-			case int:
-				nmi.Key = strconv.Itoa(k)
-			default:
-				return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke)
+			if nmi.Key, err = format(ke); err != nil {
+				return nil, err
 			}

 			v, ert := transformData(va)
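The rewritten transformData above replaces the old string-or-int key handling with a single format helper that accepts every Go integer width, so YAML maps keyed by numbers (HTTP status codes are a common case) can still be converted into JSON objects, whose keys must be strings. A rough standalone sketch of that idea, assuming nothing beyond the standard library; the helper name keyString is invented here for illustration:

package main

import (
	"fmt"
	"strconv"
)

// keyString converts a YAML map key of a common scalar type into the
// string form JSON requires. Illustrative only, not the vendored code.
func keyString(k interface{}) (string, error) {
	switch v := k.(type) {
	case string:
		return v, nil
	case int:
		return strconv.Itoa(v), nil
	case int64:
		return strconv.FormatInt(v, 10), nil
	case uint64:
		return strconv.FormatUint(v, 10), nil
	default:
		return "", fmt.Errorf("unexpected map key type %T", k)
	}
}

func main() {
	// YAML allows non-string keys such as 200 or 404 (for example the
	// responses section of an OpenAPI document); JSON object keys must
	// always be strings, so each key is formatted before conversion.
	for _, k := range []interface{}{"ok", 200, int64(404), uint64(7)} {
		s, err := keyString(k)
		fmt.Println(s, err)
	}
}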
vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go (generated, vendored), 2 lines changed

@@ -19,7 +19,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

 var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
 	ExtendedType: (*descriptor.EnumOptions)(nil),
vendor/github.com/gogo/protobuf/proto/extensions.go (generated, vendored), 1 line added

@@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
 // SetExtension sets the specified extension of pb to the specified value.
 func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
 	if epb, ok := pb.(extensionsBytes); ok {
+		ClearExtension(pb, extension)
 		newb, err := encodeExtension(extension, value)
 		if err != nil {
 			return err
vendor/github.com/gogo/protobuf/proto/extensions_gogo.go (generated, vendored), 21 lines added

@@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error)
 	return EncodeExtensionMap(m.extensionsWrite(), data)
 }

+func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
+}
+
 func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
 	o := 0
 	for _, e := range m {
@@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
 	return o, nil
 }

+func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	end := len(data)
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[end-len(e.enc):], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		end -= n
+		o += n
+	}
+	return o, nil
+}
+
 func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
 	e := m[id]
 	if err := e.Encode(); err != nil {
vendor/github.com/gogo/protobuf/proto/lib.go (generated, vendored), 18 lines changed

@@ -948,13 +948,19 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 }

-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion2 = true
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion3 = true

-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion1 = true
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion1 = true
+)

 // InternalMessageInfo is a type used internally by generated .pb.go files.
 // This type is not intended to be used by non-generated code.
vendor/github.com/gogo/protobuf/proto/properties.go (generated, vendored), 66 lines changed

@@ -400,6 +400,15 @@ func GetProperties(t reflect.Type) *StructProperties {
 	return sprop
 }

+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
 // getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 	if prop, ok := propertiesMap[t]; ok {
@@ -441,37 +450,40 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	// Re-order prop.order.
 	sort.Sort(prop)

-	type oneofMessage interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-	}
-	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+	if isOneofMessage {
 		var oots []interface{}
-		_, _, _, oots = om.XXX_OneofFuncs()
-
-		// Interpret oneof metadata.
-		prop.OneofTypes = make(map[string]*OneofProperties)
-		for _, oot := range oots {
-			oop := &OneofProperties{
-				Type: reflect.ValueOf(oot).Type(), // *T
-				Prop: new(Properties),
-			}
-			sft := oop.Type.Elem().Field(0)
-			oop.Prop.Name = sft.Name
-			oop.Prop.Parse(sft.Tag.Get("protobuf"))
-			// There will be exactly one interface field that
-			// this new value is assignable to.
-			for i := 0; i < t.NumField(); i++ {
-				f := t.Field(i)
-				if f.Type.Kind() != reflect.Interface {
-					continue
-				}
-				if !oop.Type.AssignableTo(f.Type) {
-					continue
-				}
-				oop.Field = i
-				break
-			}
-			prop.OneofTypes[oop.Prop.OrigName] = oop
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oots = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oots = m.XXX_OneofWrappers()
+		}
+		if len(oots) > 0 {
+			// Interpret oneof metadata.
+			prop.OneofTypes = make(map[string]*OneofProperties)
+			for _, oot := range oots {
+				oop := &OneofProperties{
+					Type: reflect.ValueOf(oot).Type(), // *T
+					Prop: new(Properties),
+				}
+				sft := oop.Type.Elem().Field(0)
+				oop.Prop.Name = sft.Name
+				oop.Prop.Parse(sft.Tag.Get("protobuf"))
+				// There will be exactly one interface field that
+				// this new value is assignable to.
+				for i := 0; i < t.NumField(); i++ {
+					f := t.Field(i)
+					if f.Type.Kind() != reflect.Interface {
+						continue
+					}
+					if !oop.Type.AssignableTo(f.Type) {
+						continue
+					}
+					oop.Field = i
+					break
+				}
+				prop.OneofTypes[oop.Prop.OrigName] = oop
+			}
 		}
 	}

vendor/github.com/gogo/protobuf/proto/table_marshal.go (generated, vendored), 13 lines changed

@@ -389,8 +389,13 @@ func (u *marshalInfo) computeMarshalInfo() {
 	// get oneof implementers
 	var oneofImplementers []interface{}
 	// gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
-	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
-		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	if isOneofMessage {
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
 	}

 	// normal fields
@@ -519,10 +524,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
 		}
 	}
 }

-type oneofMessage interface {
-	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
 // wiretype returns the wire encoding of the type.
 func wiretype(encoding string) uint64 {
 	switch encoding {
vendor/github.com/gogo/protobuf/proto/table_merge.go (generated, vendored), 19 lines added

@@ -530,6 +530,25 @@ func (mi *mergeInfo) computeMergeInfo() {
 		}
 	case reflect.Struct:
 		switch {
+		case isSlice && !isPointer: // E.g. []pb.T
+			mergeInfo := getMergeInfo(tf)
+			zero := reflect.Zero(tf)
+			mfi.merge = func(dst, src pointer) {
+				// TODO: Make this faster?
+				dstsp := dst.asPointerTo(f.Type)
+				dsts := dstsp.Elem()
+				srcs := src.asPointerTo(f.Type).Elem()
+				for i := 0; i < srcs.Len(); i++ {
+					dsts = reflect.Append(dsts, zero)
+					srcElement := srcs.Index(i).Addr()
+					dstElement := dsts.Index(dsts.Len() - 1).Addr()
+					mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
+				}
+				if dsts.IsNil() {
+					dsts = reflect.MakeSlice(f.Type, 0, 0)
+				}
+				dstsp.Elem().Set(dsts)
+			}
 		case !isPointer:
 			mergeInfo := getMergeInfo(tf)
 			mfi.merge = func(dst, src pointer) {
vendor/github.com/gogo/protobuf/proto/table_unmarshal.go (generated, vendored), 22 lines changed

@@ -371,15 +371,18 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 	}

 	// Find any types associated with oneof fields.
-	// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
-	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
 	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
-	if fn.IsValid() && len(oneofFields) > 0 {
-		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
-		for i := res.Len() - 1; i >= 0; i-- {
-			v := res.Index(i)                             // interface{}
-			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
-			typ := tptr.Elem()                            // Msg_X
+	if len(oneofFields) > 0 {
+		var oneofImplementers []interface{}
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
+		for _, v := range oneofImplementers {
+			tptr := reflect.TypeOf(v) // *Msg_X
+			typ := tptr.Elem()        // Msg_X

 			f := typ.Field(0) // oneof implementers have one field
 			baseUnmarshal := fieldUnmarshaler(&f)
@@ -407,11 +410,12 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 				u.setTag(fieldNum, of.field, unmarshal, 0, name)
 			}
 		}
+
 		}
 	}

 	// Get extension ranges, if any.
-	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
 	if fn.IsValid() {
 		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
 			panic("a message with extensions, but no extensions field in " + t.Name())
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go (generated, vendored), 2 lines changed

@@ -18,7 +18,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

 type FieldDescriptorProto_Type int32

vendor/github.com/google/go-cmp/cmp/internal/value/sort.go (generated, vendored), 4 lines changed

@@ -19,7 +19,7 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
 	}

 	// Sort the map keys.
-	sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+	sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })

 	// Deduplicate keys (fails for NaNs).
 	vs2 := vs[:1]
@@ -42,6 +42,8 @@ func isLess(x, y reflect.Value) bool {
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
 		return x.Uint() < y.Uint()
 	case reflect.Float32, reflect.Float64:
+		// NOTE: This does not sort -0 as less than +0
+		// since Go maps treat -0 and +0 as equal keys.
 		fx, fy := x.Float(), y.Float()
 		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
 	case reflect.Complex64, reflect.Complex128:
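The switch from sort.Slice to sort.SliceStable above matters because isLess cannot give a total order for every key (NaN keys, and -0 versus +0, compare as neither less nor greater); a stable sort keeps such keys in their original relative order, so the rendered diff does not change between runs. A small illustration of that property, not taken from the vendored code:

package main

import (
	"fmt"
	"sort"
)

// Keys that the comparison cannot distinguish keep their original relative
// order under a stable sort; sort.Slice gives no such guarantee.
func main() {
	type kv struct {
		key  float64 // stands in for keys isLess treats as equal
		name string
	}
	items := []kv{{1, "a"}, {1, "b"}, {0, "c"}, {1, "d"}}

	sort.SliceStable(items, func(i, j int) bool {
		return items[i].key < items[j].key
	})
	// With SliceStable the three key==1 entries stay in order a, b, d;
	// sort.Slice would be free to permute them between runs.
	fmt.Println(items)
}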
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go (generated, vendored), 9 lines changed

@@ -4,7 +4,10 @@

 package value

-import "reflect"
+import (
+	"math"
+	"reflect"
+)

 // IsZero reports whether v is the zero value.
 // This does not rely on Interface and so can be used on unexported fields.
@@ -17,9 +20,9 @@ func IsZero(v reflect.Value) bool {
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
 		return v.Uint() == 0
 	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
+		return math.Float64bits(v.Float()) == 0
 	case reflect.Complex64, reflect.Complex128:
-		return v.Complex() == 0
+		return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
 	case reflect.String:
 		return v.String() == ""
 	case reflect.UnsafePointer:
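The zero.go change above swaps the floating-point comparisons from == to bit-level checks with math.Float64bits, so that negative zero is no longer reported as the zero value. A short, illustrative check of the difference:

package main

import (
	"fmt"
	"math"
)

// The == operator treats -0.0 and +0.0 as equal, but their bit patterns
// differ, so only +0.0 has an all-zero representation.
func main() {
	negZero := math.Copysign(0, -1)

	fmt.Println(negZero == 0)                   // true: == cannot tell -0.0 from +0.0
	fmt.Println(math.Float64bits(0.0) == 0)     // true: +0.0 is all zero bits
	fmt.Println(math.Float64bits(negZero) == 0) // false: -0.0 has the sign bit set
}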
vendor/github.com/google/go-cmp/cmp/report_compare.go (generated, vendored), 2 lines changed

@@ -168,7 +168,7 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te
 		var isZero bool
 		switch opts.DiffMode {
 		case diffIdentical:
-			isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueX)
+			isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
 		case diffRemoved:
 			isZero = value.IsZero(r.Value.ValueX)
 		case diffInserted:
vendor/github.com/google/go-cmp/cmp/report_reflect.go (generated, vendored), 1 line removed

@@ -208,7 +208,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t
 func formatMapKey(v reflect.Value) string {
 	var opts formatOptions
 	opts.TypeMode = elideType
-	opts.AvoidStringer = true
 	opts.ShallowPointers = true
 	s := opts.FormatValue(v, visitedPointers{}).String()
 	return strings.TrimSpace(s)
vendor/github.com/google/go-cmp/cmp/report_slices.go (generated, vendored), 4 lines changed

@@ -90,7 +90,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
 		}
 		if r == '\n' {
 			if maxLineLen < i-lastLineIdx {
-				lastLineIdx = i - lastLineIdx
+				maxLineLen = i - lastLineIdx
 			}
 			lastLineIdx = i + 1
 			numLines++
@@ -322,7 +322,7 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
 			hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
 			hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
 			if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
-				*prev = (*prev).Append(*curr).Append(*next)
+				*prev = prev.Append(*curr).Append(*next)
 				groups = groups[:len(groups)-1] // Truncate off equal group
 				continue
 			}
7 vendor/github.com/google/go-cmp/cmp/report_text.go (generated, vendored)
@ -19,6 +19,11 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
 type indentMode int
 
 func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+	// The output of Diff is documented as being unstable to provide future
+	// flexibility in changing the output for more humanly readable reports.
+	// This logic intentionally introduces instability to the exact output
+	// so that users can detect accidental reliance on stability early on,
+	// rather than much later when an actual change to the format occurs.
 	if flags.Deterministic || randBool {
 		// Use regular spaces (U+0020).
 		switch d {
@ -360,7 +365,7 @@ func (s diffStats) String() string {
 	// Pluralize the name (adjusting for some obscure English grammar rules).
 	name := s.Name
 	if sum > 1 {
-		name = name + "s"
+		name += "s"
 		if strings.HasSuffix(name, "ys") {
 			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
 		}
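The comment added above documents that cmp.Diff output is deliberately kept unstable (the indentation produced here depends on a random boolean unless the deterministic flag is set). The safe pattern this nudges users toward is asserting that the diff is empty rather than matching its exact text; a minimal sketch of such a test, not taken from this repository:

```go
package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// Hypothetical test: only the emptiness of the diff is asserted, never its
// exact formatting, which the library intentionally varies between runs.
func TestConfigRoundTrip(t *testing.T) {
	want := map[string]int{"replicas": 3}
	got := map[string]int{"replicas": 3}

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("config mismatch (-want +got):\n%s", diff)
	}
}
```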
3335 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go (generated, vendored)
File diff suppressed because it is too large
17 vendor/github.com/googleapis/gnostic/compiler/reader.go (generated, vendored)
@ -71,6 +71,17 @@ func RemoveFromInfoCache(filename string) {
 	delete(infoCache, filename)
 }
 
+func GetInfoCache() map[string]interface{} {
+	if infoCache == nil {
+		initializeInfoCache()
+	}
+	return infoCache
+}
+
+func ClearInfoCache() {
+	infoCache = make(map[string]interface{})
+}
+
 // FetchFile gets a specified file from the local filesystem or a remote location.
 func FetchFile(fileurl string) ([]byte, error) {
 	var bytes []byte
@ -168,7 +179,11 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
 	parts := strings.Split(ref, "#")
 	var filename string
 	if parts[0] != "" {
-		filename = basedir + parts[0]
+		filename = parts[0]
+		if _, err := url.ParseRequestURI(parts[0]); err != nil {
+			// It is not an URL, so the file is local
+			filename = basedir + parts[0]
+		}
 	} else {
 		filename = basefile
 	}
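The second hunk above changes how a `$ref` target is resolved: anything that parses as a URL is kept as-is, and only values that fail url.ParseRequestURI are treated as paths relative to the base directory. A small, self-contained sketch of that logic (the function name here is illustrative, not the vendored API):

```go
package main

import (
	"fmt"
	"net/url"
)

// resolveRef mirrors the logic added to ReadInfoForRef: keep the ref
// unchanged when it parses as a URL, otherwise treat it as a file
// relative to basedir.
func resolveRef(basedir, ref string) string {
	filename := ref
	if _, err := url.ParseRequestURI(ref); err != nil {
		// Not a URL, so the file is local.
		filename = basedir + ref
	}
	return filename
}

func main() {
	fmt.Println(resolveRef("specs/", "https://example.com/openapi.yaml")) // fetched as a URL
	fmt.Println(resolveRef("specs/", "definitions.yaml"))                 // resolved locally: specs/definitions.yaml
}
```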
94 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go (generated, vendored)
@ -1,12 +1,14 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: extension.proto
+// source: extensions/extension.proto
 
 package openapiextension_v1
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import any "github.com/golang/protobuf/ptypes/any"
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	any "github.com/golang/protobuf/ptypes/any"
+	math "math"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@ -17,7 +19,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
 // The version number of OpenAPI compiler.
 type Version struct {
@ -36,16 +38,17 @@ func (m *Version) Reset() { *m = Version{} }
|
|||||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Version) ProtoMessage() {}
|
func (*Version) ProtoMessage() {}
|
||||||
func (*Version) Descriptor() ([]byte, []int) {
|
func (*Version) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_extension_d25f09c742c58c90, []int{0}
|
return fileDescriptor_661e47e790f76671, []int{0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Version) XXX_Unmarshal(b []byte) error {
|
func (m *Version) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Version.Unmarshal(m, b)
|
return xxx_messageInfo_Version.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
|
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *Version) XXX_Merge(src proto.Message) {
|
func (m *Version) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_Version.Merge(dst, src)
|
xxx_messageInfo_Version.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *Version) XXX_Size() int {
|
func (m *Version) XXX_Size() int {
|
||||||
return xxx_messageInfo_Version.Size(m)
|
return xxx_messageInfo_Version.Size(m)
|
||||||
@ -100,16 +103,17 @@ func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest
|
|||||||
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
|
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ExtensionHandlerRequest) ProtoMessage() {}
|
func (*ExtensionHandlerRequest) ProtoMessage() {}
|
||||||
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
|
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_extension_d25f09c742c58c90, []int{1}
|
return fileDescriptor_661e47e790f76671, []int{1}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
|
func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
|
return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
|
return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
|
func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_ExtensionHandlerRequest.Merge(dst, src)
|
xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *ExtensionHandlerRequest) XXX_Size() int {
|
func (m *ExtensionHandlerRequest) XXX_Size() int {
|
||||||
return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
|
return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
|
||||||
@ -159,16 +163,17 @@ func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerRespon
|
|||||||
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
|
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ExtensionHandlerResponse) ProtoMessage() {}
|
func (*ExtensionHandlerResponse) ProtoMessage() {}
|
||||||
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
|
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_extension_d25f09c742c58c90, []int{2}
|
return fileDescriptor_661e47e790f76671, []int{2}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
|
func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
|
return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
|
return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
|
func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_ExtensionHandlerResponse.Merge(dst, src)
|
xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *ExtensionHandlerResponse) XXX_Size() int {
|
func (m *ExtensionHandlerResponse) XXX_Size() int {
|
||||||
return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
|
return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
|
||||||
@ -216,16 +221,17 @@ func (m *Wrapper) Reset() { *m = Wrapper{} }
|
|||||||
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
|
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Wrapper) ProtoMessage() {}
|
func (*Wrapper) ProtoMessage() {}
|
||||||
func (*Wrapper) Descriptor() ([]byte, []int) {
|
func (*Wrapper) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_extension_d25f09c742c58c90, []int{3}
|
return fileDescriptor_661e47e790f76671, []int{3}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Wrapper) XXX_Unmarshal(b []byte) error {
|
func (m *Wrapper) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Wrapper.Unmarshal(m, b)
|
return xxx_messageInfo_Wrapper.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
|
return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *Wrapper) XXX_Merge(src proto.Message) {
|
func (m *Wrapper) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_Wrapper.Merge(dst, src)
|
xxx_messageInfo_Wrapper.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *Wrapper) XXX_Size() int {
|
func (m *Wrapper) XXX_Size() int {
|
||||||
return xxx_messageInfo_Wrapper.Size(m)
|
return xxx_messageInfo_Wrapper.Size(m)
|
||||||
@ -264,31 +270,31 @@ func init() {
|
|||||||
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
|
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("extension.proto", fileDescriptor_extension_d25f09c742c58c90) }
|
func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) }
|
||||||
|
|
||||||
var fileDescriptor_extension_d25f09c742c58c90 = []byte{
|
var fileDescriptor_661e47e790f76671 = []byte{
|
||||||
// 357 bytes of a gzipped FileDescriptorProto
|
// 362 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40,
|
||||||
0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64,
|
0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50,
|
||||||
0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46,
|
0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7,
|
||||||
0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7,
|
0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85,
|
||||||
0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22,
|
0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f,
|
||||||
0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11,
|
0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71,
|
||||||
0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18,
|
0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e,
|
||||||
0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8,
|
0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d,
|
||||||
0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c,
|
0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef,
|
||||||
0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69,
|
0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35,
|
||||||
0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef,
|
0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14,
|
||||||
0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3,
|
0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39,
|
||||||
0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87,
|
0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08,
|
||||||
0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44,
|
0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81,
|
||||||
0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80,
|
0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3,
|
||||||
0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4,
|
0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8,
|
||||||
0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54,
|
0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35,
|
||||||
0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c,
|
0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0,
|
||||||
0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e,
|
0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18,
|
||||||
0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2,
|
0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09,
|
||||||
0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa,
|
0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d,
|
||||||
0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b,
|
0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00,
|
||||||
0x80, 0x51, 0x02, 0x00, 0x00,
|
0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md
151 vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go (generated, vendored)
@ -6,7 +6,8 @@
 package grpc_middleware
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc"
 )
 
@ -18,35 +19,19 @@ import (
|
|||||||
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
|
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
|
||||||
n := len(interceptors)
|
n := len(interceptors)
|
||||||
|
|
||||||
if n > 1 {
|
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||||
lastI := n - 1
|
chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
|
||||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
|
||||||
var (
|
return currentInter(currentCtx, currentReq, info, currentHandler)
|
||||||
chainHandler grpc.UnaryHandler
|
|
||||||
curI int
|
|
||||||
)
|
|
||||||
|
|
||||||
chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
|
|
||||||
if curI == lastI {
|
|
||||||
return handler(currentCtx, currentReq)
|
|
||||||
}
|
|
||||||
curI++
|
|
||||||
resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
|
|
||||||
curI--
|
|
||||||
return resp, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return interceptors[0](ctx, req, info, chainHandler)
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if n == 1 {
|
chainedHandler := handler
|
||||||
return interceptors[0]
|
for i := n - 1; i >= 0; i-- {
|
||||||
}
|
chainedHandler = chainer(interceptors[i], chainedHandler)
|
||||||
|
}
|
||||||
|
|
||||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
return chainedHandler(ctx, req)
|
||||||
return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
|
||||||
return handler(ctx, req)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -58,35 +43,19 @@ func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnarySer
|
|||||||
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
|
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
|
||||||
n := len(interceptors)
|
n := len(interceptors)
|
||||||
|
|
||||||
if n > 1 {
|
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||||
lastI := n - 1
|
chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
|
||||||
return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
|
||||||
var (
|
return currentInter(currentSrv, currentStream, info, currentHandler)
|
||||||
chainHandler grpc.StreamHandler
|
|
||||||
curI int
|
|
||||||
)
|
|
||||||
|
|
||||||
chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
|
|
||||||
if curI == lastI {
|
|
||||||
return handler(currentSrv, currentStream)
|
|
||||||
}
|
|
||||||
curI++
|
|
||||||
err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
|
|
||||||
curI--
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return interceptors[0](srv, stream, info, chainHandler)
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if n == 1 {
|
chainedHandler := handler
|
||||||
return interceptors[0]
|
for i := n - 1; i >= 0; i-- {
|
||||||
}
|
chainedHandler = chainer(interceptors[i], chainedHandler)
|
||||||
|
}
|
||||||
|
|
||||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
return chainedHandler(srv, ss)
|
||||||
return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
|
||||||
return handler(srv, stream)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -97,35 +66,19 @@ func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.Stream
|
|||||||
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
|
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
|
||||||
n := len(interceptors)
|
n := len(interceptors)
|
||||||
|
|
||||||
if n > 1 {
|
|
||||||
lastI := n - 1
|
|
||||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
|
||||||
var (
|
|
||||||
chainHandler grpc.UnaryInvoker
|
|
||||||
curI int
|
|
||||||
)
|
|
||||||
|
|
||||||
chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
|
|
||||||
if curI == lastI {
|
|
||||||
return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
|
|
||||||
}
|
|
||||||
curI++
|
|
||||||
err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
|
|
||||||
curI--
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if n == 1 {
|
|
||||||
return interceptors[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
|
||||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||||
return invoker(ctx, method, req, reply, cc, opts...)
|
chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
|
||||||
|
return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
|
||||||
|
return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
chainedInvoker := invoker
|
||||||
|
for i := n - 1; i >= 0; i-- {
|
||||||
|
chainedInvoker = chainer(interceptors[i], chainedInvoker)
|
||||||
|
}
|
||||||
|
|
||||||
|
return chainedInvoker(ctx, method, req, reply, cc, opts...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -136,35 +89,19 @@ func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryCli
|
|||||||
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
|
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
|
||||||
n := len(interceptors)
|
n := len(interceptors)
|
||||||
|
|
||||||
if n > 1 {
|
|
||||||
lastI := n - 1
|
|
||||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
|
||||||
var (
|
|
||||||
chainHandler grpc.Streamer
|
|
||||||
curI int
|
|
||||||
)
|
|
||||||
|
|
||||||
chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
|
|
||||||
if curI == lastI {
|
|
||||||
return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
|
|
||||||
}
|
|
||||||
curI++
|
|
||||||
stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
|
|
||||||
curI--
|
|
||||||
return stream, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if n == 1 {
|
|
||||||
return interceptors[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
|
||||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||||
return streamer(ctx, desc, cc, method, opts...)
|
chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
|
||||||
|
return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||||
|
return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
chainedStreamer := streamer
|
||||||
|
for i := n - 1; i >= 0; i-- {
|
||||||
|
chainedStreamer = chainer(interceptors[i], chainedStreamer)
|
||||||
|
}
|
||||||
|
|
||||||
|
return chainedStreamer(ctx, desc, cc, method, opts...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
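The chain.go hunks above replace the old index-tracking implementation (the chainHandler/curI/lastI bookkeeping and the separate n > 1, n == 1, n == 0 branches) with a small chainer closure that is folded over the interceptor slice from last to first, so interceptors[0] ends up outermost. A simplified, dependency-free sketch of that folding pattern; the handler and interceptor types here are stand-ins, not the real gRPC signatures:

```go
package main

import "fmt"

// Simplified stand-ins for grpc.UnaryHandler and grpc.UnaryServerInterceptor,
// so the folding pattern can be shown without a gRPC dependency.
type handler func(req string) string
type interceptor func(req string, next handler) string

// chain folds the interceptors from last to first around the final handler,
// mirroring the chainer/for-loop structure introduced in chain.go.
func chain(interceptors []interceptor, final handler) handler {
	chained := final
	for i := len(interceptors) - 1; i >= 0; i-- {
		next := chained
		inter := interceptors[i]
		chained = func(req string) string { return inter(req, next) }
	}
	return chained
}

func main() {
	logging := func(req string, next handler) string { return "log(" + next(req) + ")" }
	auth := func(req string, next handler) string { return "auth(" + next(req) + ")" }

	h := chain([]interceptor{logging, auth}, func(req string) string { return req })
	fmt.Println(h("request")) // log(auth(request)): the first interceptor wraps outermost
}
```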
1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/recovery/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md

1 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/README.md (generated, vendored)
@ -1 +0,0 @@
-DOC.md
3 vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go (generated, vendored)
@ -4,7 +4,8 @@
 package grpc_middleware
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc"
 )
 
10 vendor/github.com/onsi/ginkgo/config/config.go (generated, vendored)
@ -20,7 +20,7 @@ import (
 	"fmt"
 )
 
-const VERSION = "1.8.0"
+const VERSION = "1.10.1"
 
 type GinkgoConfigType struct {
 	RandomSeed int64
@ -52,13 +52,14 @@ type DefaultReporterConfigType struct {
 	Succinct     bool
 	Verbose      bool
 	FullTrace    bool
+	ReportPassed bool
 }
 
 var DefaultReporterConfig = DefaultReporterConfigType{}
 
 func processPrefix(prefix string) string {
 	if prefix != "" {
-		prefix = prefix + "."
+		prefix += "."
 	}
 	return prefix
 }
@ -98,6 +99,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
 	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
 	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
 	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
+	flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
 }
 
 func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
@ -196,5 +198,9 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
 		result = append(result, fmt.Sprintf("--%strace", prefix))
 	}
 
+	if reporter.ReportPassed {
+		result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
+	}
+
 	return result
 }
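The new ReportPassed option registers a reportPassed boolean flag and propagates it through BuildFlagArgs. A hedged sketch of how the flag could be exercised programmatically, assuming the vendored v1 ginkgo config package and its exported Flags/BuildFlagArgs functions shown in the hunks above:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Register the ginkgo.* flags, including the new ginkgo.reportPassed,
	// on a throwaway flag set.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	config.Flags(fs, "ginkgo", false)

	// Reconstruct the CLI arguments for a configuration with ReportPassed enabled.
	reporter := config.DefaultReporterConfigType{ReportPassed: true}
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfigType{}, reporter)
	fmt.Println(args) // expected to include "--ginkgo.reportPassed"
}
```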
4 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go (generated, vendored)
@ -283,7 +283,7 @@ func GinkgoRecover() {
 //BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
 //
 //In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
 //or method and, within that Describe, outline a number of Contexts and Whens.
 func Describe(text string, body func()) bool {
 	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
@ -499,7 +499,7 @@ func AfterSuite(body interface{}, timeout ...float64) bool {
 //until that node is done before running.
 //
 //SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
-//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
+//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
 //to the second function (on all the other nodes).
 //
 //The functions have the following signatures. The first function (which only runs on node 1) has the signature:
20 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go (generated, vendored)
@ -11,19 +11,35 @@ import (
 
 func New(skip int) types.CodeLocation {
 	_, file, line, _ := runtime.Caller(skip + 1)
-	stackTrace := PruneStack(string(debug.Stack()), skip)
+	stackTrace := PruneStack(string(debug.Stack()), skip+1)
 	return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
 }
 
+// PruneStack removes references to functions that are internal to Ginkgo
+// and the Go runtime from a stack string and a certain number of stack entries
+// at the beginning of the stack. The stack string has the format
+// as returned by runtime/debug.Stack. The leading goroutine information is
+// optional and always removed if present. Beware that runtime/debug.Stack
+// adds itself as first entry, so typically skip must be >= 1 to remove that
+// entry.
 func PruneStack(fullStackTrace string, skip int) string {
 	stack := strings.Split(fullStackTrace, "\n")
+	// Ensure that the even entries are the method names and the
+	// the odd entries the source code information.
+	if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
+		// Ignore "goroutine 29 [running]:" line.
+		stack = stack[1:]
+	}
+	// The "+1" is for skipping over the initial entry, which is
+	// runtime/debug.Stack() itself.
 	if len(stack) > 2*(skip+1) {
 		stack = stack[2*(skip+1):]
 	}
 	prunedStack := []string{}
 	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
 	for i := 0; i < len(stack)/2; i++ {
-		if !re.Match([]byte(stack[i*2])) {
+		// We filter out based on the source code file name.
+		if !re.Match([]byte(stack[i*2+1])) {
 			prunedStack = append(prunedStack, stack[i*2])
 			prunedStack = append(prunedStack, stack[i*2+1])
 		}
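The reworked PruneStack first strips the leading "goroutine N [running]:" header so that the remaining lines alternate between a function entry and its file:line location, and then filters on the location line instead of the function line. A tiny, standalone illustration (not Ginkgo code) of the stack layout the pruning relies on:

```go
package main

import (
	"fmt"
	"runtime/debug"
	"strings"
)

func main() {
	lines := strings.Split(string(debug.Stack()), "\n")
	// Line 0 is the goroutine header that PruneStack now strips, e.g.
	// "goroutine 1 [running]:". After it, entries come in pairs: a
	// function-name line followed by its "file:line" location line.
	for i, l := range lines[:5] {
		fmt.Printf("%d: %s\n", i, l)
	}
}
```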
2 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go (generated, vendored)
@ -17,7 +17,7 @@ type benchmarker struct {
 
 func newBenchmarker() *benchmarker {
 	return &benchmarker{
-		measurements: make(map[string]*types.SpecMeasurement, 0),
+		measurements: make(map[string]*types.SpecMeasurement),
 	}
 }
 
12 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go (generated, vendored)
@ -54,11 +54,11 @@ func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporte
 		config:       config,
 		stenographer: stenographer,
 
-		suiteBeginnings: make(chan configAndSuite, 0),
-		beforeSuites:    make(chan *types.SetupSummary, 0),
-		afterSuites:     make(chan *types.SetupSummary, 0),
-		specCompletions: make(chan *types.SpecSummary, 0),
-		suiteEndings:    make(chan *types.SuiteSummary, 0),
+		suiteBeginnings: make(chan configAndSuite),
+		beforeSuites:    make(chan *types.SetupSummary),
+		afterSuites:     make(chan *types.SetupSummary),
+		specCompletions: make(chan *types.SpecSummary),
+		suiteEndings:    make(chan *types.SuiteSummary),
 	}
 
 	go aggregator.mux()
@ -227,7 +227,7 @@ func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (fi
 	aggregatedSuiteSummary.SuiteSucceeded = true
 
 	for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
-		if suiteSummary.SuiteSucceeded == false {
+		if !suiteSummary.SuiteSucceeded {
 			aggregatedSuiteSummary.SuiteSucceeded = false
 		}
 
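The benchmarker and aggregator hunks drop the explicit 0 capacity from make calls. This is a lint-driven cleanup with no behavior change: make(chan T) and make(chan T, 0) both produce an unbuffered channel, and a map size hint of 0 is likewise redundant. A quick check, independent of the vendored code:

```go
package main

import "fmt"

func main() {
	// Both forms create an unbuffered channel; the explicit 0 is redundant.
	a := make(chan int)
	b := make(chan int, 0)
	fmt.Println(cap(a), cap(b)) // 0 0

	// The same goes for a map size hint of 0.
	m := make(map[string]int, 0)
	n := make(map[string]int)
	fmt.Println(len(m), len(n)) // 0 0
}
```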
2 vendor/github.com/onsi/ginkgo/internal/remote/server.go (generated, vendored)
@ -213,7 +213,7 @@ func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Re
 	c := spec_iterator.Counter{}
 	server.lock.Lock()
 	c.Index = server.counter
-	server.counter = server.counter + 1
+	server.counter++
 	server.lock.Unlock()
 
 	json.NewEncoder(writer).Encode(c)
10 vendor/github.com/onsi/ginkgo/internal/spec/spec.go (generated, vendored)
@ -107,11 +107,11 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
 		NumberOfSamples:        spec.subject.Samples(),
 		ComponentTexts:         componentTexts,
 		ComponentCodeLocations: componentCodeLocations,
 		State:                  spec.getState(),
 		RunTime:                runTime,
 		Failure:                spec.failure,
 		Measurements:           spec.measurementsReport(),
 		SuiteID:                suiteID,
 	}
 }
 
4 vendor/github.com/onsi/ginkgo/internal/spec/specs.go (generated, vendored)
@ -107,11 +107,11 @@ func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string,
 		toMatch := e.toMatch(description, i)
 
 		if focusFilter != nil {
-			matchesFocus = focusFilter.Match([]byte(toMatch))
+			matchesFocus = focusFilter.Match(toMatch)
 		}
 
 		if skipFilter != nil {
-			matchesSkip = skipFilter.Match([]byte(toMatch))
+			matchesSkip = skipFilter.Match(toMatch)
 		}
 
 		if !matchesFocus || matchesSkip {
2 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go (generated, vendored)
@ -300,7 +300,7 @@ func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
 }
 
 func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
-	if failed && len(summary.CapturedOutput) == 0 {
+	if len(summary.CapturedOutput) == 0 {
 		summary.CapturedOutput = string(runner.writer.Bytes())
 	}
 	for i := len(runner.reporters) - 1; i >= 1; i-- {
3 vendor/github.com/onsi/ginkgo/reporters/default_reporter.go (generated, vendored)
@ -62,6 +62,9 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
 			reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
 		} else {
 			reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
+			if reporter.config.ReportPassed {
+				reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
+			}
 		}
 	case types.SpecStatePending:
 		reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
25 vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go (generated, vendored)
@ -32,12 +32,17 @@ type JUnitTestSuite struct {
 type JUnitTestCase struct {
 	Name           string               `xml:"name,attr"`
 	ClassName      string               `xml:"classname,attr"`
+	PassedMessage  *JUnitPassedMessage  `xml:"passed,omitempty"`
 	FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
 	Skipped        *JUnitSkipped        `xml:"skipped,omitempty"`
 	Time           float64              `xml:"time,attr"`
 	SystemOut      string               `xml:"system-out,omitempty"`
 }
 
+type JUnitPassedMessage struct {
+	Message string `xml:",chardata"`
+}
+
 type JUnitFailureMessage struct {
 	Type    string `xml:"type,attr"`
 	Message string `xml:",chardata"`
@ -48,9 +53,10 @@ type JUnitSkipped struct {
 }
 
 type JUnitReporter struct {
 	suite          JUnitTestSuite
 	filename       string
 	testSuiteName  string
+	ReporterConfig config.DefaultReporterConfigType
 }
 
 //NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
@ -60,12 +66,13 @@ func NewJUnitReporter(filename string) *JUnitReporter {
 	}
 }
 
-func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
 	reporter.suite = JUnitTestSuite{
 		Name:      summary.SuiteDescription,
 		TestCases: []JUnitTestCase{},
 	}
 	reporter.testSuiteName = summary.SuiteDescription
+	reporter.ReporterConfig = config.DefaultReporterConfig
 }
 
 func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
@ -105,11 +112,21 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
 		Name:      strings.Join(specSummary.ComponentTexts[1:], " "),
 		ClassName: reporter.testSuiteName,
 	}
+	if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
+		testCase.PassedMessage = &JUnitPassedMessage{
+			Message: specSummary.CapturedOutput,
+		}
+	}
 	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
 		testCase.FailureMessage = &JUnitFailureMessage{
 			Type:    reporter.failureTypeForState(specSummary.State),
 			Message: failureMessage(specSummary.Failure),
 		}
+		if specSummary.State == types.SpecStatePanicked {
+			testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
+				specSummary.Failure.ForwardedPanic,
+				specSummary.Failure.Location.FullStackTrace)
+		}
 		testCase.SystemOut = specSummary.CapturedOutput
 	}
 	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
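With ReportPassed enabled, a passing spec now carries its captured output in a `passed` child element, per the xml struct tags added above. A small sketch that marshals the exported JUnitTestCase type to show the effect of those tags; it assumes the vendored ginkgo version that includes the new PassedMessage field, and the values are purely illustrative:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/onsi/ginkgo/reporters"
)

func main() {
	tc := reporters.JUnitTestCase{
		Name:          "works as expected",
		ClassName:     "MySuite",
		Time:          0.42,
		PassedMessage: &reporters.JUnitPassedMessage{Message: "captured stdout"},
	}
	out, err := xml.MarshalIndent(tc, "", "  ")
	if err != nil {
		panic(err)
	}
	// The captured output appears as character data inside a <passed> element.
	fmt.Println(string(out))
}
```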
9 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go (generated, vendored)
@ -22,8 +22,9 @@ const (
 )
 
 type TeamCityReporter struct {
 	writer         io.Writer
 	testSuiteName  string
+	ReporterConfig config.DefaultReporterConfigType
 }
 
 func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
@ -65,6 +66,10 @@ func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
 func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
 	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
 
+	if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
+		details := escape(specSummary.CapturedOutput)
+		fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']", messageId, testName, details)
+	}
 	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
 		message := escape(specSummary.Failure.ComponentCodeLocation.String())
 		details := escape(specSummary.Failure.Message)
2 vendor/github.com/onsi/ginkgo/types/types.go (generated, vendored)
@ -17,7 +17,7 @@ each node does not deterministically know how many specs it will end up running.
 
 Unfortunately making such a change would break backward compatibility.
 
-Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields
+Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
 with -1.
 */
 type SuiteSummary struct {
37
vendor/github.com/onsi/gomega/format/format.go
generated
vendored
37
vendor/github.com/onsi/gomega/format/format.go
generated
vendored
@ -1,6 +1,9 @@
|
|||||||
/*
|
/*
|
||||||
Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
|
Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
// untested sections: 4
|
||||||
|
|
||||||
package format
|
package format
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -33,7 +36,15 @@ var PrintContextObjects = false
|
|||||||
// TruncatedDiff choose if we should display a truncated pretty diff or not
|
// TruncatedDiff choose if we should display a truncated pretty diff or not
|
||||||
var TruncatedDiff = true
|
var TruncatedDiff = true
|
||||||
|
|
||||||
// Ctx interface defined here to keep backwards compatability with go < 1.7
|
// TruncateThreshold (default 50) specifies the maximum length string to print in string comparison assertion error
|
||||||
|
// messages.
|
||||||
|
var TruncateThreshold uint = 50
|
||||||
|
|
||||||
|
// CharactersAroundMismatchToInclude (default 5) specifies how many contextual characters should be printed before and
|
||||||
|
// after the first diff location in a truncated string assertion error message.
|
||||||
|
var CharactersAroundMismatchToInclude uint = 5
|
||||||
|
|
||||||
|
// Ctx interface defined here to keep backwards compatibility with go < 1.7
|
||||||
// It matches the context.Context interface
|
// It matches the context.Context interface
|
||||||
type Ctx interface {
|
type Ctx interface {
|
||||||
Deadline() (deadline time.Time, ok bool)
|
Deadline() (deadline time.Time, ok bool)
|
||||||
@ -58,7 +69,7 @@ Generates a formatted matcher success/failure message of the form:
|
|||||||
<message>
|
<message>
|
||||||
<pretty printed expected>
|
<pretty printed expected>
|
||||||
|
|
||||||
If expected is omited, then the message looks like:
|
 If expected is omitted, then the message looks like:

 	Expected
 	    <pretty printed actual>
@@ -85,7 +96,7 @@ to equal |
 */
func MessageWithDiff(actual, message, expected string) string {
-	if TruncatedDiff && len(actual) >= truncateThreshold && len(expected) >= truncateThreshold {
+	if TruncatedDiff && len(actual) >= int(TruncateThreshold) && len(expected) >= int(TruncateThreshold) {
		diffPoint := findFirstMismatch(actual, expected)
		formattedActual := truncateAndFormat(actual, diffPoint)
		formattedExpected := truncateAndFormat(expected, diffPoint)
@@ -97,14 +108,23 @@ func MessageWithDiff(actual, message, expected string) string {
		padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|"
		return Message(formattedActual, message+padding, formattedExpected)
	}
+
+	actual = escapedWithGoSyntax(actual)
+	expected = escapedWithGoSyntax(expected)
+
	return Message(actual, message, expected)
}

+func escapedWithGoSyntax(str string) string {
+	withQuotes := fmt.Sprintf("%q", str)
+	return withQuotes[1 : len(withQuotes)-1]
+}
+
func truncateAndFormat(str string, index int) string {
	leftPadding := `...`
	rightPadding := `...`

-	start := index - charactersAroundMismatchToInclude
+	start := index - int(CharactersAroundMismatchToInclude)
	if start < 0 {
		start = 0
		leftPadding = ""
@@ -112,7 +132,7 @@ func truncateAndFormat(str string, index int) string {

	// slice index must include the mis-matched character
	lengthOfMismatchedCharacter := 1
-	end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter
+	end := index + int(CharactersAroundMismatchToInclude) + lengthOfMismatchedCharacter
	if end > len(str) {
		end = len(str)
		rightPadding = ""
@@ -141,11 +161,6 @@ func findFirstMismatch(a, b string) int {
	return 0
}

-const (
-	truncateThreshold                 = 50
-	charactersAroundMismatchToInclude = 5
-)
-
/*
Pretty prints the passed in object at the passed in indentation level.

@@ -288,7 +303,7 @@ func formatString(object interface{}, indentation uint) string {
		}
	}

-		return fmt.Sprintf("%s", result)
+		return result
	} else {
		return fmt.Sprintf("%q", object)
	}
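The format.go change above replaces the private truncateThreshold and charactersAroundMismatchToInclude constants with the exported TruncateThreshold and CharactersAroundMismatchToInclude variables, so consumers of the vendored gomega can tune how failure-message diffs are truncated. A minimal, hedged sketch of how a test might use that (the threshold values here are illustrative, not taken from this diff):

package example_test

import (
	"testing"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/format"
)

func TestLongStringDiff(t *testing.T) {
	g := gomega.NewGomegaWithT(t)

	// Illustrative values: only truncate very long strings and show a wider
	// window around the first mismatching character.
	format.TruncateThreshold = 500
	format.CharactersAroundMismatchToInclude = 10

	g.Expect("a long expected string").To(gomega.Equal("a long expected string"))
}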
10  vendor/github.com/onsi/gomega/gomega_dsl.go  (generated, vendored)
@@ -24,7 +24,7 @@ import (
	"github.com/onsi/gomega/types"
)

-const GOMEGA_VERSION = "1.5.0"
+const GOMEGA_VERSION = "1.7.0"

const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -155,7 +155,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
//	ExpectWithOffset(1, "foo").To(Equal("foo"))
//
// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
-// this is used to modify the call-stack offset when computing line numbers.
+// that is used to modify the call-stack offset when computing line numbers.
//
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
@@ -242,7 +242,7 @@ func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface
// assert that all other values are nil/zero.
// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
//
-// Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem.
+// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
// For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
//
//	Consistently(channel).ShouldNot(Receive())
@@ -280,7 +280,7 @@ func SetDefaultEventuallyPollingInterval(t time.Duration) {
	defaultEventuallyPollingInterval = t
}

-// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satsified for this long.
+// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
func SetDefaultConsistentlyDuration(t time.Duration) {
	defaultConsistentlyDuration = t
}
@@ -320,7 +320,7 @@ type GomegaAsyncAssertion = AsyncAssertion
// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
// and is used to annotate failure messages.
//
-// All methods return a bool that is true if hte assertion passed and false if it failed.
+// All methods return a bool that is true if the assertion passed and false if it failed.
//
// Example:
//
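The ExpectWithOffset comment above describes the call-stack offset used to attribute failures to the caller rather than to the helper. A hedged sketch of the pattern it documents (the helper name is made up for illustration):

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// expectHealthy is a hypothetical helper; offset 1 makes a failure point at
// the test line that called expectHealthy, not at the line inside the helper.
func expectHealthy(status string) {
	ExpectWithOffset(1, status).To(Equal("healthy"))
}

func TestService(t *testing.T) {
	RegisterTestingT(t)
	expectHealthy("healthy")
}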
2  vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package asyncassertion

import (
16  vendor/github.com/onsi/gomega/matchers.go  (generated, vendored)
@@ -269,6 +269,22 @@ func ContainElement(element interface{}) types.GomegaMatcher {
	}
}

+//BeElementOf succeeds if actual is contained in the passed in elements.
+//BeElementOf() always uses Equal() to perform the match.
+//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+//as the reverse of ContainElement() that operates with Equal() to perform the match.
+//    Expect(2).Should(BeElementOf([]int{1, 2}))
+//    Expect(2).Should(BeElementOf([2]int{1, 2}))
+//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
+//    Expect(2).Should(BeElementOf(1, 2))
+//
+//Actual must be typed.
+func BeElementOf(elements ...interface{}) types.GomegaMatcher {
+	return &matchers.BeElementOfMatcher{
+		Elements: elements,
+	}
+}
+
//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
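A short usage sketch of the new BeElementOf matcher added above, as it would appear in a consumer test (the surrounding test is illustrative):

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestBeElementOf(t *testing.T) {
	g := NewGomegaWithT(t)

	// A single slice or array argument behaves like the reverse of ContainElement.
	g.Expect(2).To(BeElementOf([]int{1, 2}))

	// Multiple arguments are sugar for Or(Equal(1), Equal(3)).
	g.Expect(1).To(BeElementOf(1, 3))
	g.Expect(2).NotTo(BeElementOf(1, 3))
}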
2  vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_a_directory.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 5
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 5
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 3
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (
57  vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go  (generated, vendored, new file)
@@ -0,0 +1,57 @@
+// untested sections: 1
+
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type BeElementOfMatcher struct {
+	Elements []interface{}
+}
+
+func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) {
+	if reflect.TypeOf(actual) == nil {
+		return false, fmt.Errorf("BeElement matcher expects actual to be typed")
+	}
+
+	length := len(matcher.Elements)
+	valueAt := func(i int) interface{} {
+		return matcher.Elements[i]
+	}
+	// Special handling of a single element of type Array or Slice
+	if length == 1 && isArrayOrSlice(valueAt(0)) {
+		element := valueAt(0)
+		value := reflect.ValueOf(element)
+		length = value.Len()
+		valueAt = func(i int) interface{} {
+			return value.Index(i).Interface()
+		}
+	}
+
+	var lastError error
+	for i := 0; i < length; i++ {
+		matcher := &EqualMatcher{Expected: valueAt(i)}
+		success, err := matcher.Match(actual)
+		if err != nil {
+			lastError = err
+			continue
+		}
+		if success {
+			return true, nil
+		}
+	}
+
+	return false, lastError
+}
+
+func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be an element of", matcher.Elements)
+}
+
+func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be an element of", matcher.Elements)
+}
2  vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_false_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_identical_to.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import "github.com/onsi/gomega/format"

2  vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 4
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 3
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 3
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/be_true_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/consist_of.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 3
+
package matchers

import (
22  vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (
@@ -22,19 +24,21 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
	}

	value := reflect.ValueOf(actual)
-	var keys []reflect.Value
+	var valueAt func(int) interface{}
	if isMap(actual) {
-		keys = value.MapKeys()
+		keys := value.MapKeys()
+		valueAt = func(i int) interface{} {
+			return value.MapIndex(keys[i]).Interface()
+		}
+	} else {
+		valueAt = func(i int) interface{} {
+			return value.Index(i).Interface()
+		}
	}

	var lastError error
	for i := 0; i < value.Len(); i++ {
-		var success bool
-		var err error
-		if isMap(actual) {
-			success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
-		} else {
-			success, err = elemMatcher.Match(value.Index(i).Interface())
-		}
+		success, err := elemMatcher.Match(valueAt(i))
		if err != nil {
			lastError = err
			continue
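The ContainElementMatcher refactor above folds the map and slice branches into a single valueAt accessor. A hedged sketch of the behaviour it preserves, from a consumer's point of view:

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestContainElement(t *testing.T) {
	g := NewGomegaWithT(t)

	// Slices and arrays: matched element by element.
	g.Expect([]string{"a", "b"}).To(ContainElement("b"))

	// Maps: matched against each value (not key), which is what the
	// MapIndex branch of the matcher iterates over.
	g.Expect(map[string]int{"x": 1, "y": 2}).To(ContainElement(2))
}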
2  vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/have_key_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 6
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections:10
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 2
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/receive_matcher.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 3
+
package matchers

import (

2  vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go  (generated, vendored)
@@ -1,3 +1,5 @@
+// untested sections: 5
+
package matchers

import (
@@ -1,6 +1,5 @@
package bipartitegraph

-import "errors"
import "fmt"

import . "github.com/onsi/gomega/matchers/support/goraph/node"
@@ -28,7 +27,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
	for j, rightValue := range rightValues {
		neighbours, err := neighbours(leftValue, rightValue)
		if err != nil {
-			return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error()))
+			return nil, fmt.Errorf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())
		}

		if neighbours {
3  vendor/github.com/onsi/gomega/matchers/type_support.go  (generated, vendored)
@@ -6,6 +6,9 @@ See the docs for Gomega for documentation on the matchers

http://onsi.github.io/gomega/
*/

+// untested sections: 11
+
package matchers

import (
18  vendor/github.com/prometheus/common/expfmt/text_create.go  (generated, vendored)
@@ -14,9 +14,10 @@
package expfmt

import (
-	"bytes"
+	"bufio"
	"fmt"
	"io"
+	"io/ioutil"
	"math"
	"strconv"
	"strings"
@@ -27,7 +28,7 @@ import (
	dto "github.com/prometheus/client_model/go"
)

-// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
// implements it.
type enhancedWriter interface {
	io.Writer
@@ -37,14 +38,13 @@ type enhancedWriter interface {
}

const (
-	initialBufSize    = 512
	initialNumBufSize = 24
)

var (
	bufPool = sync.Pool{
		New: func() interface{} {
-			return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+			return bufio.NewWriter(ioutil.Discard)
		},
	}
	numBufPool = sync.Pool{
@@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
	}

	// Try the interface upgrade. If it doesn't work, we'll use a
-	// bytes.Buffer from the sync.Pool and write out its content to out in a
-	// single go in the end.
+	// bufio.Writer from the sync.Pool.
	w, ok := out.(enhancedWriter)
	if !ok {
-		b := bufPool.Get().(*bytes.Buffer)
-		b.Reset()
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
		w = b
		defer func() {
-			bWritten, bErr := out.Write(b.Bytes())
-			written = bWritten
+			bErr := b.Flush()
			if err == nil {
				err = bErr
			}
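With the change above, an output writer that does not already implement enhancedWriter is wrapped in a pooled bufio.Writer instead of being staged in a bytes.Buffer, so text is streamed to the destination and flushed at the end. A small, hedged sketch of the call site this affects; the metric family below is illustrative:

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Illustrative metric family; the name and value are made up.
	mf := &dto.MetricFamily{
		Name: proto.String("example_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}

	// os.Stdout is a plain io.Writer, so MetricFamilyToText wraps it in a
	// pooled bufio.Writer and flushes it when it is done.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}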
13  vendor/github.com/prometheus/common/expfmt/text_parse.go  (generated, vendored)
@@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn {
	// - Other labels have to be added to currentLabels for signature calculation.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		if p.currentLabelPair.GetName() == model.QuantileLabel {
-			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
				return nil
@@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn {
	// Similar special treatment of histograms.
	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		if p.currentLabelPair.GetName() == model.BucketLabel {
-			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
				return nil
@@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn {
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
-	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+	value, err := parseFloat(p.currentToken.String())
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
@@ -755,3 +755,10 @@ func histogramMetricName(name string) string {
		return name
	}
}
+
+func parseFloat(s string) (float64, error) {
+	if strings.ContainsAny(s, "pP_") {
+		return 0, fmt.Errorf("unsupported character in float")
+	}
+	return strconv.ParseFloat(s, 64)
+}
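The new parseFloat helper above rejects 'p', 'P' and '_' before delegating to strconv.ParseFloat. This appears to track Go 1.13, where ParseFloat started accepting hexadecimal floats and digit-separating underscores, neither of which is valid in the Prometheus text exposition format. The sketch below only illustrates the difference and is not part of the vendored code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFloat mirrors the vendored helper: reject characters that Go 1.13+
// strconv.ParseFloat would accept but the exposition format forbids.
func parseFloat(s string) (float64, error) {
	if strings.ContainsAny(s, "pP_") {
		return 0, fmt.Errorf("unsupported character in float")
	}
	return strconv.ParseFloat(s, 64)
}

func main() {
	for _, s := range []string{"0.95", "1_000", "0x1p-2"} {
		v, err := parseFloat(s)
		fmt.Println(s, v, err) // only "0.95" parses successfully
	}
}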
85  vendor/github.com/prometheus/procfs/arp.go  (generated, vendored, new file)
@@ -0,0 +1,85 @@
New 85-line vendored file, starting with the standard Apache 2.0 header from the Prometheus Authors. It defines ARPEntry, one row of /proc/net/arp (IPAddr net.IP, HWAddr net.HardwareAddr, Device string), and func (fs FS) GatherARPEntries() ([]ARPEntry, error), which reads fs.proc.Path("net/arp") and parses it via parseARPEntries: header rows and blank lines are skipped, six-column data rows are converted by parseARPEntry (IP from column 0, hardware address from column 3, device from column 5), and any other column count is reported as an error.
2  vendor/github.com/prometheus/procfs/buddyinfo.go  (generated, vendored)
@@ -31,7 +31,7 @@ type BuddyInfo struct {
	Sizes  []float64
}

-// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
	file, err := os.Open(fs.proc.Path("buddyinfo"))
	if err != nil {
166  vendor/github.com/prometheus/procfs/cpuinfo.go  (generated, vendored, new file)
@@ -0,0 +1,166 @@
New 166-line vendored file, starting with the standard Apache 2.0 header. It defines the CPUInfo struct for /proc/cpuinfo entries (Processor, VendorID, CPUFamily, Model, ModelName, Stepping, Microcode, CPUMHz, CacheSize, PhysicalID, Siblings, CoreID, CPUCores, APICID, InitialAPICID, FPU, FPUException, CPUIDLevel, WP, Flags, Bugs, BogoMips, CLFlushSize, CacheAlignment, AddressSizes, PowerManagement) and func (fs FS) CPUInfo() ([]CPUInfo, error), which reads fs.proc.Path("cpuinfo") and hands it to parseCPUInfo. parseCPUInfo scans the file line by line, starts a new CPUInfo entry on each "processor" key, and fills the remaining fields from the "key: value" pairs, using strconv.ParseUint and strconv.ParseFloat for the numeric fields and strings.Fields for Flags and Bugs.
131  vendor/github.com/prometheus/procfs/crypto.go  (generated, vendored, new file)
@@ -0,0 +1,131 @@
New 131-line vendored file, starting with the standard Apache 2.0 header. It defines the Crypto struct for /proc/crypto entries (Name, Driver, Module, Type, Selftest, Geniv, Internal, Async, plus pointer-valued numeric fields such as Blocksize, Chunksize, Digestsize, Ivsize, Maxauthsize, MaxKeysize, MinKeysize, Seedsize, Walksize, Alignmask, Ctxsize, Priority, Refcnt) and func (fs FS) Crypto() ([]Crypto, error). parseCrypto splits the data on blank lines into per-algorithm blocks and fills each struct from the "key : value" lines, using util.NewValueParser for the numeric fields and strconv.ParseBool for the async flag.
|
88
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
Normal file
88
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseUint32s parses a slice of strings into a slice of uint32s.
|
||||||
|
func ParseUint32s(ss []string) ([]uint32, error) {
|
||||||
|
us := make([]uint32, 0, len(ss))
|
||||||
|
for _, s := range ss {
|
||||||
|
u, err := strconv.ParseUint(s, 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
us = append(us, uint32(u))
|
||||||
|
}
|
||||||
|
|
||||||
|
return us, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseUint64s parses a slice of strings into a slice of uint64s.
|
||||||
|
func ParseUint64s(ss []string) ([]uint64, error) {
|
||||||
|
us := make([]uint64, 0, len(ss))
|
||||||
|
for _, s := range ss {
|
||||||
|
u, err := strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
us = append(us, u)
|
||||||
|
}
|
||||||
|
|
||||||
|
return us, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
|
||||||
|
func ParsePInt64s(ss []string) ([]*int64, error) {
|
||||||
|
us := make([]*int64, 0, len(ss))
|
||||||
|
for _, s := range ss {
|
||||||
|
u, err := strconv.ParseInt(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
us = append(us, &u)
|
||||||
|
}
|
||||||
|
|
||||||
|
return us, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
|
||||||
|
func ReadUintFromFile(path string) (uint64, error) {
|
||||||
|
data, err := ioutil.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseBool parses a string into a boolean pointer.
|
||||||
|
func ParseBool(b string) *bool {
|
||||||
|
var truth bool
|
||||||
|
switch b {
|
||||||
|
case "enabled":
|
||||||
|
truth = true
|
||||||
|
case "disabled":
|
||||||
|
truth = false
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &truth
|
||||||
|
}
|
45  vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go  (generated, vendored, new file)
@@ -0,0 +1,45 @@
New 45-line vendored file with build tag "+build linux,!appengine", starting with the standard Apache 2.0 header. SysReadFile(file string) (string, error) opens the file and performs a single syscall.Read into a 128-byte buffer instead of using ioutil.ReadFile, because broken hwmon drivers that return EAGAIN would otherwise make ioutil.ReadFile poll forever; the trimmed bytes read are returned as a string.
26  vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go  (generated, vendored, new file)
@@ -0,0 +1,26 @@
New 26-line vendored file with build tag "+build linux,appengine !linux", starting with the standard Apache 2.0 header. It provides the no-op counterpart of SysReadFile for platforms without the read syscall (for example Windows, or Linux on Google App Engine), returning fmt.Errorf("not supported on this platform").
77  vendor/github.com/prometheus/procfs/internal/util/valueparser.go  (generated, vendored, new file)
@@ -0,0 +1,77 @@
New 77-line vendored file, starting with the standard Apache 2.0 header. A ValueParser wraps a single string and parses it into several data types in a concise and safe way: NewValueParser(v string) *ValueParser constructs one, PInt64 and PUInt64 return pointers to the parsed value (the base is inferred from the string's prefix via strconv with base 0) and record the first failure, and Err reports any error encountered. A TODO notes that the util package is an anti-pattern and should eventually move elsewhere.
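A hedged sketch of how the new ValueParser is used by the other new procfs files (crypto.go feeds it the value side of each "key : value" line). Note that internal/util is only importable from inside the procfs module itself, and the sample input is illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/procfs/internal/util"
)

func main() {
	// Illustrative value, e.g. the right-hand side of "blocksize : 16".
	vp := util.NewValueParser("16")

	blocksize := vp.PUInt64() // *uint64, or nil if parsing failed

	if err := vp.Err(); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("blocksize:", *blocksize)
}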
91  vendor/github.com/prometheus/procfs/net_softnet.go  (generated, vendored, new file)
@@ -0,0 +1,91 @@
New 91-line vendored file, starting with the standard Apache 2.0 header. SoftnetEntry holds one row of /proc/net/softnet_stat (Processed, Dropped, TimeSqueezed, all uint), and func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) reads the file and parses each row via parseSoftnetEntries: blank lines are skipped, rows must have exactly 11 columns, and parseSoftnetEntry converts the first three hexadecimal columns with strconv.ParseUint(column, 16, 32).
30  vendor/github.com/prometheus/procfs/proc.go  (generated, vendored)
@@ -279,3 +279,33 @@ func (p Proc) fileDescriptors() ([]string, error) {
func (p Proc) path(pa ...string) string {
	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
+
+// FileDescriptorsInfo retrieves information about all file descriptors of
+// the process.
+func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	var fdinfos ProcFDInfos
+
+	for _, n := range names {
+		fdinfo, err := p.FDInfo(n)
+		if err != nil {
+			continue
+		}
+		fdinfos = append(fdinfos, *fdinfo)
+	}
+
+	return fdinfos, nil
+}
+
+// Schedstat returns task scheduling information for the process.
+func (p Proc) Schedstat() (ProcSchedstat, error) {
+	contents, err := ioutil.ReadFile(p.path("schedstat"))
+	if err != nil {
+		return ProcSchedstat{}, err
+	}
+	return parseProcSchedstat(string(contents))
+}
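A hedged usage sketch of the new Proc helpers added above (FileDescriptorsInfo and the ProcFDInfos it returns). The FS constructor and Proc lookup are assumed from the procfs API of this era and the PID handling is illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}

	// Inspect the current process.
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		panic(err)
	}

	fdinfos, err := p.FileDescriptorsInfo()
	if err != nil {
		panic(err)
	}

	watches, _ := fdinfos.InotifyWatchLen()
	fmt.Printf("%d fds, %d inotify watches\n", len(fdinfos), watches)
}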
132 vendor/github.com/prometheus/procfs/proc_fdinfo.go generated vendored Normal file
@@ -0,0 +1,132 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
	"regexp"
	"strings"
)

// Regexp variables
var (
	rPos     = regexp.MustCompile(`^pos:\s+(\d+)$`)
	rFlags   = regexp.MustCompile(`^flags:\s+(\d+)$`)
	rMntID   = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
	rInotify = regexp.MustCompile(`^inotify`)
)

// ProcFDInfo represents file descriptor information.
type ProcFDInfo struct {
	// File descriptor
	FD string
	// File offset
	Pos string
	// File access mode and status flags
	Flags string
	// Mount point ID
	MntID string
	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
	InotifyInfos []InotifyInfo
}

// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
	f, err := os.Open(p.path("fdinfo", fd))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	fdinfo, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, fmt.Errorf("could not read %s: %s", f.Name(), err)
	}

	var text, pos, flags, mntid string
	var inotify []InotifyInfo

	scanner := bufio.NewScanner(strings.NewReader(string(fdinfo)))
	for scanner.Scan() {
		text = scanner.Text()
		if rPos.MatchString(text) {
			pos = rPos.FindStringSubmatch(text)[1]
		} else if rFlags.MatchString(text) {
			flags = rFlags.FindStringSubmatch(text)[1]
		} else if rMntID.MatchString(text) {
			mntid = rMntID.FindStringSubmatch(text)[1]
		} else if rInotify.MatchString(text) {
			newInotify, err := parseInotifyInfo(text)
			if err != nil {
				return nil, err
			}
			inotify = append(inotify, *newInotify)
		}
	}

	i := &ProcFDInfo{
		FD:           fd,
		Pos:          pos,
		Flags:        flags,
		MntID:        mntid,
		InotifyInfos: inotify,
	}

	return i, nil
}

// InotifyInfo represents a single inotify line in the fdinfo file.
type InotifyInfo struct {
	// Watch descriptor number
	WD string
	// Inode number
	Ino string
	// Device ID
	Sdev string
	// Mask of events being monitored
	Mask string
}

// InotifyInfo constructor. Only available on kernel 3.8+.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
	r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
	m := r.FindStringSubmatch(line)
	i := &InotifyInfo{
		WD:   m[1],
		Ino:  m[2],
		Sdev: m[3],
		Mask: m[4],
	}
	return i, nil
}

// ProcFDInfos represents a list of ProcFDInfo structs.
type ProcFDInfos []ProcFDInfo

func (p ProcFDInfos) Len() int           { return len(p) }
func (p ProcFDInfos) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }

// InotifyWatchLen returns the total number of inotify watches
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
	length := 0
	for _, f := range p {
		length += len(f.InotifyInfos)
	}

	return length, nil
}
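The fdinfo API introduced by this file could be used roughly as follows. This is an illustrative sketch only; countInotifyWatches is a hypothetical helper, and NewFS and fs.Proc are assumptions, not code from this commit.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

// countInotifyWatches is a hypothetical helper built on the new fdinfo API:
// it walks every open descriptor of a process and sums its inotify watches.
func countInotifyWatches(p procfs.Proc) (int, error) {
	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		return 0, err
	}
	return infos.InotifyWatchLen()
}

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(os.Getpid()) // assumed lookup helper, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	n, err := countInotifyWatches(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inotify watches:", n)
}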
7 vendor/github.com/prometheus/procfs/proc_status.go generated vendored
@@ -21,7 +21,7 @@ import (
 	"strings"
 )
 
-// ProcStat provides status information about the process,
+// ProcStatus provides status information about the process,
 // read from /proc/[pid]/stat.
 type ProcStatus struct {
 	// The process ID.
@@ -29,6 +29,9 @@ type ProcStatus struct {
 	// The process name.
 	Name string
 
+	// Thread group ID.
+	TGID int
+
 	// Peak virtual memory size.
 	VmPeak uint64
 	// Virtual memory size.
@@ -113,6 +116,8 @@ func (p Proc) NewStatus() (ProcStatus, error) {
 
 func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
 	switch k {
+	case "Tgid":
+		s.TGID = int(vUint)
 	case "Name":
 		s.Name = vString
 	case "VmPeak":
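The new TGID field is reachable through the existing NewStatus parser shown in the hunk header above. A minimal sketch, again assuming the NewFS constructor and fs.Proc lookup (neither is part of this diff):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(os.Getpid()) // assumed lookup helper, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	// NewStatus parses /proc/<pid>/status; the hunk above adds the TGID field.
	st, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("name=%s tgid=%d vmpeak=%d\n", st.Name, st.TGID, st.VmPeak)
}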
118 vendor/github.com/prometheus/procfs/schedstat.go generated vendored Normal file
@@ -0,0 +1,118 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"errors"
	"os"
	"regexp"
	"strconv"
)

var (
	cpuLineRE  = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
	procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
)

// Schedstat contains scheduler statistics from /proc/schedstat
//
// See
// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
// for a detailed description of what these numbers mean.
//
// Note the current kernel documentation claims some of the time units are in
// jiffies when they are actually in nanoseconds since 2.6.23 with the
// introduction of CFS. A fix to the documentation is pending. See
// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
type Schedstat struct {
	CPUs []*SchedstatCPU
}

// SchedstatCPU contains the values from one "cpu<N>" line
type SchedstatCPU struct {
	CPUNum string

	RunningNanoseconds uint64
	WaitingNanoseconds uint64
	RunTimeslices      uint64
}

// ProcSchedstat contains the values from /proc/<pid>/schedstat
type ProcSchedstat struct {
	RunningNanoseconds uint64
	WaitingNanoseconds uint64
	RunTimeslices      uint64
}

// Schedstat reads data from /proc/schedstat
func (fs FS) Schedstat() (*Schedstat, error) {
	file, err := os.Open(fs.proc.Path("schedstat"))
	if err != nil {
		return nil, err
	}
	defer file.Close()

	stats := &Schedstat{}
	scanner := bufio.NewScanner(file)

	for scanner.Scan() {
		match := cpuLineRE.FindStringSubmatch(scanner.Text())
		if match != nil {
			cpu := &SchedstatCPU{}
			cpu.CPUNum = match[1]

			cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
			if err != nil {
				continue
			}

			cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
			if err != nil {
				continue
			}

			cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
			if err != nil {
				continue
			}

			stats.CPUs = append(stats.CPUs, cpu)
		}
	}

	return stats, nil
}

func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
	match := procLineRE.FindStringSubmatch(contents)

	if match != nil {
		stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
		if err != nil {
			return
		}

		stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
		if err != nil {
			return
		}

		stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
		return
	}

	err = errors.New("could not parse schedstat")
	return
}
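A minimal sketch of the system-wide FS.Schedstat reader added above, assuming only the procfs.NewFS constructor (not shown in this diff):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	// Schedstat parses /proc/schedstat and returns one entry per "cpu<N>" line.
	stats, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	for _, cpu := range stats.CPUs {
		fmt.Printf("cpu%s: running=%dns waiting=%dns timeslices=%d\n",
			cpu.CPUNum, cpu.RunningNanoseconds, cpu.WaitingNanoseconds, cpu.RunTimeslices)
	}
}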
210 vendor/github.com/prometheus/procfs/vm.go generated vendored Normal file
@@ -0,0 +1,210 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// The VM interface is described at
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string
type VM struct {
	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
	BlockDump                 *int64   // /proc/sys/vm/block_dump
	CompactUnevictableAllowed *int64   // /proc/sys/vm/compact_unevictable_allowed
	DirtyBackgroundBytes      *int64   // /proc/sys/vm/dirty_background_bytes
	DirtyBackgroundRatio      *int64   // /proc/sys/vm/dirty_background_ratio
	DirtyBytes                *int64   // /proc/sys/vm/dirty_bytes
	DirtyExpireCentisecs      *int64   // /proc/sys/vm/dirty_expire_centisecs
	DirtyRatio                *int64   // /proc/sys/vm/dirty_ratio
	DirtytimeExpireSeconds    *int64   // /proc/sys/vm/dirtytime_expire_seconds
	DirtyWritebackCentisecs   *int64   // /proc/sys/vm/dirty_writeback_centisecs
	DropCaches                *int64   // /proc/sys/vm/drop_caches
	ExtfragThreshold          *int64   // /proc/sys/vm/extfrag_threshold
	HugetlbShmGroup           *int64   // /proc/sys/vm/hugetlb_shm_group
	LaptopMode                *int64   // /proc/sys/vm/laptop_mode
	LegacyVaLayout            *int64   // /proc/sys/vm/legacy_va_layout
	LowmemReserveRatio        []*int64 // /proc/sys/vm/lowmem_reserve_ratio
	MaxMapCount               *int64   // /proc/sys/vm/max_map_count
	MemoryFailureEarlyKill    *int64   // /proc/sys/vm/memory_failure_early_kill
	MemoryFailureRecovery     *int64   // /proc/sys/vm/memory_failure_recovery
	MinFreeKbytes             *int64   // /proc/sys/vm/min_free_kbytes
	MinSlabRatio              *int64   // /proc/sys/vm/min_slab_ratio
	MinUnmappedRatio          *int64   // /proc/sys/vm/min_unmapped_ratio
	MmapMinAddr               *int64   // /proc/sys/vm/mmap_min_addr
	NrHugepages               *int64   // /proc/sys/vm/nr_hugepages
	NrHugepagesMempolicy      *int64   // /proc/sys/vm/nr_hugepages_mempolicy
	NrOvercommitHugepages     *int64   // /proc/sys/vm/nr_overcommit_hugepages
	NumaStat                  *int64   // /proc/sys/vm/numa_stat
	NumaZonelistOrder         string   // /proc/sys/vm/numa_zonelist_order
	OomDumpTasks              *int64   // /proc/sys/vm/oom_dump_tasks
	OomKillAllocatingTask     *int64   // /proc/sys/vm/oom_kill_allocating_task
	OvercommitKbytes          *int64   // /proc/sys/vm/overcommit_kbytes
	OvercommitMemory          *int64   // /proc/sys/vm/overcommit_memory
	OvercommitRatio           *int64   // /proc/sys/vm/overcommit_ratio
	PageCluster               *int64   // /proc/sys/vm/page-cluster
	PanicOnOom                *int64   // /proc/sys/vm/panic_on_oom
	PercpuPagelistFraction    *int64   // /proc/sys/vm/percpu_pagelist_fraction
	StatInterval              *int64   // /proc/sys/vm/stat_interval
	Swappiness                *int64   // /proc/sys/vm/swappiness
	UserReserveKbytes         *int64   // /proc/sys/vm/user_reserve_kbytes
	VfsCachePressure          *int64   // /proc/sys/vm/vfs_cache_pressure
	WatermarkBoostFactor      *int64   // /proc/sys/vm/watermark_boost_factor
	WatermarkScaleFactor      *int64   // /proc/sys/vm/watermark_scale_factor
	ZoneReclaimMode           *int64   // /proc/sys/vm/zone_reclaim_mode
}

// VM reads the VM statistics from the specified `proc` filesystem.
func (fs FS) VM() (*VM, error) {
	path := fs.proc.Path("sys/vm")
	file, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if !file.Mode().IsDir() {
		return nil, fmt.Errorf("%s is not a directory", path)
	}

	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}

	var vm VM
	for _, f := range files {
		if f.IsDir() {
			continue
		}

		name := filepath.Join(path, f.Name())
		// ignore errors on read, as there are some write only
		// in /proc/sys/vm
		value, err := util.SysReadFile(name)
		if err != nil {
			continue
		}
		vp := util.NewValueParser(value)

		switch f.Name() {
		case "admin_reserve_kbytes":
			vm.AdminReserveKbytes = vp.PInt64()
		case "block_dump":
			vm.BlockDump = vp.PInt64()
		case "compact_unevictable_allowed":
			vm.CompactUnevictableAllowed = vp.PInt64()
		case "dirty_background_bytes":
			vm.DirtyBackgroundBytes = vp.PInt64()
		case "dirty_background_ratio":
			vm.DirtyBackgroundRatio = vp.PInt64()
		case "dirty_bytes":
			vm.DirtyBytes = vp.PInt64()
		case "dirty_expire_centisecs":
			vm.DirtyExpireCentisecs = vp.PInt64()
		case "dirty_ratio":
			vm.DirtyRatio = vp.PInt64()
		case "dirtytime_expire_seconds":
			vm.DirtytimeExpireSeconds = vp.PInt64()
		case "dirty_writeback_centisecs":
			vm.DirtyWritebackCentisecs = vp.PInt64()
		case "drop_caches":
			vm.DropCaches = vp.PInt64()
		case "extfrag_threshold":
			vm.ExtfragThreshold = vp.PInt64()
		case "hugetlb_shm_group":
			vm.HugetlbShmGroup = vp.PInt64()
		case "laptop_mode":
			vm.LaptopMode = vp.PInt64()
		case "legacy_va_layout":
			vm.LegacyVaLayout = vp.PInt64()
		case "lowmem_reserve_ratio":
			stringSlice := strings.Fields(value)
			pint64Slice := make([]*int64, 0, len(stringSlice))
			for _, value := range stringSlice {
				vp := util.NewValueParser(value)
				pint64Slice = append(pint64Slice, vp.PInt64())
			}
			vm.LowmemReserveRatio = pint64Slice
		case "max_map_count":
			vm.MaxMapCount = vp.PInt64()
		case "memory_failure_early_kill":
			vm.MemoryFailureEarlyKill = vp.PInt64()
		case "memory_failure_recovery":
			vm.MemoryFailureRecovery = vp.PInt64()
		case "min_free_kbytes":
			vm.MinFreeKbytes = vp.PInt64()
		case "min_slab_ratio":
			vm.MinSlabRatio = vp.PInt64()
		case "min_unmapped_ratio":
			vm.MinUnmappedRatio = vp.PInt64()
		case "mmap_min_addr":
			vm.MmapMinAddr = vp.PInt64()
		case "nr_hugepages":
			vm.NrHugepages = vp.PInt64()
		case "nr_hugepages_mempolicy":
			vm.NrHugepagesMempolicy = vp.PInt64()
		case "nr_overcommit_hugepages":
			vm.NrOvercommitHugepages = vp.PInt64()
		case "numa_stat":
			vm.NumaStat = vp.PInt64()
		case "numa_zonelist_order":
			vm.NumaZonelistOrder = value
		case "oom_dump_tasks":
			vm.OomDumpTasks = vp.PInt64()
		case "oom_kill_allocating_task":
			vm.OomKillAllocatingTask = vp.PInt64()
		case "overcommit_kbytes":
			vm.OvercommitKbytes = vp.PInt64()
		case "overcommit_memory":
			vm.OvercommitMemory = vp.PInt64()
		case "overcommit_ratio":
			vm.OvercommitRatio = vp.PInt64()
		case "page-cluster":
			vm.PageCluster = vp.PInt64()
		case "panic_on_oom":
			vm.PanicOnOom = vp.PInt64()
		case "percpu_pagelist_fraction":
			vm.PercpuPagelistFraction = vp.PInt64()
		case "stat_interval":
			vm.StatInterval = vp.PInt64()
		case "swappiness":
			vm.Swappiness = vp.PInt64()
		case "user_reserve_kbytes":
			vm.UserReserveKbytes = vp.PInt64()
		case "vfs_cache_pressure":
			vm.VfsCachePressure = vp.PInt64()
		case "watermark_boost_factor":
			vm.WatermarkBoostFactor = vp.PInt64()
		case "watermark_scale_factor":
			vm.WatermarkScaleFactor = vp.PInt64()
		case "zone_reclaim_mode":
			vm.ZoneReclaimMode = vp.PInt64()
		}
		if err := vp.Err(); err != nil {
			return nil, err
		}
	}

	return &vm, nil
}
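A minimal sketch of FS.VM, assuming the procfs.NewFS constructor. Every numeric field is a pointer, so sysctls that cannot be read simply stay nil:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	// Fields are pointers because write-only or unreadable sysctls are skipped.
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
	if vm.OvercommitMemory != nil {
		fmt.Println("vm.overcommit_memory =", *vm.OvercommitMemory)
	}
}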
196 vendor/github.com/prometheus/procfs/zoneinfo.go generated vendored Normal file
@@ -0,0 +1,196 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"regexp"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// Zoneinfo holds info parsed from /proc/zoneinfo.
type Zoneinfo struct {
	Node                       string
	Zone                       string
	NrFreePages                *int64
	Min                        *int64
	Low                        *int64
	High                       *int64
	Scanned                    *int64
	Spanned                    *int64
	Present                    *int64
	Managed                    *int64
	NrActiveAnon               *int64
	NrInactiveAnon             *int64
	NrIsolatedAnon             *int64
	NrAnonPages                *int64
	NrAnonTransparentHugepages *int64
	NrActiveFile               *int64
	NrInactiveFile             *int64
	NrIsolatedFile             *int64
	NrFilePages                *int64
	NrSlabReclaimable          *int64
	NrSlabUnreclaimable        *int64
	NrMlockStack               *int64
	NrKernelStack              *int64
	NrMapped                   *int64
	NrDirty                    *int64
	NrWriteback                *int64
	NrUnevictable              *int64
	NrShmem                    *int64
	NrDirtied                  *int64
	NrWritten                  *int64
	NumaHit                    *int64
	NumaMiss                   *int64
	NumaForeign                *int64
	NumaInterleave             *int64
	NumaLocal                  *int64
	NumaOther                  *int64
	Protection                 []*int64
}

var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)

// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
	if err != nil {
		return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
	}
	zoneinfo, err := parseZoneinfo(data)
	if err != nil {
		return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
	}
	return zoneinfo, nil
}

func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {

	zoneinfo := []Zoneinfo{}

	zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
	for _, block := range zoneinfoBlocks {
		var zoneinfoElement Zoneinfo
		lines := strings.Split(string(block), "\n")
		for _, line := range lines {

			if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
				zoneinfoElement.Node = nodeZone[1]
				zoneinfoElement.Zone = nodeZone[2]
				continue
			}
			if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
				zoneinfoElement.Zone = ""
				continue
			}
			parts := strings.Fields(strings.TrimSpace(line))
			if len(parts) < 2 {
				continue
			}
			vp := util.NewValueParser(parts[1])
			switch parts[0] {
			case "nr_free_pages":
				zoneinfoElement.NrFreePages = vp.PInt64()
			case "min":
				zoneinfoElement.Min = vp.PInt64()
			case "low":
				zoneinfoElement.Low = vp.PInt64()
			case "high":
				zoneinfoElement.High = vp.PInt64()
			case "scanned":
				zoneinfoElement.Scanned = vp.PInt64()
			case "spanned":
				zoneinfoElement.Spanned = vp.PInt64()
			case "present":
				zoneinfoElement.Present = vp.PInt64()
			case "managed":
				zoneinfoElement.Managed = vp.PInt64()
			case "nr_active_anon":
				zoneinfoElement.NrActiveAnon = vp.PInt64()
			case "nr_inactive_anon":
				zoneinfoElement.NrInactiveAnon = vp.PInt64()
			case "nr_isolated_anon":
				zoneinfoElement.NrIsolatedAnon = vp.PInt64()
			case "nr_anon_pages":
				zoneinfoElement.NrAnonPages = vp.PInt64()
			case "nr_anon_transparent_hugepages":
				zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
			case "nr_active_file":
				zoneinfoElement.NrActiveFile = vp.PInt64()
			case "nr_inactive_file":
				zoneinfoElement.NrInactiveFile = vp.PInt64()
			case "nr_isolated_file":
				zoneinfoElement.NrIsolatedFile = vp.PInt64()
			case "nr_file_pages":
				zoneinfoElement.NrFilePages = vp.PInt64()
			case "nr_slab_reclaimable":
				zoneinfoElement.NrSlabReclaimable = vp.PInt64()
			case "nr_slab_unreclaimable":
				zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
			case "nr_mlock_stack":
				zoneinfoElement.NrMlockStack = vp.PInt64()
			case "nr_kernel_stack":
				zoneinfoElement.NrKernelStack = vp.PInt64()
			case "nr_mapped":
				zoneinfoElement.NrMapped = vp.PInt64()
			case "nr_dirty":
				zoneinfoElement.NrDirty = vp.PInt64()
			case "nr_writeback":
				zoneinfoElement.NrWriteback = vp.PInt64()
			case "nr_unevictable":
				zoneinfoElement.NrUnevictable = vp.PInt64()
			case "nr_shmem":
				zoneinfoElement.NrShmem = vp.PInt64()
			case "nr_dirtied":
				zoneinfoElement.NrDirtied = vp.PInt64()
			case "nr_written":
				zoneinfoElement.NrWritten = vp.PInt64()
			case "numa_hit":
				zoneinfoElement.NumaHit = vp.PInt64()
			case "numa_miss":
				zoneinfoElement.NumaMiss = vp.PInt64()
			case "numa_foreign":
				zoneinfoElement.NumaForeign = vp.PInt64()
			case "numa_interleave":
				zoneinfoElement.NumaInterleave = vp.PInt64()
			case "numa_local":
				zoneinfoElement.NumaLocal = vp.PInt64()
			case "numa_other":
				zoneinfoElement.NumaOther = vp.PInt64()
			case "protection:":
				protectionParts := strings.Split(line, ":")
				protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
				protectionValues = strings.Replace(protectionValues, ")", "", 1)
				protectionValues = strings.TrimSpace(protectionValues)
				protectionStringMap := strings.Split(protectionValues, ", ")
				val, err := util.ParsePInt64s(protectionStringMap)
				if err == nil {
					zoneinfoElement.Protection = val
				}
			}

		}

		zoneinfo = append(zoneinfo, zoneinfoElement)
	}
	return zoneinfo, nil
}
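A minimal sketch of FS.Zoneinfo, again assuming the procfs.NewFS constructor (not part of this diff):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not in this diff
	if err != nil {
		log.Fatal(err)
	}
	// Zoneinfo returns one entry per node/zone block in /proc/zoneinfo.
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages != nil {
			fmt.Printf("node %s zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}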
38 vendor/github.com/spf13/pflag/bool_slice.go generated vendored
@@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string {
 	return "[" + out + "]"
 }
 
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+	return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+	i, err := s.fromString(val)
+	if err != nil {
+		return err
+	}
+	*s.value = append(*s.value, i)
+	return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+	out := make([]bool, len(val))
+	for i, d := range val {
+		var err error
+		out[i], err = s.fromString(d)
+		if err != nil {
+			return err
+		}
+	}
+	*s.value = out
+	return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = s.toString(d)
+	}
+	return out
+}
+
 func boolSliceConv(val string) (interface{}, error) {
 	val = strings.Trim(val, "[]")
 	// Empty string would cause a slice with one (empty) entry
Some files were not shown because too many files have changed in this diff.