Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-05-21 14:56:41 +00:00
rebase: update replaced k8s.io modules to v0.33.0
Signed-off-by: Niels de Vos <ndevos@ibm.com>
parent dd77e72800
commit 107407b44b
56 e2e/go.mod
@@ -1,7 +1,8 @@
module github.com/ceph/ceph-csi/e2e

go 1.23.1
toolchain go1.24.1
go 1.24.0

toolchain go1.24.2

// my own dependencies
replace (
@@ -19,20 +20,20 @@ require (
k8s.io/apimachinery v0.33.0
k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.33.0
k8s.io/kubernetes v1.32.3
k8s.io/kubernetes v1.33.0
k8s.io/pod-security-admission v0.33.0
)

replace (
k8s.io/api => k8s.io/api v0.32.2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.2
k8s.io/client-go => k8s.io/client-go v0.32.2
k8s.io/cri-client => k8s.io/cri-client v0.32.2
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.2
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.2
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.2
k8s.io/kubectl => k8s.io/kubectl v0.32.2
k8s.io/kubelet => k8s.io/kubelet v0.32.2
k8s.io/api => k8s.io/api v0.33.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.0
k8s.io/client-go => k8s.io/client-go v0.33.0
k8s.io/cri-client => k8s.io/cri-client v0.33.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.0
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.0
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.0
k8s.io/kubectl => k8s.io/kubectl v0.33.0
k8s.io/kubelet => k8s.io/kubelet v0.33.0
)

require github.com/ceph/ceph-csi v2.0.1+incompatible
@@ -41,19 +42,19 @@ require (
cel.dev/expr v0.20.0 // indirect
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/containerd/api v1.7.19 // indirect
github.com/containerd/errdefs v0.1.0 // indirect
github.com/containerd/containerd/api v1.8.0 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/ttrpc v1.2.5 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/containerd/ttrpc v1.2.6 // indirect
github.com/containerd/typeurl/v2 v2.2.2 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.3.4 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
@@ -70,16 +71,13 @@ require (
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cadvisor v0.51.0 // indirect
github.com/google/cadvisor v0.52.1 // indirect
github.com/google/cel-go v0.23.2 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -95,8 +93,9 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/cgroups v0.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.2.1 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opencontainers/selinux v1.11.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -109,9 +108,6 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.21 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
go.etcd.io/etcd/client/v3 v3.5.21 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
@@ -123,8 +119,6 @@ require (
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.39.0 // indirect
@@ -141,23 +135,21 @@ require (
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.32.1 // indirect
k8s.io/apiserver v0.33.0 // indirect
k8s.io/component-base v0.33.0 // indirect
k8s.io/component-helpers v0.33.0 // indirect
k8s.io/controller-manager v0.33.0 // indirect
k8s.io/cri-api v0.32.2 // indirect
k8s.io/cri-api v0.33.0 // indirect
k8s.io/cri-client v0.0.0 // indirect
k8s.io/csi-translation-lib v0.32.2 // indirect
k8s.io/dynamic-resource-allocation v0.0.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kms v0.33.0 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
k8s.io/kube-scheduler v0.0.0 // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.32.2 // indirect
k8s.io/kubelet v0.33.0 // indirect
k8s.io/mount-utils v0.32.2 // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
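For context, this follows the usual pattern for Kubernetes consumers: k8s.io/kubernetes v1.33.0 is published together with its staging modules at v0.33.0, and the e2e module pins those staging modules through the replace block shown above. A condensed sketch of that pinning pattern (an illustrative excerpt, not the full file):

```
// e2e/go.mod (condensed, illustrative): pin the k8s.io staging modules to the
// version that matches the k8s.io/kubernetes release under test.
replace (
	k8s.io/api => k8s.io/api v0.33.0
	k8s.io/client-go => k8s.io/client-go v0.33.0
	k8s.io/kubelet => k8s.io/kubelet v0.33.0
	// ...the remaining k8s.io/* staging modules are pinned the same way
)
```

After the pins are bumped, `go mod tidy` and `go mod vendor` are typically rerun, which is why the go.sum entries below change in the same commit and why vendored packages that drop out of the module graph (such as github.com/NYTimes/gziphandler further down) are deleted.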
126 e2e/go.sum
@ -4,8 +4,6 @@ github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1
|
||||
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
@ -18,23 +16,23 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
|
||||
github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
|
||||
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
|
||||
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
|
||||
github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
|
||||
github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
|
||||
github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
|
||||
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
|
||||
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/containerd/ttrpc v1.2.6 h1:zG+Kn5EZ6MUYCS1t2Hmt2J4tMVaLSFEJVOraDQwNPC4=
|
||||
github.com/containerd/ttrpc v1.2.6/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
|
||||
github.com/containerd/typeurl/v2 v2.2.2 h1:3jN/k2ysKuPCsln5Qv8bzR9cxal8XjkxPogJfSNO31k=
|
||||
github.com/containerd/typeurl/v2 v2.2.2/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8=
|
||||
github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
@ -47,8 +45,6 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=
|
||||
@ -79,14 +75,10 @@ github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cadvisor v0.51.0 h1:BspqSPdZoLKrnvuZNOvM/KiJ/A+RdixwagN20n+2H8k=
|
||||
github.com/google/cadvisor v0.51.0/go.mod h1:czGE/c/P/i0QFpVNKTFrIEzord9Y10YfpwuaSWXELc0=
|
||||
github.com/google/cadvisor v0.52.1 h1:sC8SZ6jio9ds+P2dk51bgbeYeufxo55n0X3tmrpA9as=
|
||||
github.com/google/cadvisor v0.52.1/go.mod h1:OAhPcx1nOm5YwMh/JhpUOMKyv1YKLRtS9KgzWPndHmA=
|
||||
github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
|
||||
github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
@ -95,26 +87,16 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
@ -158,12 +140,12 @@ github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus
|
||||
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
|
||||
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
|
||||
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA=
|
||||
github.com/opencontainers/cgroups v0.0.1/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/runc v1.2.1 h1:mQkmeFSUxqFaVmvIn1VQPeQIKpHFya5R07aJw0DKQa8=
|
||||
github.com/opencontainers/runc v1.2.1/go.mod h1:/PXzF0h531HTMsYQnmxXkBD7YaGShm/2zcRB79dksUc=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
|
||||
@ -188,8 +170,6 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
@ -208,30 +188,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
|
||||
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
|
||||
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
|
||||
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
|
||||
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
|
||||
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
|
||||
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
|
||||
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
|
||||
@ -308,8 +268,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
@ -325,21 +283,19 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw=
|
||||
k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y=
|
||||
k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4=
|
||||
k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA=
|
||||
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
|
||||
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
|
||||
k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs=
|
||||
k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc=
|
||||
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
|
||||
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
|
||||
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
|
||||
k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA=
|
||||
k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94=
|
||||
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
|
||||
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
|
||||
k8s.io/cloud-provider v0.33.0 h1:nVU2Q9QK7O50yaNx+pE61oDPqflsSsKygN43f5js9+I=
|
||||
k8s.io/cloud-provider v0.33.0/go.mod h1:2reyEBbsimZJKHF325vxLBD5fcJGNeJHeLjJ+jGM8Qg=
|
||||
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
|
||||
@ -348,28 +304,26 @@ k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
|
||||
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
|
||||
k8s.io/controller-manager v0.33.0 h1:O9LnTjffOe62d66gMcKLuPXsBjY5sqETWEIzg+DVL8w=
|
||||
k8s.io/controller-manager v0.33.0/go.mod h1:vQwAQnroav4+UyE2acW1Rj6CSsHPzr2/018kgRLYqlI=
|
||||
k8s.io/cri-api v0.32.2 h1:7DuaOHpOcXweZeBUbRdK0iCroxctGp73VwgrA0u7kho=
|
||||
k8s.io/cri-api v0.32.2/go.mod h1:DCzMuTh2padoinefWME0G678Mc3QFbLMF2vEweGzBAI=
|
||||
k8s.io/cri-client v0.32.2 h1:vjowJUyu14IbmifqCKJHE9rK/BPSfkXvltqN42W1Zuo=
|
||||
k8s.io/cri-client v0.32.2/go.mod h1:fRZhmmZW16Qviln8hfy+e8dd2wP/n9B6TiGxLE3zBe0=
|
||||
k8s.io/csi-translation-lib v0.32.2 h1:aLzAyaoJUc5rgtLi8Xd4No1tet6UpvUsGIgRoGnPSSE=
|
||||
k8s.io/csi-translation-lib v0.32.2/go.mod h1:PlOKan6Vc0G6a+giQbm36plJ+E1LH+GPRLAVMQMSMcY=
|
||||
k8s.io/dynamic-resource-allocation v0.32.2 h1:6wP8/GGvhhvTJLrzwPSoMJDnspmosFj1CKmfrAH6m5U=
|
||||
k8s.io/dynamic-resource-allocation v0.32.2/go.mod h1:+3qnQfvikLHVZrdZ0/gYkRiV96weUR9j7+Ph3Ui/hYU=
|
||||
k8s.io/cri-api v0.33.0 h1:YyGNgWmuSREqFPlP3XCstlHLilYdW898KwtKoaTYwBs=
|
||||
k8s.io/cri-api v0.33.0/go.mod h1:OLQvT45OpIA+tv91ZrpuFIGY+Y2Ho23poS7n115Aocs=
|
||||
k8s.io/cri-client v0.33.0 h1:NLB4SKWQqJ2jPtbbdKFY2gEEw/GKSKifSqUf4m/SwSs=
|
||||
k8s.io/cri-client v0.33.0/go.mod h1:ZIbzmm5ByB0cz0nc5qUlgKZwi1KivOGVXgearqF27cU=
|
||||
k8s.io/csi-translation-lib v0.33.0 h1:kW3xVPCTXmHmK5v/8PEVZCUZSdNRndiQat0SbN31qEM=
|
||||
k8s.io/csi-translation-lib v0.33.0/go.mod h1:Ldx85t1WxFStKQ2p5Xaimv39IkTc7/m8yNMkt8MlyoQ=
|
||||
k8s.io/dynamic-resource-allocation v0.33.0 h1:/b04omcFsLBf2vqh/t1aXhj8fs2eHgKIZzUS/y3Z2qo=
|
||||
k8s.io/dynamic-resource-allocation v0.33.0/go.mod h1:BHw7vU8bheqj6sdz31gETuvjQkNOcEleBTnrJ9j8vA8=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.33.0 h1:fhQSW/vyaWDhMp0vDuO/sLg2RlGZf4F77beSXcB4/eE=
|
||||
k8s.io/kms v0.33.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/kube-scheduler v0.32.2 h1:vBm6iIjWaD10OPmtkt/503LTKvrN8dWVceeBcpKj/ns=
|
||||
k8s.io/kube-scheduler v0.32.2/go.mod h1:dD5yuYpnsCfgZmzvncUNPdvXGJXA1hw3gXq7DH3+aCQ=
|
||||
k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us=
|
||||
k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8=
|
||||
k8s.io/kubelet v0.32.2 h1:WFTSYdt3BB1aTApDuKNI16x/4MYqqX8WBBBBh3KupDg=
|
||||
k8s.io/kubelet v0.32.2/go.mod h1:cC1ms5RS+lu0ckVr6AviCQXHLSPKEBC3D5oaCBdTGkI=
|
||||
k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA=
|
||||
k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k=
|
||||
k8s.io/kube-scheduler v0.33.0 h1:G5psn7ynB5Vjfo0ia8uz32Gv46TRa5RgTq4PTm+yjR0=
|
||||
k8s.io/kube-scheduler v0.33.0/go.mod h1:I+0aqwJ3G9f9pyfKfoN5b0uP9M6MinNpxXRlCXkM17E=
|
||||
k8s.io/kubectl v0.33.0 h1:HiRb1yqibBSCqic4pRZP+viiOBAnIdwYDpzUFejs07g=
|
||||
k8s.io/kubectl v0.33.0/go.mod h1:gAlGBuS1Jq1fYZ9AjGWbI/5Vk3M/VW2DK4g10Fpyn/0=
|
||||
k8s.io/kubelet v0.33.0 h1:4pJA2Ge6Rp0kDNV76KH7pTBiaV2T1a1874QHMcubuSU=
|
||||
k8s.io/kubelet v0.33.0/go.mod h1:iDnxbJQMy9DUNaML5L/WUlt3uJtNLWh7ZAe0JSp4Yi0=
|
||||
k8s.io/kubernetes v1.33.0 h1:BP5Y5yIzUZVeBuE/ESZvnw6TNxjXbLsCckIkljE+R0U=
|
||||
k8s.io/kubernetes v1.33.0/go.mod h1:2nWuPk0seE4+6sd0x60wQ6rYEXcV7SoeMbU0YbFm/5k=
|
||||
k8s.io/mount-utils v0.32.2 h1:aDwp+ucWiVnDr/LpRg88/dsXf/vm6gI1VZkYH3+3+Vw=
|
||||
k8s.io/mount-utils v0.32.2/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0=
|
||||
k8s.io/pod-security-admission v0.33.0 h1:di/iicB5plCq+iQeqgf2s1N5DOSzTDiOOv5OiAbuYWE=
|
||||
|
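One note on reading the go.sum hunks above: go.sum records two checksum lines per module version, one for the module's file tree and one for its go.mod file, so every bumped k8s.io module swaps a pair of lines. For example, the new kubernetes pin appears as:

```
k8s.io/kubernetes v1.33.0 h1:BP5Y5yIzUZVeBuE/ESZvnw6TNxjXbLsCckIkljE+R0U=
k8s.io/kubernetes v1.33.0/go.mod h1:2nWuPk0seE4+6sd0x60wQ6rYEXcV7SoeMbU0YbFm/5k=
```

The `h1:` prefix denotes the SHA-256 based module hash format used by the Go checksum database.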
1 e2e/vendor/github.com/NYTimes/gziphandler/.gitignore generated vendored
@@ -1 +0,0 @@
*.swp
10 e2e/vendor/github.com/NYTimes/gziphandler/.travis.yml generated vendored
@@ -1,10 +0,0 @@
language: go
go:
- 1.x
- tip
env:
- GO111MODULE=on
install:
- go mod download
script:
- go test -race -v
75 e2e/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md generated vendored
@@ -1,75 +0,0 @@
|
||||
---
|
||||
layout: code-of-conduct
|
||||
version: v1.0
|
||||
---
|
||||
|
||||
This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community.
|
||||
|
||||
Our open source community strives to:
|
||||
|
||||
* **Be friendly and patient.**
|
||||
* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
|
||||
* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
|
||||
* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one.
|
||||
* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable.
|
||||
* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.
|
||||
|
||||
## Definitions
|
||||
|
||||
Harassment includes, but is not limited to:
|
||||
|
||||
- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation
|
||||
- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
|
||||
- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle
|
||||
- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop
|
||||
- Threats of violence, both physical and psychological
|
||||
- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
|
||||
- Deliberate intimidation
|
||||
- Stalking or following
|
||||
- Harassing photography or recording, including logging online activity for harassment purposes
|
||||
- Sustained disruption of discussion
|
||||
- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour
|
||||
- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others
|
||||
- Continued one-on-one communication after requests to cease
|
||||
- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse
|
||||
- Publication of non-harassing private communication
|
||||
|
||||
Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:
|
||||
|
||||
- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
|
||||
- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
|
||||
- Refusal to explain or debate social justice concepts
|
||||
- Communicating in a ‘tone’ you don’t find congenial
|
||||
- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions
|
||||
|
||||
|
||||
### Diversity Statement
|
||||
|
||||
We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong.
|
||||
|
||||
Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected
|
||||
characteristics above, including participants with disabilities.
|
||||
|
||||
### Reporting Issues
|
||||
|
||||
If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include:
|
||||
|
||||
- Your contact information.
|
||||
- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please
|
||||
include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link.
|
||||
- Any additional information that may be helpful.
|
||||
|
||||
After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse.
|
||||
|
||||
### Attribution & Acknowledgements
|
||||
|
||||
We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration:
|
||||
|
||||
* [Django](https://www.djangoproject.com/conduct/reporting/)
|
||||
* [Python](https://www.python.org/community/diversity/)
|
||||
* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct)
|
||||
* [Contributor Covenant](http://contributor-covenant.org/)
|
||||
* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/)
|
||||
* [Citizen Code of Conduct](http://citizencodeofconduct.org/)
|
||||
|
||||
This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct
|
30 e2e/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md generated vendored
@@ -1,30 +0,0 @@
# Contributing to NYTimes/gziphandler

This is an open source project started by handful of developers at The New York Times and open to the entire Go community.

We really appreciate your help!

## Filing issues

When filing an issue, make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

## Contributing code

Before submitting changes, please follow these guidelines:

1. Check the open issues and pull requests for existing discussions.
2. Open an issue to discuss a new feature.
3. Write tests.
4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments).
5. Make sure your changes pass `go test`.
6. Make sure the entire test suite passes locally and on Travis CI.
7. Open a Pull Request.
8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).

Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file.
201 e2e/vendor/github.com/NYTimes/gziphandler/LICENSE generated vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016-2017 The New York Times Company
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
56 e2e/vendor/github.com/NYTimes/gziphandler/README.md generated vendored
@@ -1,56 +0,0 @@
Gzip Handler
============

This is a tiny Go package which wraps HTTP handlers to transparently gzip the
response body, for clients which support it. Although it's usually simpler to
leave that to a reverse proxy (like nginx or Varnish), this package is useful
when that's undesirable.

## Install
```bash
go get -u github.com/NYTimes/gziphandler
```

## Usage

Call `GzipHandler` with any handler (an object which implements the
`http.Handler` interface), and it'll return a new handler which gzips the
response. For example:

```go
package main

import (
	"io"
	"net/http"
	"github.com/NYTimes/gziphandler"
)

func main() {
	withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		io.WriteString(w, "Hello, World")
	})

	withGz := gziphandler.GzipHandler(withoutGz)

	http.Handle("/", withGz)
	http.ListenAndServe("0.0.0.0:8000", nil)
}
```

## Documentation

The docs can be found at [godoc.org][docs], as usual.

## License

[Apache 2.0][license].

[docs]: https://godoc.org/github.com/NYTimes/gziphandler
[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
532 e2e/vendor/github.com/NYTimes/gziphandler/gzip.go generated vendored
@@ -1,532 +0,0 @@
|
||||
package gziphandler // import "github.com/NYTimes/gziphandler"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
vary = "Vary"
|
||||
acceptEncoding = "Accept-Encoding"
|
||||
contentEncoding = "Content-Encoding"
|
||||
contentType = "Content-Type"
|
||||
contentLength = "Content-Length"
|
||||
)
|
||||
|
||||
type codings map[string]float64
|
||||
|
||||
const (
|
||||
// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
|
||||
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
|
||||
// The examples seem to indicate that it is.
|
||||
DefaultQValue = 1.0
|
||||
|
||||
// DefaultMinSize is the default minimum size until we enable gzip compression.
|
||||
// 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
|
||||
// If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing.
|
||||
// That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
|
||||
DefaultMinSize = 1400
|
||||
)
|
||||
|
||||
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
|
||||
// gzip.Writers. Use poolIndex to covert a compression level to an index into
|
||||
// gzipWriterPools.
|
||||
var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool
|
||||
|
||||
func init() {
|
||||
for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
|
||||
addLevelPool(i)
|
||||
}
|
||||
addLevelPool(gzip.DefaultCompression)
|
||||
}
|
||||
|
||||
// poolIndex maps a compression level to its index into gzipWriterPools. It
|
||||
// assumes that level is a valid gzip compression level.
|
||||
func poolIndex(level int) int {
|
||||
// gzip.DefaultCompression == -1, so we need to treat it special.
|
||||
if level == gzip.DefaultCompression {
|
||||
return gzip.BestCompression - gzip.BestSpeed + 1
|
||||
}
|
||||
return level - gzip.BestSpeed
|
||||
}
|
||||
|
||||
func addLevelPool(level int) {
|
||||
gzipWriterPools[poolIndex(level)] = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
// NewWriterLevel only returns error on a bad level, we are guaranteeing
|
||||
// that this will be a valid level so it is okay to ignore the returned
|
||||
// error.
|
||||
w, _ := gzip.NewWriterLevel(nil, level)
|
||||
return w
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
|
||||
// bytes before writing them to the underlying response. This doesn't close the
|
||||
// writers, so don't forget to do that.
|
||||
// It can be configured to skip responses smaller than minSize.
|
||||
type GzipResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
index int // Index for gzipWriterPools.
|
||||
gw *gzip.Writer
|
||||
|
||||
code int // Saves the WriteHeader value.
|
||||
|
||||
minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
|
||||
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
|
||||
ignore bool // If true, then we immediately pass writes through to the underlying ResponseWriter.
|
||||
|
||||
contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
|
||||
}
|
||||
|
||||
type GzipResponseWriterWithCloseNotify struct {
|
||||
*GzipResponseWriter
|
||||
}
|
||||
|
||||
func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
|
||||
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
// Write appends data to the gzip writer.
|
||||
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
|
||||
// GZIP responseWriter is initialized. Use the GZIP responseWriter.
|
||||
if w.gw != nil {
|
||||
return w.gw.Write(b)
|
||||
}
|
||||
|
||||
// If we have already decided not to use GZIP, immediately passthrough.
|
||||
if w.ignore {
|
||||
return w.ResponseWriter.Write(b)
|
||||
}
|
||||
|
||||
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
|
||||
// On the first write, w.buf changes from nil to a valid slice
|
||||
w.buf = append(w.buf, b...)
|
||||
|
||||
var (
|
||||
cl, _ = strconv.Atoi(w.Header().Get(contentLength))
|
||||
ct = w.Header().Get(contentType)
|
||||
ce = w.Header().Get(contentEncoding)
|
||||
)
|
||||
// Only continue if they didn't already choose an encoding or a known unhandled content length or type.
|
||||
if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
|
||||
// If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
|
||||
if len(w.buf) < w.minSize && cl == 0 {
|
||||
return len(b), nil
|
||||
}
|
||||
// If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
|
||||
if cl >= w.minSize || len(w.buf) >= w.minSize {
|
||||
// If a Content-Type wasn't specified, infer it from the current buffer.
|
||||
if ct == "" {
|
||||
ct = http.DetectContentType(w.buf)
|
||||
w.Header().Set(contentType, ct)
|
||||
}
|
||||
// If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
|
||||
if handleContentType(w.contentTypes, ct) {
|
||||
if err := w.startGzip(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// If we got here, we should not GZIP this response.
|
||||
if err := w.startPlain(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// startGzip initializes a GZIP writer and writes the buffer.
|
||||
func (w *GzipResponseWriter) startGzip() error {
|
||||
// Set the GZIP header.
|
||||
w.Header().Set(contentEncoding, "gzip")
|
||||
|
||||
// if the Content-Length is already set, then calls to Write on gzip
|
||||
// will fail to set the Content-Length header since its already set
|
||||
// See: https://github.com/golang/go/issues/14975.
|
||||
w.Header().Del(contentLength)
|
||||
|
||||
// Write the header to gzip response.
|
||||
if w.code != 0 {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
// Ensure that no other WriteHeader's happen
|
||||
w.code = 0
|
||||
}
|
||||
|
||||
// Initialize and flush the buffer into the gzip response if there are any bytes.
|
||||
// If there aren't any, we shouldn't initialize it yet because on Close it will
|
||||
// write the gzip header even if nothing was ever written.
|
||||
if len(w.buf) > 0 {
|
||||
// Initialize the GZIP response.
|
||||
w.init()
|
||||
n, err := w.gw.Write(w.buf)
|
||||
|
||||
// This should never happen (per io.Writer docs), but if the write didn't
|
||||
// accept the entire buffer but returned no specific error, we have no clue
|
||||
// what's going on, so abort just to be safe.
|
||||
if err == nil && n < len(w.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// startPlain writes the buffered bytes to the underlying ResponseWriter without gzip.
|
||||
func (w *GzipResponseWriter) startPlain() error {
|
||||
if w.code != 0 {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
// Ensure that no other WriteHeader's happen
|
||||
w.code = 0
|
||||
}
|
||||
w.ignore = true
|
||||
// If Write was never called then don't call Write on the underlying ResponseWriter.
|
||||
if w.buf == nil {
|
||||
return nil
|
||||
}
|
||||
n, err := w.ResponseWriter.Write(w.buf)
|
||||
w.buf = nil
|
||||
// This should never happen (per io.Writer docs), but if the write didn't
|
||||
// accept the entire buffer but returned no specific error, we have no clue
|
||||
// what's going on, so abort just to be safe.
|
||||
if err == nil && n < len(w.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteHeader just saves the response code until close or GZIP effective writes.
|
||||
func (w *GzipResponseWriter) WriteHeader(code int) {
|
||||
if w.code == 0 {
|
||||
w.code = code
|
||||
}
|
||||
}
|
||||
|
||||
// init grabs a new gzip writer from the gzipWriterPool and writes the correct
|
||||
// content encoding header.
|
||||
func (w *GzipResponseWriter) init() {
|
||||
// Bytes written during ServeHTTP are redirected to this gzip writer
|
||||
// before being written to the underlying response.
|
||||
gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
|
||||
gzw.Reset(w.ResponseWriter)
|
||||
w.gw = gzw
|
||||
}
|
||||
|
||||
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
|
||||
func (w *GzipResponseWriter) Close() error {
|
||||
if w.ignore {
|
||||
return nil
|
||||
}
|
||||
|
||||
if w.gw == nil {
|
||||
// GZIP not triggered yet, write out regular response.
|
||||
err := w.startPlain()
|
||||
// Returns the error if any at write.
|
||||
if err != nil {
|
||||
err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
err := w.gw.Close()
|
||||
gzipWriterPools[w.index].Put(w.gw)
|
||||
w.gw = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Flush flushes the underlying *gzip.Writer and then the underlying
|
||||
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
|
||||
// an http.Flusher.
|
||||
func (w *GzipResponseWriter) Flush() {
|
||||
if w.gw == nil && !w.ignore {
|
||||
// Only flush once startGzip or startPlain has been called.
|
||||
//
|
||||
// Flush is thus a no-op until we're certain whether a plain
|
||||
// or gzipped response will be served.
|
||||
return
|
||||
}
|
||||
|
||||
if w.gw != nil {
|
||||
w.gw.Flush()
|
||||
}
|
||||
|
||||
if fw, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
fw.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
|
||||
// Hijacker, its Hijack method is returned. Otherwise an error is returned.
|
||||
func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
|
||||
return hj.Hijack()
|
||||
}
|
||||
return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
|
||||
}
|
||||
|
||||
// verify Hijacker interface implementation
|
||||
var _ http.Hijacker = &GzipResponseWriter{}
|
||||
|
||||
// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
|
||||
// an error case it panics rather than returning an error.
|
||||
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
|
||||
wrap, err := NewGzipLevelHandler(level)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return wrap
|
||||
}
|
||||
|
||||
// NewGzipLevelHandler returns a wrapper function (often known as middleware)
|
||||
// which can be used to wrap an HTTP handler to transparently gzip the response
|
||||
// body if the client supports it (via the Accept-Encoding header). Responses will
|
||||
// be encoded at the given gzip compression level. An error will be returned only
|
||||
// if an invalid gzip compression level is given, so if one can ensure the level
|
||||
// is valid, the returned error can be safely ignored.
|
||||
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
|
||||
return NewGzipLevelAndMinSize(level, DefaultMinSize)
|
||||
}
|
||||
|
||||
// NewGzipLevelAndMinSize behaves like NewGzipLevelHandler except it lets the caller
|
||||
// specify the minimum size before compression.
|
||||
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
|
||||
return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
|
||||
}
|
||||
|
||||
func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
|
||||
c := &config{
|
||||
level: gzip.DefaultCompression,
|
||||
minSize: DefaultMinSize,
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
|
||||
if err := c.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return func(h http.Handler) http.Handler {
|
||||
index := poolIndex(c.level)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Add(vary, acceptEncoding)
|
||||
if acceptsGzip(r) {
|
||||
gw := &GzipResponseWriter{
|
||||
ResponseWriter: w,
|
||||
index: index,
|
||||
minSize: c.minSize,
|
||||
contentTypes: c.contentTypes,
|
||||
}
|
||||
defer gw.Close()
|
||||
|
||||
if _, ok := w.(http.CloseNotifier); ok {
|
||||
gwcn := GzipResponseWriterWithCloseNotify{gw}
|
||||
h.ServeHTTP(gwcn, r)
|
||||
} else {
|
||||
h.ServeHTTP(gw, r)
|
||||
}
|
||||
|
||||
} else {
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
})
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Parsed representation of one of the inputs to ContentTypes.
|
||||
// See https://golang.org/pkg/mime/#ParseMediaType
|
||||
type parsedContentType struct {
|
||||
mediaType string
|
||||
params map[string]string
|
||||
}
|
||||
|
||||
// equals returns whether this content type matches another content type.
|
||||
func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
|
||||
if pct.mediaType != mediaType {
|
||||
return false
|
||||
}
|
||||
// if pct has no params, don't care about other's params
|
||||
if len(pct.params) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// if pct has any params, they must be identical to other's.
|
||||
if len(pct.params) != len(params) {
|
||||
return false
|
||||
}
|
||||
for k, v := range pct.params {
|
||||
if w, ok := params[k]; !ok || v != w {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Used for functional configuration.
|
||||
type config struct {
|
||||
minSize int
|
||||
level int
|
||||
contentTypes []parsedContentType
|
||||
}
|
||||
|
||||
func (c *config) validate() error {
|
||||
if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
|
||||
return fmt.Errorf("invalid compression level requested: %d", c.level)
|
||||
}
|
||||
|
||||
if c.minSize < 0 {
|
||||
return fmt.Errorf("minimum size must be more than zero")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type option func(c *config)
|
||||
|
||||
func MinSize(size int) option {
|
||||
return func(c *config) {
|
||||
c.minSize = size
|
||||
}
|
||||
}
|
||||
|
||||
func CompressionLevel(level int) option {
|
||||
return func(c *config) {
|
||||
c.level = level
|
||||
}
|
||||
}
|
||||
|
||||
// ContentTypes specifies a list of content types to compare
|
||||
// the Content-Type header to before compressing. If none
|
||||
// match, the response will be returned as-is.
|
||||
//
|
||||
// Content types are compared in a case-insensitive, whitespace-ignored
|
||||
// manner.
|
||||
//
|
||||
// A MIME type without any other directive will match a content type
|
||||
// that has the same MIME type, regardless of that content type's other
|
||||
// directives. I.e., "text/html" will match both "text/html" and
|
||||
// "text/html; charset=utf-8".
|
||||
//
|
||||
// A MIME type with any other directive will only match a content type
|
||||
// that has the same MIME type and other directives. I.e.,
|
||||
// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
|
||||
//
|
||||
// By default, responses are gzipped regardless of
|
||||
// Content-Type.
|
||||
func ContentTypes(types []string) option {
|
||||
return func(c *config) {
|
||||
c.contentTypes = []parsedContentType{}
|
||||
for _, v := range types {
|
||||
mediaType, params, err := mime.ParseMediaType(v)
|
||||
if err == nil {
|
||||
c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
|
||||
// the client supports it (via the Accept-Encoding header). This will compress at
|
||||
// the default compression level.
|
||||
func GzipHandler(h http.Handler) http.Handler {
|
||||
wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
|
||||
return wrapper(h)
|
||||
}
|
||||
|
||||
// acceptsGzip returns true if the given HTTP request indicates that it will
|
||||
// accept a gzipped response.
|
||||
func acceptsGzip(r *http.Request) bool {
|
||||
acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
|
||||
return acceptedEncodings["gzip"] > 0.0
|
||||
}
|
||||
|
||||
// returns true if we've been configured to compress the specific content type.
|
||||
func handleContentType(contentTypes []parsedContentType, ct string) bool {
|
||||
// If contentTypes is empty we handle all content types.
|
||||
if len(contentTypes) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
mediaType, params, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range contentTypes {
|
||||
if c.equals(mediaType, params) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
|
||||
// appear in an Accept-Encoding header. It returns a map of content-codings to
|
||||
// quality values, and an error containing the errors encountered. It's probably
|
||||
// safe to ignore those, because silently ignoring errors is how the internet
|
||||
// works.
|
||||
//
|
||||
// See: http://tools.ietf.org/html/rfc2616#section-14.3.
|
||||
func parseEncodings(s string) (codings, error) {
|
||||
c := make(codings)
|
||||
var e []string
|
||||
|
||||
for _, ss := range strings.Split(s, ",") {
|
||||
coding, qvalue, err := parseCoding(ss)
|
||||
|
||||
if err != nil {
|
||||
e = append(e, err.Error())
|
||||
} else {
|
||||
c[coding] = qvalue
|
||||
}
|
||||
}
|
||||
|
||||
// TODO (adammck): Use a proper multi-error struct, so the individual errors
|
||||
// can be extracted if anyone cares.
|
||||
if len(e) > 0 {
|
||||
return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// parseCoding parses a single coding (content-coding with an optional qvalue),
|
||||
// as might appear in an Accept-Encoding header. It attempts to forgive minor
|
||||
// formatting errors.
|
||||
func parseCoding(s string) (coding string, qvalue float64, err error) {
|
||||
for n, part := range strings.Split(s, ";") {
|
||||
part = strings.TrimSpace(part)
|
||||
qvalue = DefaultQValue
|
||||
|
||||
if n == 0 {
|
||||
coding = strings.ToLower(part)
|
||||
} else if strings.HasPrefix(part, "q=") {
|
||||
qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)
|
||||
|
||||
if qvalue < 0.0 {
|
||||
qvalue = 0.0
|
||||
} else if qvalue > 1.0 {
|
||||
qvalue = 1.0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if coding == "" {
|
||||
err = fmt.Errorf("empty content-coding")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
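The buffering logic above (gzip only for clients that accept it and only for bodies of at least `minSize` bytes) can be exercised with `net/http/httptest`; a minimal sketch, where the 2000-byte body is just an arbitrary value above `DefaultMinSize`:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/NYTimes/gziphandler"
)

func main() {
	large := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(strings.Repeat("a", 2000))) // above DefaultMinSize (1400), so compressed
	}))
	small := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hi")) // below DefaultMinSize, served uncompressed
	}))

	for name, h := range map[string]http.Handler{"large": large, "small": small} {
		req := httptest.NewRequest(http.MethodGet, "/", nil)
		req.Header.Set("Accept-Encoding", "gzip")
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, req)
		fmt.Printf("%s: Content-Encoding=%q\n", name, rec.Header().Get("Content-Encoding"))
	}
}
```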
e2e/vendor/github.com/NYTimes/gziphandler/gzip_go18.go (generated, vendored; 43 lines removed)
@@ -1,43 +0,0 @@
// +build go1.8

package gziphandler

import "net/http"

// Push initiates an HTTP/2 server push.
// Push returns ErrNotSupported if the client has disabled push or if push
// is not supported on the underlying connection.
func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
	pusher, ok := w.ResponseWriter.(http.Pusher)
	if ok && pusher != nil {
		return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
	}
	return http.ErrNotSupported
}

// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {

	if opts == nil {
		opts = &http.PushOptions{
			Header: http.Header{
				acceptEncoding: []string{"gzip"},
			},
		}
		return opts
	}

	if opts.Header == nil {
		opts.Header = http.Header{
			acceptEncoding: []string{"gzip"},
		}
		return opts
	}

	if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
		opts.Header.Add(acceptEncoding, "gzip")
		return opts
	}

	return opts
}
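Since `*GzipResponseWriter` gains a `Push` method here, wrapped handlers can initiate HTTP/2 server push directly; a minimal sketch, where the asset path and TLS file names are placeholders:

```go
package main

import (
	"io"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	handler := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if pusher, ok := w.(http.Pusher); ok {
			// setAcceptEncodingForPushOptions (above) adds "Accept-Encoding: gzip"
			// to the synthesized push request; Push returns ErrNotSupported over
			// plain HTTP/1.1 connections.
			_ = pusher.Push("/static/app.js", nil) // hypothetical asset path
		}
		w.Header().Set("Content-Type", "text/html")
		io.WriteString(w, "<html><script src=/static/app.js></script></html>")
	}))

	// HTTP/2 push requires TLS; cert.pem and key.pem are placeholders.
	http.ListenAndServeTLS("0.0.0.0:8443", "cert.pem", "key.pem", handler)
}
```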
@@ -1,3 +1,5 @@
//go:build !no_grpc

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
e2e/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_ttrpc.pb.go (generated, vendored; new file, 174 lines)
@@ -0,0 +1,174 @@
// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
|
||||
package containers
|
||||
|
||||
import (
|
||||
context "context"
|
||||
ttrpc "github.com/containerd/ttrpc"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
type TTRPCContainersService interface {
|
||||
Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error)
|
||||
List(context.Context, *ListContainersRequest) (*ListContainersResponse, error)
|
||||
ListStream(context.Context, *ListContainersRequest, TTRPCContainers_ListStreamServer) error
|
||||
Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
|
||||
Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
|
||||
Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error)
|
||||
}
|
||||
|
||||
type TTRPCContainers_ListStreamServer interface {
|
||||
Send(*ListContainerMessage) error
|
||||
ttrpc.StreamServer
|
||||
}
|
||||
|
||||
type ttrpccontainersListStreamServer struct {
|
||||
ttrpc.StreamServer
|
||||
}
|
||||
|
||||
func (x *ttrpccontainersListStreamServer) Send(m *ListContainerMessage) error {
|
||||
return x.StreamServer.SendMsg(m)
|
||||
}
|
||||
|
||||
func RegisterTTRPCContainersService(srv *ttrpc.Server, svc TTRPCContainersService) {
|
||||
srv.RegisterService("containerd.services.containers.v1.Containers", &ttrpc.ServiceDesc{
|
||||
Methods: map[string]ttrpc.Method{
|
||||
"Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req GetContainerRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Get(ctx, &req)
|
||||
},
|
||||
"List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req ListContainersRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.List(ctx, &req)
|
||||
},
|
||||
"Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req CreateContainerRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Create(ctx, &req)
|
||||
},
|
||||
"Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req UpdateContainerRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Update(ctx, &req)
|
||||
},
|
||||
"Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req DeleteContainerRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Delete(ctx, &req)
|
||||
},
|
||||
},
|
||||
Streams: map[string]ttrpc.Stream{
|
||||
"ListStream": {
|
||||
Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
|
||||
m := new(ListContainersRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, svc.ListStream(ctx, m, &ttrpccontainersListStreamServer{stream})
|
||||
},
|
||||
StreamingClient: false,
|
||||
StreamingServer: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type TTRPCContainersClient interface {
|
||||
Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error)
|
||||
List(context.Context, *ListContainersRequest) (*ListContainersResponse, error)
|
||||
ListStream(context.Context, *ListContainersRequest) (TTRPCContainers_ListStreamClient, error)
|
||||
Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
|
||||
Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
|
||||
Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error)
|
||||
}
|
||||
|
||||
type ttrpccontainersClient struct {
|
||||
client *ttrpc.Client
|
||||
}
|
||||
|
||||
func NewTTRPCContainersClient(client *ttrpc.Client) TTRPCContainersClient {
|
||||
return &ttrpccontainersClient{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ttrpccontainersClient) Get(ctx context.Context, req *GetContainerRequest) (*GetContainerResponse, error) {
|
||||
var resp GetContainerResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Get", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpccontainersClient) List(ctx context.Context, req *ListContainersRequest) (*ListContainersResponse, error) {
|
||||
var resp ListContainersResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "List", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpccontainersClient) ListStream(ctx context.Context, req *ListContainersRequest) (TTRPCContainers_ListStreamClient, error) {
|
||||
stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
|
||||
StreamingClient: false,
|
||||
StreamingServer: true,
|
||||
}, "containerd.services.containers.v1.Containers", "ListStream", req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &ttrpccontainersListStreamClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type TTRPCContainers_ListStreamClient interface {
|
||||
Recv() (*ListContainerMessage, error)
|
||||
ttrpc.ClientStream
|
||||
}
|
||||
|
||||
type ttrpccontainersListStreamClient struct {
|
||||
ttrpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *ttrpccontainersListStreamClient) Recv() (*ListContainerMessage, error) {
|
||||
m := new(ListContainerMessage)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *ttrpccontainersClient) Create(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) {
|
||||
var resp CreateContainerResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Create", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpccontainersClient) Update(ctx context.Context, req *UpdateContainerRequest) (*UpdateContainerResponse, error) {
|
||||
var resp UpdateContainerResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Update", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpccontainersClient) Delete(ctx context.Context, req *DeleteContainerRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.containers.v1.Containers", "Delete", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
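For context, the generated TTRPC bindings above are typically consumed as sketched below; the unix socket path is an assumption, nothing in this file dictates a transport:

```go
package main

import (
	"context"
	"log"
	"net"

	containers "github.com/containerd/containerd/api/services/containers/v1"
	"github.com/containerd/ttrpc"
)

func main() {
	// Hypothetical socket path; a server on the other end would call
	// containers.RegisterTTRPCContainersService(srv, impl) with a
	// TTRPCContainersService implementation.
	conn, err := net.Dial("unix", "/run/example/containers.sock")
	if err != nil {
		log.Fatal(err)
	}
	client := ttrpc.NewClient(conn)
	defer client.Close()

	svc := containers.NewTTRPCContainersClient(client)

	// List is one of the unary methods of the TTRPCContainersClient interface above.
	resp, err := svc.List(context.Background(), &containers.ListContainersRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %d containers", len(resp.Containers))
}
```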
e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go (generated, vendored; 2 lines changed)
@@ -1,3 +1,5 @@
//go:build !no_grpc

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
e2e/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_ttrpc.pb.go (generated, vendored; new file, 301 lines)
@@ -0,0 +1,301 @@
// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
|
||||
package tasks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
ttrpc "github.com/containerd/ttrpc"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
type TTRPCTasksService interface {
|
||||
Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
|
||||
Start(context.Context, *StartRequest) (*StartResponse, error)
|
||||
Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error)
|
||||
DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error)
|
||||
Get(context.Context, *GetRequest) (*GetResponse, error)
|
||||
List(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
|
||||
Kill(context.Context, *KillRequest) (*emptypb.Empty, error)
|
||||
Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error)
|
||||
ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error)
|
||||
CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error)
|
||||
Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error)
|
||||
Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error)
|
||||
ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
|
||||
Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error)
|
||||
Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error)
|
||||
Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error)
|
||||
Wait(context.Context, *WaitRequest) (*WaitResponse, error)
|
||||
}
|
||||
|
||||
func RegisterTTRPCTasksService(srv *ttrpc.Server, svc TTRPCTasksService) {
|
||||
srv.RegisterService("containerd.services.tasks.v1.Tasks", &ttrpc.ServiceDesc{
|
||||
Methods: map[string]ttrpc.Method{
|
||||
"Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req CreateTaskRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Create(ctx, &req)
|
||||
},
|
||||
"Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req StartRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Start(ctx, &req)
|
||||
},
|
||||
"Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req DeleteTaskRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Delete(ctx, &req)
|
||||
},
|
||||
"DeleteProcess": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req DeleteProcessRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.DeleteProcess(ctx, &req)
|
||||
},
|
||||
"Get": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req GetRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Get(ctx, &req)
|
||||
},
|
||||
"List": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req ListTasksRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.List(ctx, &req)
|
||||
},
|
||||
"Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req KillRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Kill(ctx, &req)
|
||||
},
|
||||
"Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req ExecProcessRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Exec(ctx, &req)
|
||||
},
|
||||
"ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req ResizePtyRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.ResizePty(ctx, &req)
|
||||
},
|
||||
"CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req CloseIORequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.CloseIO(ctx, &req)
|
||||
},
|
||||
"Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req PauseTaskRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Pause(ctx, &req)
|
||||
},
|
||||
"Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req ResumeTaskRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Resume(ctx, &req)
|
||||
},
|
||||
"ListPids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req ListPidsRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.ListPids(ctx, &req)
|
||||
},
|
||||
"Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req CheckpointTaskRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Checkpoint(ctx, &req)
|
||||
},
|
||||
"Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req UpdateTaskRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Update(ctx, &req)
|
||||
},
|
||||
"Metrics": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req MetricsRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Metrics(ctx, &req)
|
||||
},
|
||||
"Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req WaitRequest
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Wait(ctx, &req)
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type ttrpctasksClient struct {
|
||||
client *ttrpc.Client
|
||||
}
|
||||
|
||||
func NewTTRPCTasksClient(client *ttrpc.Client) TTRPCTasksService {
|
||||
return &ttrpctasksClient{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) {
|
||||
var resp CreateTaskResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Create", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) {
|
||||
var resp StartResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Start", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Delete(ctx context.Context, req *DeleteTaskRequest) (*DeleteResponse, error) {
|
||||
var resp DeleteResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Delete", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) {
|
||||
var resp DeleteResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "DeleteProcess", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
|
||||
var resp GetResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Get", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) List(ctx context.Context, req *ListTasksRequest) (*ListTasksResponse, error) {
|
||||
var resp ListTasksResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "List", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Kill(ctx context.Context, req *KillRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Kill", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Exec(ctx context.Context, req *ExecProcessRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Exec", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "ResizePty", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) CloseIO(ctx context.Context, req *CloseIORequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "CloseIO", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Pause(ctx context.Context, req *PauseTaskRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Pause", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Resume(ctx context.Context, req *ResumeTaskRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Resume", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) {
|
||||
var resp ListPidsResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "ListPids", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*CheckpointTaskResponse, error) {
|
||||
var resp CheckpointTaskResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Checkpoint", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Update(ctx context.Context, req *UpdateTaskRequest) (*emptypb.Empty, error) {
|
||||
var resp emptypb.Empty
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Update", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Metrics(ctx context.Context, req *MetricsRequest) (*MetricsResponse, error) {
|
||||
var resp MetricsResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Metrics", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *ttrpctasksClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) {
|
||||
var resp WaitResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.tasks.v1.Tasks", "Wait", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
@@ -1,3 +1,5 @@
//go:build !no_grpc

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
e2e/vendor/github.com/containerd/containerd/api/services/version/v1/version_ttrpc.pb.go (generated, vendored; new file, 45 lines)
@@ -0,0 +1,45 @@
// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/version/v1/version.proto
|
||||
package version
|
||||
|
||||
import (
|
||||
context "context"
|
||||
ttrpc "github.com/containerd/ttrpc"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
type TTRPCVersionService interface {
|
||||
Version(context.Context, *emptypb.Empty) (*VersionResponse, error)
|
||||
}
|
||||
|
||||
func RegisterTTRPCVersionService(srv *ttrpc.Server, svc TTRPCVersionService) {
|
||||
srv.RegisterService("containerd.services.version.v1.Version", &ttrpc.ServiceDesc{
|
||||
Methods: map[string]ttrpc.Method{
|
||||
"Version": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
|
||||
var req emptypb.Empty
|
||||
if err := unmarshal(&req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return svc.Version(ctx, &req)
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type ttrpcversionClient struct {
|
||||
client *ttrpc.Client
|
||||
}
|
||||
|
||||
func NewTTRPCVersionClient(client *ttrpc.Client) TTRPCVersionService {
|
||||
return &ttrpcversionClient{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ttrpcversionClient) Version(ctx context.Context, req *emptypb.Empty) (*VersionResponse, error) {
|
||||
var resp VersionResponse
|
||||
if err := c.client.Call(ctx, "containerd.services.version.v1.Version", "Version", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
e2e/vendor/github.com/containerd/containerd/api/types/event.pb.go (generated, vendored; new file, 209 lines)
@@ -0,0 +1,209 @@
//
|
||||
//Copyright The containerd Authors.
|
||||
//
|
||||
//Licensed under the Apache License, Version 2.0 (the "License");
|
||||
//you may not use this file except in compliance with the License.
|
||||
//You may obtain a copy of the License at
|
||||
//
|
||||
//http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
//Unless required by applicable law or agreed to in writing, software
|
||||
//distributed under the License is distributed on an "AS IS" BASIS,
|
||||
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//See the License for the specific language governing permissions and
|
||||
//limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.20.1
|
||||
// source: github.com/containerd/containerd/api/types/event.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Envelope struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Envelope) Reset() {
|
||||
*x = Envelope{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Envelope) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Envelope) ProtoMessage() {}
|
||||
|
||||
func (x *Envelope) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
|
||||
func (*Envelope) Descriptor() ([]byte, []int) {
|
||||
return file_github_com_containerd_containerd_api_types_event_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Envelope) GetTimestamp() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Envelope) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Envelope) GetTopic() string {
|
||||
if x != nil {
|
||||
return x.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Envelope) GetEvent() *anypb.Any {
|
||||
if x != nil {
|
||||
return x.Event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_github_com_containerd_containerd_api_types_event_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_containerd_containerd_api_types_event_proto_rawDesc = []byte{
|
||||
0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x76, 0x65,
|
||||
0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
|
||||
0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
|
||||
0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12,
|
||||
0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
|
||||
0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a,
|
||||
0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
|
||||
0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x42,
|
||||
0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
|
||||
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
|
||||
0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_github_com_containerd_containerd_api_types_event_proto_rawDescOnce sync.Once
|
||||
file_github_com_containerd_containerd_api_types_event_proto_rawDescData = file_github_com_containerd_containerd_api_types_event_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_github_com_containerd_containerd_api_types_event_proto_rawDescGZIP() []byte {
|
||||
file_github_com_containerd_containerd_api_types_event_proto_rawDescOnce.Do(func() {
|
||||
file_github_com_containerd_containerd_api_types_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_event_proto_rawDescData)
|
||||
})
|
||||
return file_github_com_containerd_containerd_api_types_event_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_github_com_containerd_containerd_api_types_event_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_github_com_containerd_containerd_api_types_event_proto_goTypes = []interface{}{
|
||||
(*Envelope)(nil), // 0: containerd.types.Envelope
|
||||
(*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
|
||||
(*anypb.Any)(nil), // 2: google.protobuf.Any
|
||||
}
|
||||
var file_github_com_containerd_containerd_api_types_event_proto_depIdxs = []int32{
|
||||
1, // 0: containerd.types.Envelope.timestamp:type_name -> google.protobuf.Timestamp
|
||||
2, // 1: containerd.types.Envelope.event:type_name -> google.protobuf.Any
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_containerd_containerd_api_types_event_proto_init() }
|
||||
func file_github_com_containerd_containerd_api_types_event_proto_init() {
|
||||
if File_github_com_containerd_containerd_api_types_event_proto != nil {
|
||||
return
|
||||
}
|
||||
file_github_com_containerd_containerd_api_types_fieldpath_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_github_com_containerd_containerd_api_types_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Envelope); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_containerd_containerd_api_types_event_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_containerd_containerd_api_types_event_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_containerd_containerd_api_types_event_proto_depIdxs,
|
||||
MessageInfos: file_github_com_containerd_containerd_api_types_event_proto_msgTypes,
|
||||
}.Build()
|
||||
File_github_com_containerd_containerd_api_types_event_proto = out.File
|
||||
file_github_com_containerd_containerd_api_types_event_proto_rawDesc = nil
|
||||
file_github_com_containerd_containerd_api_types_event_proto_goTypes = nil
|
||||
file_github_com_containerd_containerd_api_types_event_proto_depIdxs = nil
|
||||
}
|
e2e/vendor/github.com/containerd/containerd/api/types/event.proto (generated, vendored; new file, 33 lines)
@@ -0,0 +1,33 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "github.com/containerd/containerd/api/types/fieldpath.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message Envelope {
	option (containerd.types.fieldpath) = true;
	google.protobuf.Timestamp timestamp = 1;
	string namespace = 2;
	string topic = 3;
	google.protobuf.Any event = 4;
}
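The Envelope message defined here is the Go type generated in event.pb.go above; a minimal sketch of constructing one, with emptypb.Empty standing in for a real event payload and the namespace/topic strings purely illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/api/types"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Pack an arbitrary proto.Message into the Any payload field.
	payload, err := anypb.New(&emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}

	env := &types.Envelope{
		Timestamp: timestamppb.Now(),
		Namespace: "default",        // illustrative
		Topic:     "/example/topic", // illustrative
		Event:     payload,
	}
	fmt.Println(env.GetTopic(), env.GetEvent().GetTypeUrl())
}
```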
e2e/vendor/github.com/containerd/containerd/api/types/introspection.pb.go (generated, vendored; new file, 375 lines)
@@ -0,0 +1,375 @@
//
|
||||
//Copyright The containerd Authors.
|
||||
//
|
||||
//Licensed under the Apache License, Version 2.0 (the "License");
|
||||
//you may not use this file except in compliance with the License.
|
||||
//You may obtain a copy of the License at
|
||||
//
|
||||
//http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
//Unless required by applicable law or agreed to in writing, software
|
||||
//distributed under the License is distributed on an "AS IS" BASIS,
|
||||
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//See the License for the specific language governing permissions and
|
||||
//limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.20.1
|
||||
// source: github.com/containerd/containerd/api/types/introspection.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type RuntimeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RuntimePath string `protobuf:"bytes,1,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"`
|
||||
// Options correspond to CreateTaskRequest.options.
|
||||
// This is needed to pass the runc binary path, etc.
|
||||
Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RuntimeRequest) Reset() {
|
||||
*x = RuntimeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RuntimeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RuntimeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *RuntimeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RuntimeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*RuntimeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *RuntimeRequest) GetRuntimePath() string {
|
||||
if x != nil {
|
||||
return x.RuntimePath
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *RuntimeRequest) GetOptions() *anypb.Any {
|
||||
if x != nil {
|
||||
return x.Options
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RuntimeVersion struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RuntimeVersion) Reset() {
|
||||
*x = RuntimeVersion{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RuntimeVersion) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RuntimeVersion) ProtoMessage() {}
|
||||
|
||||
func (x *RuntimeVersion) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RuntimeVersion.ProtoReflect.Descriptor instead.
|
||||
func (*RuntimeVersion) Descriptor() ([]byte, []int) {
|
||||
return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *RuntimeVersion) GetVersion() string {
|
||||
if x != nil {
|
||||
return x.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *RuntimeVersion) GetRevision() string {
|
||||
if x != nil {
|
||||
return x.Revision
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RuntimeInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Version *RuntimeVersion `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
|
||||
// Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
|
||||
Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
|
||||
// OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md
|
||||
Features *anypb.Any `protobuf:"bytes,4,opt,name=features,proto3" json:"features,omitempty"`
|
||||
// Annotations of the shim. Irrelevant to features.Annotations.
|
||||
Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) Reset() {
|
||||
*x = RuntimeInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RuntimeInfo) ProtoMessage() {}
|
||||
|
||||
func (x *RuntimeInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RuntimeInfo.ProtoReflect.Descriptor instead.
|
||||
func (*RuntimeInfo) Descriptor() ([]byte, []int) {
|
||||
return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) GetVersion() *RuntimeVersion {
|
||||
if x != nil {
|
||||
return x.Version
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) GetOptions() *anypb.Any {
|
||||
if x != nil {
|
||||
return x.Options
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) GetFeatures() *anypb.Any {
|
||||
if x != nil {
|
||||
return x.Features
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RuntimeInfo) GetAnnotations() map[string]string {
|
||||
if x != nil {
|
||||
return x.Annotations
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_github_com_containerd_containerd_api_types_introspection_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc = []byte{
|
||||
0x0a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74,
|
||||
0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
|
||||
0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
|
||||
0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x61,
|
||||
0x74, 0x68, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x73, 0x22, 0x46, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72,
|
||||
0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x52,
|
||||
0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a,
|
||||
0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70,
|
||||
0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
|
||||
0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x66, 0x65,
|
||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
|
||||
0x6e, 0x79, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x0b,
|
||||
0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74,
|
||||
0x79, 0x70, 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f,
|
||||
0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
|
||||
0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e,
|
||||
0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74,
|
||||
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32,
|
||||
0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70,
|
||||
0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_rawDescOnce sync.Once
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData = file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_github_com_containerd_containerd_api_types_introspection_proto_rawDescGZIP() []byte {
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_rawDescOnce.Do(func() {
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData)
|
||||
})
|
||||
return file_github_com_containerd_containerd_api_types_introspection_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_github_com_containerd_containerd_api_types_introspection_proto_goTypes = []interface{}{
|
||||
(*RuntimeRequest)(nil), // 0: containerd.types.RuntimeRequest
|
||||
(*RuntimeVersion)(nil), // 1: containerd.types.RuntimeVersion
|
||||
(*RuntimeInfo)(nil), // 2: containerd.types.RuntimeInfo
|
||||
nil, // 3: containerd.types.RuntimeInfo.AnnotationsEntry
|
||||
(*anypb.Any)(nil), // 4: google.protobuf.Any
|
||||
}
|
||||
var file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs = []int32{
|
||||
4, // 0: containerd.types.RuntimeRequest.options:type_name -> google.protobuf.Any
|
||||
1, // 1: containerd.types.RuntimeInfo.version:type_name -> containerd.types.RuntimeVersion
|
||||
4, // 2: containerd.types.RuntimeInfo.options:type_name -> google.protobuf.Any
|
||||
4, // 3: containerd.types.RuntimeInfo.features:type_name -> google.protobuf.Any
|
||||
3, // 4: containerd.types.RuntimeInfo.annotations:type_name -> containerd.types.RuntimeInfo.AnnotationsEntry
|
||||
5, // [5:5] is the sub-list for method output_type
|
||||
5, // [5:5] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_containerd_containerd_api_types_introspection_proto_init() }
|
||||
func file_github_com_containerd_containerd_api_types_introspection_proto_init() {
|
||||
if File_github_com_containerd_containerd_api_types_introspection_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RuntimeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RuntimeVersion); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RuntimeInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_containerd_containerd_api_types_introspection_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs,
|
||||
MessageInfos: file_github_com_containerd_containerd_api_types_introspection_proto_msgTypes,
|
||||
}.Build()
|
||||
File_github_com_containerd_containerd_api_types_introspection_proto = out.File
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_rawDesc = nil
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_goTypes = nil
|
||||
file_github_com_containerd_containerd_api_types_introspection_proto_depIdxs = nil
|
||||
}
|
e2e/vendor/github.com/containerd/containerd/api/types/introspection.proto (generated, vendored, new file, 46 lines)
@ -0,0 +1,46 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/any.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message RuntimeRequest {
    string runtime_path = 1;
    // Options correspond to CreateTaskRequest.options.
    // This is needed to pass the runc binary path, etc.
    google.protobuf.Any options = 2;
}

message RuntimeVersion {
    string version = 1;
    string revision = 2;
}

message RuntimeInfo {
    string name = 1;
    RuntimeVersion version = 2;
    // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
    google.protobuf.Any options = 3;
    // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md
    google.protobuf.Any features = 4;
    // Annotations of the shim. Irrelevant to features.Annotations.
    map<string, string> annotations = 5;
}
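As an aside (not part of the vendored diff): the generated Go types for these messages expose nil-safe getters, so a caller can read a RuntimeInfo without guarding every field. The sketch below is illustrative only; describeRuntime is a hypothetical helper and the field values are made up.

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
)

// describeRuntime is a hypothetical helper that summarizes a RuntimeInfo.
// The generated getters are nil-safe, so a nil info or a missing Version
// simply yields empty strings instead of panicking.
func describeRuntime(info *types.RuntimeInfo) string {
	return fmt.Sprintf("%s %s (revision %s)",
		info.GetName(),
		info.GetVersion().GetVersion(),
		info.GetVersion().GetRevision())
}

func main() {
	// Example values only.
	info := &types.RuntimeInfo{
		Name:    "io.containerd.runc.v2",
		Version: &types.RuntimeVersion{Version: "1.2.0", Revision: "abcdef0"},
	}
	fmt.Println(describeRuntime(info))
}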
e2e/vendor/github.com/containerd/containerd/api/types/platform.pb.go (generated, vendored, 22 lines changed)
@ -45,6 +45,7 @@ type Platform struct {
|
||||
OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
|
||||
Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
|
||||
Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
|
||||
OSVersion string `protobuf:"bytes,4,opt,name=os_version,json=osVersion,proto3" json:"os_version,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Platform) Reset() {
|
||||
@ -100,6 +101,13 @@ func (x *Platform) GetVariant() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Platform) GetOsVersion() string {
|
||||
if x != nil {
|
||||
return x.OSVersion
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_github_com_containerd_containerd_api_types_platform_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []byte{
|
||||
@ -107,17 +115,19 @@ var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []b
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61,
|
||||
0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e,
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x58, 0x0a,
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x77, 0x0a,
|
||||
0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63,
|
||||
0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
|
||||
0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
|
||||
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x73, 0x5f, 0x76, 0x65,
|
||||
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x73, 0x56,
|
||||
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
|
||||
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74,
|
||||
0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
e2e/vendor/github.com/containerd/containerd/api/types/platform.proto (generated, vendored, 1 line changed)
@ -26,4 +26,5 @@ message Platform {
|
||||
    string os = 1;
    string architecture = 2;
    string variant = 3;
    string os_version = 4;
}
|
||||
|
e2e/vendor/github.com/containerd/containerd/api/types/platform_helpers.go (generated, vendored, new file, 49 lines)
@ -0,0 +1,49 @@
|
||||
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package types

import oci "github.com/opencontainers/image-spec/specs-go/v1"

// OCIPlatformToProto converts from a slice of OCI [specs.Platform] to a
// slice of the protobuf definition [Platform].
func OCIPlatformToProto(platforms []oci.Platform) []*Platform {
	ap := make([]*Platform, len(platforms))
	for i := range platforms {
		ap[i] = &Platform{
			OS:           platforms[i].OS,
			OSVersion:    platforms[i].OSVersion,
			Architecture: platforms[i].Architecture,
			Variant:      platforms[i].Variant,
		}
	}
	return ap
}

// OCIPlatformFromProto converts a slice of the protobuf definition [Platform]
// to a slice of OCI [specs.Platform].
func OCIPlatformFromProto(platforms []*Platform) []oci.Platform {
	op := make([]oci.Platform, len(platforms))
	for i := range platforms {
		op[i] = oci.Platform{
			OS:           platforms[i].OS,
			OSVersion:    platforms[i].OSVersion,
			Architecture: platforms[i].Architecture,
			Variant:      platforms[i].Variant,
		}
	}
	return op
}
|
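The two helpers above are plain field-by-field copies, so converting between the OCI and protobuf platform types is lossless for the four fields involved (including the new os_version). A minimal round-trip sketch, illustrative only and not part of the vendored code:

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
	oci "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Example platform; values are illustrative.
	in := []oci.Platform{{OS: "windows", Architecture: "amd64", OSVersion: "10.0.20348"}}

	// OCI -> proto -> OCI keeps OS, OSVersion, Architecture and Variant intact.
	pb := types.OCIPlatformToProto(in)
	out := types.OCIPlatformFromProto(pb)

	fmt.Println(out[0].OS, out[0].Architecture, out[0].OSVersion) // windows amd64 10.0.20348
}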
e2e/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go (generated, vendored, 51 lines changed)
@ -59,6 +59,8 @@ type Sandbox struct {
|
||||
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
|
||||
// Extensions allow clients to provide optional blobs that can be handled by runtime.
|
||||
Extensions map[string]*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Sandboxer is the name of the sandbox controller who manages the sandbox.
|
||||
Sandboxer string `protobuf:"bytes,10,opt,name=sandboxer,proto3" json:"sandboxer,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Sandbox) Reset() {
|
||||
@ -142,6 +144,13 @@ func (x *Sandbox) GetExtensions() map[string]*anypb.Any {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Sandbox) GetSandboxer() string {
|
||||
if x != nil {
|
||||
return x.Sandboxer
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Sandbox_Runtime struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@ -211,7 +220,7 @@ var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []by
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
|
||||
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x04, 0x0a, 0x07, 0x53, 0x61, 0x6e,
|
||||
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8c, 0x05, 0x0a, 0x07, 0x53, 0x61, 0x6e,
|
||||
0x64, 0x62, 0x6f, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f,
|
||||
0x78, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02,
|
||||
@ -236,25 +245,27 @@ var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []by
|
||||
0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
|
||||
0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x45,
|
||||
0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
|
||||
0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75,
|
||||
0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
|
||||
0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62,
|
||||
0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x3a, 0x02, 0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74,
|
||||
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x61,
|
||||
0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
|
||||
0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07,
|
||||
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c,
|
||||
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
|
||||
0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64,
|
||||
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
e2e/vendor/github.com/containerd/containerd/api/types/sandbox.proto (generated, vendored, 3 lines changed)
@ -48,4 +48,7 @@ message Sandbox {
|
||||
    google.protobuf.Timestamp updated_at = 6;
    // Extensions allow clients to provide optional blobs that can be handled by runtime.
    map<string, google.protobuf.Any> extensions = 7;
    // Sandboxer is the name of the sandbox controller who manages the sandbox.
    string sandboxer = 10;

}
|
||||
|
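The new sandboxer field and the Any-typed extensions map can be filled with the standard anypb helpers. The sketch below is illustrative only and not part of the vendored code; the extension payload and the sandboxer name are placeholders.

package main

import (
	"fmt"

	types "github.com/containerd/containerd/api/types"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// An extension value must be a proto message; RuntimeVersion is used here
	// purely as a stand-in payload.
	ext, err := anypb.New(&types.RuntimeVersion{Version: "v2"})
	if err != nil {
		panic(err)
	}

	sb := &types.Sandbox{
		Sandboxer:  "podsandbox", // placeholder controller name
		Extensions: map[string]*anypb.Any{"example": ext},
	}
	fmt.Println(sb.GetSandboxer(), len(sb.GetExtensions()))
}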
e2e/vendor/github.com/containerd/errdefs/errors.go (generated, vendored, 439 lines changed)
@ -21,9 +21,6 @@
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
@ -36,57 +33,411 @@ import (
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
// These errors map closely to grpc errors.
|
||||
var (
|
||||
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
|
||||
ErrInvalidArgument = errors.New("invalid argument")
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
ErrFailedPrecondition = errors.New("failed precondition")
|
||||
ErrUnavailable = errors.New("unavailable")
|
||||
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
|
||||
ErrUnknown = errUnknown{}
|
||||
ErrInvalidArgument = errInvalidArgument{}
|
||||
ErrNotFound = errNotFound{}
|
||||
ErrAlreadyExists = errAlreadyExists{}
|
||||
ErrPermissionDenied = errPermissionDenied{}
|
||||
ErrResourceExhausted = errResourceExhausted{}
|
||||
ErrFailedPrecondition = errFailedPrecondition{}
|
||||
ErrConflict = errConflict{}
|
||||
ErrNotModified = errNotModified{}
|
||||
ErrAborted = errAborted{}
|
||||
ErrOutOfRange = errOutOfRange{}
|
||||
ErrNotImplemented = errNotImplemented{}
|
||||
ErrInternal = errInternal{}
|
||||
ErrUnavailable = errUnavailable{}
|
||||
ErrDataLoss = errDataLoss{}
|
||||
ErrUnauthenticated = errUnauthorized{}
|
||||
)
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Is(err, ErrInvalidArgument)
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound)
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Is(err, ErrAlreadyExists)
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed to the
|
||||
// lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Is(err, ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Is(err, ErrUnavailable)
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Is(err, ErrNotImplemented)
|
||||
// cancelled maps to Moby's "ErrCancelled"
|
||||
type cancelled interface {
|
||||
Cancelled()
|
||||
}
|
||||
|
||||
// IsCanceled returns true if the error is due to `context.Canceled`.
|
||||
func IsCanceled(err error) bool {
|
||||
return errors.Is(err, context.Canceled)
|
||||
return errors.Is(err, context.Canceled) || isInterface[cancelled](err)
|
||||
}
|
||||
|
||||
type errUnknown struct{}
|
||||
|
||||
func (errUnknown) Error() string { return "unknown" }
|
||||
|
||||
func (errUnknown) Unknown() {}
|
||||
|
||||
func (e errUnknown) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// unknown maps to Moby's "ErrUnknown"
|
||||
type unknown interface {
|
||||
Unknown()
|
||||
}
|
||||
|
||||
// IsUnknown returns true if the error is due to an unknown error,
|
||||
// unhandled condition or unexpected response.
|
||||
func IsUnknown(err error) bool {
|
||||
return errors.Is(err, errUnknown{}) || isInterface[unknown](err)
|
||||
}
|
||||
|
||||
type errInvalidArgument struct{}
|
||||
|
||||
func (errInvalidArgument) Error() string { return "invalid argument" }
|
||||
|
||||
func (errInvalidArgument) InvalidParameter() {}
|
||||
|
||||
func (e errInvalidArgument) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// invalidParameter maps to Moby's "ErrInvalidParameter"
|
||||
type invalidParameter interface {
|
||||
InvalidParameter()
|
||||
}
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err)
|
||||
}
|
||||
|
||||
// deadlineExceeded maps to Moby's "ErrDeadline"
|
||||
type deadlineExceeded interface {
|
||||
DeadlineExceeded()
|
||||
}
|
||||
|
||||
// IsDeadlineExceeded returns true if the error is due to
|
||||
// `context.DeadlineExceeded`.
|
||||
func IsDeadlineExceeded(err error) bool {
|
||||
return errors.Is(err, context.DeadlineExceeded)
|
||||
return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err)
|
||||
}
|
||||
|
||||
type errNotFound struct{}
|
||||
|
||||
func (errNotFound) Error() string { return "not found" }
|
||||
|
||||
func (errNotFound) NotFound() {}
|
||||
|
||||
func (e errNotFound) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// notFound maps to Moby's "ErrNotFound"
|
||||
type notFound interface {
|
||||
NotFound()
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound) || isInterface[notFound](err)
|
||||
}
|
||||
|
||||
type errAlreadyExists struct{}
|
||||
|
||||
func (errAlreadyExists) Error() string { return "already exists" }
|
||||
|
||||
func (errAlreadyExists) AlreadyExists() {}
|
||||
|
||||
func (e errAlreadyExists) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type alreadyExists interface {
|
||||
AlreadyExists()
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err)
|
||||
}
|
||||
|
||||
type errPermissionDenied struct{}
|
||||
|
||||
func (errPermissionDenied) Error() string { return "permission denied" }
|
||||
|
||||
func (errPermissionDenied) Forbidden() {}
|
||||
|
||||
func (e errPermissionDenied) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// forbidden maps to Moby's "ErrForbidden"
|
||||
type forbidden interface {
|
||||
Forbidden()
|
||||
}
|
||||
|
||||
// IsPermissionDenied returns true if the error is due to permission denied
|
||||
// or forbidden (403) response
|
||||
func IsPermissionDenied(err error) bool {
|
||||
return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err)
|
||||
}
|
||||
|
||||
type errResourceExhausted struct{}
|
||||
|
||||
func (errResourceExhausted) Error() string { return "resource exhausted" }
|
||||
|
||||
func (errResourceExhausted) ResourceExhausted() {}
|
||||
|
||||
func (e errResourceExhausted) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type resourceExhausted interface {
|
||||
ResourceExhausted()
|
||||
}
|
||||
|
||||
// IsResourceExhausted returns true if the error is due to
|
||||
// a lack of resources or too many attempts.
|
||||
func IsResourceExhausted(err error) bool {
|
||||
return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err)
|
||||
}
|
||||
|
||||
type errFailedPrecondition struct{}
|
||||
|
||||
func (e errFailedPrecondition) Error() string { return "failed precondition" }
|
||||
|
||||
func (errFailedPrecondition) FailedPrecondition() {}
|
||||
|
||||
func (e errFailedPrecondition) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type failedPrecondition interface {
|
||||
FailedPrecondition()
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed due to
|
||||
// the lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err)
|
||||
}
|
||||
|
||||
type errConflict struct{}
|
||||
|
||||
func (errConflict) Error() string { return "conflict" }
|
||||
|
||||
func (errConflict) Conflict() {}
|
||||
|
||||
func (e errConflict) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// conflict maps to Moby's "ErrConflict"
|
||||
type conflict interface {
|
||||
Conflict()
|
||||
}
|
||||
|
||||
// IsConflict returns true if an operation could not proceed due to
|
||||
// a conflict.
|
||||
func IsConflict(err error) bool {
|
||||
return errors.Is(err, errConflict{}) || isInterface[conflict](err)
|
||||
}
|
||||
|
||||
type errNotModified struct{}
|
||||
|
||||
func (errNotModified) Error() string { return "not modified" }
|
||||
|
||||
func (errNotModified) NotModified() {}
|
||||
|
||||
func (e errNotModified) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// notModified maps to Moby's "ErrNotModified"
|
||||
type notModified interface {
|
||||
NotModified()
|
||||
}
|
||||
|
||||
// IsNotModified returns true if an operation could not proceed due
|
||||
// to an object not modified from a previous state.
|
||||
func IsNotModified(err error) bool {
|
||||
return errors.Is(err, errNotModified{}) || isInterface[notModified](err)
|
||||
}
|
||||
|
||||
type errAborted struct{}
|
||||
|
||||
func (errAborted) Error() string { return "aborted" }
|
||||
|
||||
func (errAborted) Aborted() {}
|
||||
|
||||
func (e errAborted) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type aborted interface {
|
||||
Aborted()
|
||||
}
|
||||
|
||||
// IsAborted returns true if an operation was aborted.
|
||||
func IsAborted(err error) bool {
|
||||
return errors.Is(err, errAborted{}) || isInterface[aborted](err)
|
||||
}
|
||||
|
||||
type errOutOfRange struct{}
|
||||
|
||||
func (errOutOfRange) Error() string { return "out of range" }
|
||||
|
||||
func (errOutOfRange) OutOfRange() {}
|
||||
|
||||
func (e errOutOfRange) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
type outOfRange interface {
|
||||
OutOfRange()
|
||||
}
|
||||
|
||||
// IsOutOfRange returns true if an operation could not proceed due
|
||||
// to data being out of the expected range.
|
||||
func IsOutOfRange(err error) bool {
|
||||
return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err)
|
||||
}
|
||||
|
||||
type errNotImplemented struct{}
|
||||
|
||||
func (errNotImplemented) Error() string { return "not implemented" }
|
||||
|
||||
func (errNotImplemented) NotImplemented() {}
|
||||
|
||||
func (e errNotImplemented) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// notImplemented maps to Moby's "ErrNotImplemented"
|
||||
type notImplemented interface {
|
||||
NotImplemented()
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err)
|
||||
}
|
||||
|
||||
type errInternal struct{}
|
||||
|
||||
func (errInternal) Error() string { return "internal" }
|
||||
|
||||
func (errInternal) System() {}
|
||||
|
||||
func (e errInternal) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// system maps to Moby's "ErrSystem"
|
||||
type system interface {
|
||||
System()
|
||||
}
|
||||
|
||||
// IsInternal returns true if the error is due to an internal or system error
|
||||
func IsInternal(err error) bool {
|
||||
return errors.Is(err, errInternal{}) || isInterface[system](err)
|
||||
}
|
||||
|
||||
type errUnavailable struct{}
|
||||
|
||||
func (errUnavailable) Error() string { return "unavailable" }
|
||||
|
||||
func (errUnavailable) Unavailable() {}
|
||||
|
||||
func (e errUnavailable) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// unavailable maps to Moby's "ErrUnavailable"
|
||||
type unavailable interface {
|
||||
Unavailable()
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err)
|
||||
}
|
||||
|
||||
type errDataLoss struct{}
|
||||
|
||||
func (errDataLoss) Error() string { return "data loss" }
|
||||
|
||||
func (errDataLoss) DataLoss() {}
|
||||
|
||||
func (e errDataLoss) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// dataLoss maps to Moby's "ErrDataLoss"
|
||||
type dataLoss interface {
|
||||
DataLoss()
|
||||
}
|
||||
|
||||
// IsDataLoss returns true if data during an operation was lost or corrupted
|
||||
func IsDataLoss(err error) bool {
|
||||
return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err)
|
||||
}
|
||||
|
||||
type errUnauthorized struct{}
|
||||
|
||||
func (errUnauthorized) Error() string { return "unauthorized" }
|
||||
|
||||
func (errUnauthorized) Unauthorized() {}
|
||||
|
||||
func (e errUnauthorized) WithMessage(msg string) error {
|
||||
return customMessage{e, msg}
|
||||
}
|
||||
|
||||
// unauthorized maps to Moby's "ErrUnauthorized"
|
||||
type unauthorized interface {
|
||||
Unauthorized()
|
||||
}
|
||||
|
||||
// IsUnauthorized returns true if the error indicates that the user was
|
||||
// unauthenticated or unauthorized.
|
||||
func IsUnauthorized(err error) bool {
|
||||
return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err)
|
||||
}
|
||||
|
||||
func isInterface[T any](err error) bool {
|
||||
for {
|
||||
switch x := err.(type) {
|
||||
case T:
|
||||
return true
|
||||
case customMessage:
|
||||
err = x.err
|
||||
case interface{ Unwrap() error }:
|
||||
err = x.Unwrap()
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
case interface{ Unwrap() []error }:
|
||||
for _, err := range x.Unwrap() {
|
||||
if isInterface[T](err) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// customMessage is used to provide a defined error with a custom message.
|
||||
// The message is not wrapped but can be compared by the `Is(error) bool` interface.
|
||||
type customMessage struct {
|
||||
err error
|
||||
msg string
|
||||
}
|
||||
|
||||
func (c customMessage) Is(err error) bool {
|
||||
return c.err == err
|
||||
}
|
||||
|
||||
func (c customMessage) As(target any) bool {
|
||||
return errors.As(c.err, target)
|
||||
}
|
||||
|
||||
func (c customMessage) Error() string {
|
||||
return c.msg
|
||||
}
|
||||
|
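Taken together, the rewritten errdefs package keeps the old errors.Is semantics while adding Moby-style marker interfaces and a WithMessage helper that swaps the message without losing the error class. A small illustrative sketch (not part of the vendored code):

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
)

// lookup is a hypothetical function that reports a missing object.
func lookup(name string) error {
	// Wrapping with %w keeps the error class visible to errors.Is.
	return fmt.Errorf("volume %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	err := lookup("csi-vol-1")
	fmt.Println(errdefs.IsNotFound(err))             // true
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true

	// WithMessage replaces the text but still compares equal to the class.
	custom := errdefs.ErrNotFound.WithMessage("no such snapshot")
	fmt.Println(errdefs.IsNotFound(custom)) // true
	fmt.Println(custom)                     // no such snapshot
}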
e2e/vendor/github.com/containerd/errdefs/grpc.go (generated, vendored, file removed, 147 lines)
@ -1,147 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isGRPCError(err) {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case IsInvalidArgument(err):
|
||||
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||
case IsNotFound(err):
|
||||
return status.Errorf(codes.NotFound, err.Error())
|
||||
case IsAlreadyExists(err):
|
||||
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||
case IsFailedPrecondition(err):
|
||||
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||
case IsUnavailable(err):
|
||||
return status.Errorf(codes.Unavailable, err.Error())
|
||||
case IsNotImplemented(err):
|
||||
return status.Errorf(codes.Unimplemented, err.Error())
|
||||
case IsCanceled(err):
|
||||
return status.Errorf(codes.Canceled, err.Error())
|
||||
case IsDeadlineExceeded(err):
|
||||
return status.Errorf(codes.DeadlineExceeded, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) {
|
||||
case codes.InvalidArgument:
|
||||
cls = ErrInvalidArgument
|
||||
case codes.AlreadyExists:
|
||||
cls = ErrAlreadyExists
|
||||
case codes.NotFound:
|
||||
cls = ErrNotFound
|
||||
case codes.Unavailable:
|
||||
cls = ErrUnavailable
|
||||
case codes.FailedPrecondition:
|
||||
cls = ErrFailedPrecondition
|
||||
case codes.Unimplemented:
|
||||
cls = ErrNotImplemented
|
||||
case codes.Canceled:
|
||||
cls = context.Canceled
|
||||
case codes.DeadlineExceeded:
|
||||
cls = context.DeadlineExceeded
|
||||
default:
|
||||
cls = ErrUnknown
|
||||
}
|
||||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = fmt.Errorf("%s: %w", msg, cls)
|
||||
} else {
|
||||
err = cls
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string {
|
||||
desc := errDesc(err)
|
||||
clss := cls.Error()
|
||||
if desc == clss {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss)
|
||||
}
|
||||
|
||||
func isGRPCError(err error) bool {
|
||||
_, ok := status.FromError(err)
|
||||
return ok
|
||||
}
|
||||
|
||||
func code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func errDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
@ -1,7 +1,7 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
@ -176,24 +176,13 @@
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
e2e/vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go (generated, vendored, new file, 353 lines)
@ -0,0 +1,353 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package errgrpc provides utility functions for translating errors to
|
||||
// and from a gRPC context.
|
||||
//
|
||||
// The functions ToGRPC and ToNative can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errgrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/protoadapt"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/containerd/typeurl/v2"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/errdefs/pkg/internal/cause"
|
||||
"github.com/containerd/errdefs/pkg/internal/types"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the error into a grpc error, from the error types
|
||||
// defined in the errdefs package and attempting to preserve the original
|
||||
// description. Any type which does not resolve to a defined error type will
|
||||
// be assigned the unknown error code.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type. The grpc error details will be used to attempt to preserve as much of
|
||||
// the error structures and types as possible.
|
||||
//
|
||||
// Errors which can be marshaled using protobuf or typeurl will be considered
|
||||
// for including as GRPC error details.
|
||||
// Additionally, use the following interfaces in errors to preserve custom types:
|
||||
//
|
||||
// WrapError(error) error - Used to wrap the previous error
|
||||
// JoinErrors(...error) error - Used to join all previous errors
|
||||
// CollapseError() - Used for errors which carry information but
|
||||
// should not have their error message shown.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, ok := status.FromError(err); ok {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
st := statusFromError(err)
|
||||
if st != nil {
|
||||
if details := errorDetails(err, false); len(details) > 0 {
|
||||
if ds, _ := st.WithDetails(details...); ds != nil {
|
||||
st = ds
|
||||
}
|
||||
}
|
||||
err = st.Err()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func statusFromError(err error) *status.Status {
|
||||
switch errdefs.Resolve(err) {
|
||||
case errdefs.ErrInvalidArgument:
|
||||
return status.New(codes.InvalidArgument, err.Error())
|
||||
case errdefs.ErrNotFound:
|
||||
return status.New(codes.NotFound, err.Error())
|
||||
case errdefs.ErrAlreadyExists:
|
||||
return status.New(codes.AlreadyExists, err.Error())
|
||||
case errdefs.ErrPermissionDenied:
|
||||
return status.New(codes.PermissionDenied, err.Error())
|
||||
case errdefs.ErrResourceExhausted:
|
||||
return status.New(codes.ResourceExhausted, err.Error())
|
||||
case errdefs.ErrFailedPrecondition, errdefs.ErrConflict, errdefs.ErrNotModified:
|
||||
return status.New(codes.FailedPrecondition, err.Error())
|
||||
case errdefs.ErrAborted:
|
||||
return status.New(codes.Aborted, err.Error())
|
||||
case errdefs.ErrOutOfRange:
|
||||
return status.New(codes.OutOfRange, err.Error())
|
||||
case errdefs.ErrNotImplemented:
|
||||
return status.New(codes.Unimplemented, err.Error())
|
||||
case errdefs.ErrInternal:
|
||||
return status.New(codes.Internal, err.Error())
|
||||
case errdefs.ErrUnavailable:
|
||||
return status.New(codes.Unavailable, err.Error())
|
||||
case errdefs.ErrDataLoss:
|
||||
return status.New(codes.DataLoss, err.Error())
|
||||
case errdefs.ErrUnauthenticated:
|
||||
return status.New(codes.Unauthenticated, err.Error())
|
||||
case context.DeadlineExceeded:
|
||||
return status.New(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return status.New(codes.Canceled, err.Error())
|
||||
case errdefs.ErrUnknown:
|
||||
return status.New(codes.Unknown, err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// errorDetails returns an array of errors which make up the provided error.
|
||||
// If firstIncluded is true, then all encodable errors will be used, otherwise
|
||||
// the first error in an error list will not be used, to account for
|
||||
// the base status error which details are added to via wrap or join.
|
||||
//
|
||||
// The errors are ordered in way that they can be applied in order by either
|
||||
// wrapping or joining the errors to recreate an error with the same structure
|
||||
// when `WrapError` and `JoinErrors` interfaces are used.
|
||||
//
|
||||
// The intent is that when re-applying the errors to create a single error, the
|
||||
// results of calls to `Error()`, `errors.Is`, `errors.As`, and "%+v" formatting
|
||||
// is the same as the original error.
|
||||
func errorDetails(err error, firstIncluded bool) []protoadapt.MessageV1 {
|
||||
switch uerr := err.(type) {
|
||||
case interface{ Unwrap() error }:
|
||||
details := errorDetails(uerr.Unwrap(), firstIncluded)
|
||||
|
||||
// If the type is able to wrap, then include if proto
|
||||
if _, ok := err.(interface{ WrapError(error) error }); ok {
|
||||
// Get proto message
|
||||
if protoErr := toProtoMessage(err); protoErr != nil {
|
||||
details = append(details, protoErr)
|
||||
}
|
||||
}
|
||||
|
||||
return details
|
||||
case interface{ Unwrap() []error }:
|
||||
var details []protoadapt.MessageV1
|
||||
for i, e := range uerr.Unwrap() {
|
||||
details = append(details, errorDetails(e, firstIncluded || i > 0)...)
|
||||
}
|
||||
|
||||
if _, ok := err.(interface{ JoinErrors(...error) error }); ok {
|
||||
// Get proto message
|
||||
if protoErr := toProtoMessage(err); protoErr != nil {
|
||||
details = append(details, protoErr)
|
||||
}
|
||||
}
|
||||
return details
|
||||
}
|
||||
|
||||
if firstIncluded {
|
||||
if protoErr := toProtoMessage(err); protoErr != nil {
|
||||
return []protoadapt.MessageV1{protoErr}
|
||||
}
|
||||
if gs, ok := status.FromError(ToGRPC(err)); ok {
|
||||
return []protoadapt.MessageV1{gs.Proto()}
|
||||
}
|
||||
// TODO: Else include unknown extra error type?
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func toProtoMessage(err error) protoadapt.MessageV1 {
    // Do not double encode proto messages, otherwise use Any
    if pm, ok := err.(protoadapt.MessageV1); ok {
        return pm
    }
    if pm, ok := err.(proto.Message); ok {
        return protoadapt.MessageV1Of(pm)
    }

    if reflect.TypeOf(err).Kind() == reflect.Ptr {
        a, aerr := typeurl.MarshalAny(err)
        if aerr == nil {
            return &anypb.Any{
                TypeUrl: a.GetTypeUrl(),
                Value:   a.GetValue(),
            }
        }
    }
    return nil
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to grpc.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
    return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}

// ToNative returns the underlying error from a grpc service based on the grpc
// error code. The grpc details are used to wrap the error in more context or
// to support multiple errors.
func ToNative(err error) error {
    if err == nil {
        return nil
    }

    s, isGRPC := status.FromError(err)

    var (
        desc string
        code codes.Code
    )

    if isGRPC {
        desc = s.Message()
        code = s.Code()
    } else {
        desc = err.Error()
        code = codes.Unknown
    }

    var cls error // divide these into error classes, becomes the cause

    switch code {
    case codes.InvalidArgument:
        cls = errdefs.ErrInvalidArgument
    case codes.AlreadyExists:
        cls = errdefs.ErrAlreadyExists
    case codes.NotFound:
        cls = errdefs.ErrNotFound
    case codes.Unavailable:
        cls = errdefs.ErrUnavailable
    case codes.FailedPrecondition:
        // TODO: Has suffix is not sufficient for conflict and not modified
        // Message should start with ": " or be at beginning of a line
        // Message should end with ": " or be at the end of a line
        // Compile a regex
        if desc == errdefs.ErrConflict.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrConflict.Error()) {
            cls = errdefs.ErrConflict
        } else if desc == errdefs.ErrNotModified.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrNotModified.Error()) {
            cls = errdefs.ErrNotModified
        } else {
            cls = errdefs.ErrFailedPrecondition
        }
    case codes.Unimplemented:
        cls = errdefs.ErrNotImplemented
    case codes.Canceled:
        cls = context.Canceled
    case codes.DeadlineExceeded:
        cls = context.DeadlineExceeded
    case codes.Aborted:
        cls = errdefs.ErrAborted
    case codes.Unauthenticated:
        cls = errdefs.ErrUnauthenticated
    case codes.PermissionDenied:
        cls = errdefs.ErrPermissionDenied
    case codes.Internal:
        cls = errdefs.ErrInternal
    case codes.DataLoss:
        cls = errdefs.ErrDataLoss
    case codes.OutOfRange:
        cls = errdefs.ErrOutOfRange
    case codes.ResourceExhausted:
        cls = errdefs.ErrResourceExhausted
    default:
        if idx := strings.LastIndex(desc, cause.UnexpectedStatusPrefix); idx > 0 {
            if status, uerr := strconv.Atoi(desc[idx+len(cause.UnexpectedStatusPrefix):]); uerr == nil && status >= 200 && status < 600 {
                cls = cause.ErrUnexpectedStatus{Status: status}
            }
        }
        if cls == nil {
            cls = errdefs.ErrUnknown
        }
    }

    msg := rebaseMessage(cls, desc)
    if msg == "" {
        err = cls
    } else if msg != desc {
        err = fmt.Errorf("%s: %w", msg, cls)
    } else if wm, ok := cls.(interface{ WithMessage(string) error }); ok {
        err = wm.WithMessage(msg)
    } else {
        err = fmt.Errorf("%s: %w", msg, cls)
    }

    if isGRPC {
        errs := []error{err}
        for _, a := range s.Details() {
            var derr error

            // First decode error if needed
            if s, ok := a.(*spb.Status); ok {
                derr = ToNative(status.ErrorProto(s))
            } else if e, ok := a.(error); ok {
                derr = e
            } else if dany, ok := a.(typeurl.Any); ok {
                i, uerr := typeurl.UnmarshalAny(dany)
                if uerr == nil {
                    if e, ok = i.(error); ok {
                        derr = e
                    } else {
                        derr = fmt.Errorf("non-error unmarshalled detail: %v", i)
                    }
                } else {
                    derr = fmt.Errorf("error of type %q with failure to unmarshal: %v", dany.GetTypeUrl(), uerr)
                }
            } else {
                derr = fmt.Errorf("non-error detail: %v", a)
            }

            switch werr := derr.(type) {
            case interface{ WrapError(error) error }:
                errs[len(errs)-1] = werr.WrapError(errs[len(errs)-1])
            case interface{ JoinErrors(...error) error }:
                // TODO: Consider whether this should support joining a subset
                errs[0] = werr.JoinErrors(errs...)
            case interface{ CollapseError() }:
                errs[len(errs)-1] = types.CollapsedError(errs[len(errs)-1], derr)
            default:
                errs = append(errs, derr)
            }

        }
        if len(errs) > 1 {
            err = errors.Join(errs...)
        } else {
            err = errs[0]
        }
    }

    return err
}

// rebaseMessage removes the repeated error class message from the end of an
// error string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, desc string) string {
    clss := cls.Error()
    if desc == clss {
        return ""
    }

    return strings.TrimSuffix(desc, ": "+clss)
}
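Taken together, ToGRPC and ToNative let an error cross a gRPC boundary and come back still matching its errdefs class, with rebaseMessage stripping the duplicated class text. A hedged round-trip sketch (errgrpc import path assumed, as above):

package main

import (
    "errors"
    "fmt"

    "github.com/containerd/errdefs"
    "github.com/containerd/errdefs/pkg/errgrpc" // assumed import path
)

func main() {
    orig := fmt.Errorf("snapshot %q: %w", "base", errdefs.ErrAlreadyExists)

    // Server side: convert to a gRPC status error.
    wire := errgrpc.ToGRPC(orig)

    // Client side: convert back to an errdefs-classed error.
    back := errgrpc.ToNative(wire)

    fmt.Println(errors.Is(back, errdefs.ErrAlreadyExists)) // true
    fmt.Println(back)                                      // message text survives the round trip
}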
33 e2e/vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go generated vendored Normal file
@ -0,0 +1,33 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package cause is used to define root causes for errors
// common to errors packages like grpc and http.
package cause

import "fmt"

type ErrUnexpectedStatus struct {
    Status int
}

const UnexpectedStatusPrefix = "unexpected status "

func (e ErrUnexpectedStatus) Error() string {
    return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status)
}

func (ErrUnexpectedStatus) Unknown() {}
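UnexpectedStatusPrefix is a plain-text contract between this package and ToNative above: ErrUnexpectedStatus renders as "unexpected status <code>", and ToNative scans for the prefix to recover the numeric code. A standalone illustration of both directions (the constant is mirrored here because the real one lives in an internal package):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// unexpectedStatusPrefix mirrors cause.UnexpectedStatusPrefix for illustration.
const unexpectedStatusPrefix = "unexpected status "

func main() {
    // What ErrUnexpectedStatus{Status: 429}.Error() would produce, with extra context.
    desc := "pull registry.example.com/app: " + unexpectedStatusPrefix + "429"

    // The same recovery logic used by ToNative's default case.
    if idx := strings.LastIndex(desc, unexpectedStatusPrefix); idx > 0 {
        if code, err := strconv.Atoi(desc[idx+len(unexpectedStatusPrefix):]); err == nil {
            fmt.Println(code) // 429
        }
    }
}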
57 e2e/vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go generated vendored Normal file
@ -0,0 +1,57 @@
package types

import "fmt"

// CollapsibleError indicates the error should be collapsed
type CollapsibleError interface {
    CollapseError()
}

// CollapsedError returns a new error with the collapsed
// error returned by Unwrap or when formatted with "%+v"
func CollapsedError(err error, collapsed ...error) error {
    return collapsedError{err, collapsed}
}

type collapsedError struct {
    error
    collapsed []error
}

func (c collapsedError) Unwrap() []error {
    return append([]error{c.error}, c.collapsed...)
}

func (c collapsedError) Format(s fmt.State, verb rune) {
    switch verb {
    case 'v':
        if s.Flag('+') {
            fmt.Fprintf(s, "%+v", c.error)
            for _, err := range c.collapsed {
                fmt.Fprintf(s, "\n%+v", err)
            }
            return
        }
        fallthrough
    case 's':
        fmt.Fprint(s, c.Error())
    case 'q':
        fmt.Fprintf(s, "%q", c.Error())
    }
}
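CollapsedError keeps the extra errors reachable through Unwrap() []error (so errors.Is still sees them) while leaving them out of the plain %v/%s output; they only reappear with %+v. A behavioural sketch that inlines an equivalent type, since the real one is in an internal package:

package main

import (
    "errors"
    "fmt"
)

// collapsed mirrors the collapsedError type above, reduced to the verbs used here.
type collapsed struct {
    error
    extra []error
}

func (c collapsed) Unwrap() []error { return append([]error{c.error}, c.extra...) }

func (c collapsed) Format(s fmt.State, verb rune) {
    if verb == 'v' && s.Flag('+') {
        fmt.Fprintf(s, "%+v", c.error)
        for _, err := range c.extra {
            fmt.Fprintf(s, "\n%+v", err)
        }
        return
    }
    fmt.Fprint(s, c.Error())
}

func main() {
    base := errors.New("pull failed")
    note := errors.New("retried 3 times")
    err := collapsed{error: base, extra: []error{note}}

    fmt.Printf("%v\n", err)           // pull failed
    fmt.Printf("%+v\n", err)          // pull failed, then retried 3 times on a new line
    fmt.Println(errors.Is(err, note)) // true, via Unwrap() []error
}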
147 e2e/vendor/github.com/containerd/errdefs/resolve.go generated vendored Normal file
@ -0,0 +1,147 @@
package errdefs

import "context"

// Resolve returns the first error found in the error chain which matches an
// error defined in this package or a context error. A raw, unwrapped error is
// returned, or ErrUnknown if no matching error is found.
//
// This is useful for determining a response code based on the outermost wrapped
// error rather than the original cause. For example, a not found error deep
// in the code may be wrapped as an invalid argument. When determining the status
// code from Is* functions, the depth or ordering of the error is not
// considered.
//
// The search order is depth first: a wrapped error returned from any part of
// the chain from `Unwrap() error` will be returned before any joined errors
// as returned by `Unwrap() []error`.
func Resolve(err error) error {
    if err == nil {
        return nil
    }
    err = firstError(err)
    if err == nil {
        err = ErrUnknown
    }
    return err
}

func firstError(err error) error {
    for {
        switch err {
        case ErrUnknown,
            ErrInvalidArgument,
            ErrNotFound,
            ErrAlreadyExists,
            ErrPermissionDenied,
            ErrResourceExhausted,
            ErrFailedPrecondition,
            ErrConflict,
            ErrNotModified,
            ErrAborted,
            ErrOutOfRange,
            ErrNotImplemented,
            ErrInternal,
            ErrUnavailable,
            ErrDataLoss,
            ErrUnauthenticated,
            context.DeadlineExceeded,
            context.Canceled:
            return err
        }
        switch e := err.(type) {
        case customMessage:
            err = e.err
        case unknown:
            return ErrUnknown
        case invalidParameter:
            return ErrInvalidArgument
        case notFound:
            return ErrNotFound
        case alreadyExists:
            return ErrAlreadyExists
        case forbidden:
            return ErrPermissionDenied
        case resourceExhausted:
            return ErrResourceExhausted
        case failedPrecondition:
            return ErrFailedPrecondition
        case conflict:
            return ErrConflict
        case notModified:
            return ErrNotModified
        case aborted:
            return ErrAborted
        case errOutOfRange:
            return ErrOutOfRange
        case notImplemented:
            return ErrNotImplemented
        case system:
            return ErrInternal
        case unavailable:
            return ErrUnavailable
        case dataLoss:
            return ErrDataLoss
        case unauthorized:
            return ErrUnauthenticated
        case deadlineExceeded:
            return context.DeadlineExceeded
        case cancelled:
            return context.Canceled
        case interface{ Unwrap() error }:
            err = e.Unwrap()
            if err == nil {
                return nil
            }
        case interface{ Unwrap() []error }:
            for _, ue := range e.Unwrap() {
                if fe := firstError(ue); fe != nil {
                    return fe
                }
            }
            return nil
        case interface{ Is(error) bool }:
            for _, target := range []error{ErrUnknown,
                ErrInvalidArgument,
                ErrNotFound,
                ErrAlreadyExists,
                ErrPermissionDenied,
                ErrResourceExhausted,
                ErrFailedPrecondition,
                ErrConflict,
                ErrNotModified,
                ErrAborted,
                ErrOutOfRange,
                ErrNotImplemented,
                ErrInternal,
                ErrUnavailable,
                ErrDataLoss,
                ErrUnauthenticated,
                context.DeadlineExceeded,
                context.Canceled} {
                if e.Is(target) {
                    return target
                }
            }
            return nil
        default:
            return nil
        }
    }
}
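Resolve differs from calling errors.Is with each sentinel in that it honours wrapping order and reports the outermost recognised class. A short sketch using the public errdefs API:

package main

import (
    "errors"
    "fmt"

    "github.com/containerd/errdefs"
)

func main() {
    // A not-found error that was later re-classified as invalid argument.
    inner := fmt.Errorf("lookup layer: %w", errdefs.ErrNotFound)
    outer := fmt.Errorf("%w: %w", errdefs.ErrInvalidArgument, inner)

    // errors.Is matches both classes...
    fmt.Println(errors.Is(outer, errdefs.ErrNotFound)) // true

    // ...but Resolve reports the outermost one, which is what a response code
    // should normally be derived from.
    fmt.Println(errdefs.Resolve(outer) == errdefs.ErrInvalidArgument) // true
}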
8 e2e/vendor/github.com/containerd/ttrpc/channel.go generated vendored
@ -143,10 +143,10 @@ func (ch *channel) recv() (messageHeader, []byte, error) {
 }
 
 func (ch *channel) send(streamID uint32, t messageType, flags uint8, p []byte) error {
-    // TODO: Error on send rather than on recv
-    //if len(p) > messageLengthMax {
-    //    return status.Errorf(codes.InvalidArgument, "refusing to send, message length %v exceed maximum message size of %v", len(p), messageLengthMax)
-    //}
+    if len(p) > messageLengthMax {
+        return OversizedMessageError(len(p))
+    }
+
     if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t, Flags: flags}); err != nil {
         return err
     }
48 e2e/vendor/github.com/containerd/ttrpc/errors.go generated vendored
@ -16,7 +16,12 @@
 
 package ttrpc
 
-import "errors"
+import (
+    "errors"
+
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/status"
+)
 
 var (
     // ErrProtocol is a general error in the handling the protocol.
@ -32,3 +37,44 @@ var (
     // ErrStreamClosed is when the streaming connection is closed.
     ErrStreamClosed = errors.New("ttrpc: stream closed")
 )
+
+// OversizedMessageErr is used to indicate refusal to send an oversized message.
+// It wraps a ResourceExhausted grpc Status together with the offending message
+// length.
+type OversizedMessageErr struct {
+    messageLength int
+    err           error
+}
+
+// OversizedMessageError returns an OversizedMessageErr error for the given message
+// length if it exceeds the allowed maximum. Otherwise a nil error is returned.
+func OversizedMessageError(messageLength int) error {
+    if messageLength <= messageLengthMax {
+        return nil
+    }
+
+    return &OversizedMessageErr{
+        messageLength: messageLength,
+        err:           status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", messageLength, messageLengthMax),
+    }
+}
+
+// Error returns the error message for the corresponding grpc Status for the error.
+func (e *OversizedMessageErr) Error() string {
+    return e.err.Error()
+}
+
+// Unwrap returns the corresponding error with our grpc status code.
+func (e *OversizedMessageErr) Unwrap() error {
+    return e.err
+}
+
+// RejectedLength retrieves the rejected message length which triggered the error.
+func (e *OversizedMessageErr) RejectedLength() int {
+    return e.messageLength
+}
+
+// MaximumLength retrieves the maximum allowed message length that triggered the error.
+func (*OversizedMessageErr) MaximumLength() int {
+    return messageLengthMax
+}
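On the caller's side the refusal arrives as an ordinary error; errors.As recovers the typed value and Unwrap exposes the ResourceExhausted status. A sketch of how a caller might inspect it (the 64 MiB length is an assumption chosen to exceed ttrpc's internal message size limit):

package main

import (
    "errors"
    "fmt"

    "github.com/containerd/ttrpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func inspect(err error) {
    var oversized *ttrpc.OversizedMessageErr
    if errors.As(err, &oversized) {
        fmt.Printf("rejected %d bytes (limit %d bytes)\n",
            oversized.RejectedLength(), oversized.MaximumLength())
        // The wrapped error carries the ResourceExhausted grpc status.
        fmt.Println(status.Code(oversized.Unwrap()) == codes.ResourceExhausted) // true
    }
}

func main() {
    // OversizedMessageError returns nil for lengths at or under the limit,
    // so this only produces an error if 64 MiB really is over the limit.
    inspect(ttrpc.OversizedMessageError(64 << 20))
}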
2 e2e/vendor/github.com/containerd/typeurl/v2/.gitignore generated vendored Normal file
@ -0,0 +1,2 @@
*.test
coverage.txt
26 e2e/vendor/github.com/containerd/typeurl/v2/README.md generated vendored Normal file
@ -0,0 +1,26 @@
# typeurl

[](https://pkg.go.dev/github.com/containerd/typeurl)
[](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/containerd/typeurl)
[](https://goreportcard.com/report/github.com/containerd/typeurl)

A Go package for managing the registration, marshaling, and unmarshaling of encoded types.

This package helps when types are sent over a ttrpc/GRPC API and marshaled as a protobuf [Any](https://pkg.go.dev/google.golang.org/protobuf@v1.27.1/types/known/anypb#Any).

## Project details

**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.

## Optional

By default, support for gogoproto is available alongside the standard Google
protobuf types.
You can choose to leave gogo support out by using the `!no_gogo` build tag.
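A minimal round trip with the API the README describes, registering a plain Go struct so it is marshalled as JSON inside the Any; the type name and URL path below are arbitrary choices for the sketch:

package main

import (
    "fmt"

    typeurl "github.com/containerd/typeurl/v2"
)

type Greeting struct {
    Message string
}

func init() {
    // Register the type under an illustrative URL path.
    typeurl.Register(&Greeting{}, "example.dev", "Greeting")
}

func main() {
    anyVal, err := typeurl.MarshalAny(&Greeting{Message: "hello"})
    if err != nil {
        panic(err)
    }
    fmt.Println(anyVal.GetTypeUrl()) // example.dev/Greeting

    out, err := typeurl.UnmarshalAny(anyVal)
    if err != nil {
        panic(err)
    }
    fmt.Println(out.(*Greeting).Message) // hello
}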
83 e2e/vendor/github.com/containerd/typeurl/v2/doc.go generated vendored Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package typeurl
|
||||
|
||||
// Package typeurl assists with managing the registration, marshaling, and
|
||||
// unmarshaling of types encoded as protobuf.Any.
|
||||
//
|
||||
// A protobuf.Any is a proto message that can contain any arbitrary data. It
|
||||
// consists of two components, a TypeUrl and a Value, and its proto definition
|
||||
// looks like this:
|
||||
//
|
||||
// message Any {
|
||||
// string type_url = 1;
|
||||
// bytes value = 2;
|
||||
// }
|
||||
//
|
||||
// The TypeUrl is used to distinguish the contents from other proto.Any
|
||||
// messages. This typeurl library manages these URLs to enable automagic
|
||||
// marshaling and unmarshaling of the contents.
|
||||
//
|
||||
// For example, consider this go struct:
|
||||
//
|
||||
// type Foo struct {
|
||||
// Field1 string
|
||||
// Field2 string
|
||||
// }
|
||||
//
|
||||
// To use typeurl, types must first be registered. This is typically done in
|
||||
// the init function
|
||||
//
|
||||
// func init() {
|
||||
// typeurl.Register(&Foo{}, "Foo")
|
||||
// }
|
||||
//
|
||||
// This will register the type Foo with the url path "Foo". The arguments to
|
||||
// Register are variadic, and are used to construct a url path. Consider this
|
||||
// example, from the github.com/containerd/containerd/client package:
|
||||
//
|
||||
// func init() {
|
||||
// const prefix = "types.containerd.io"
|
||||
// // register TypeUrls for commonly marshaled external types
|
||||
// major := strconv.Itoa(specs.VersionMajor)
|
||||
// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
|
||||
// // this function has more Register calls, which are elided.
|
||||
// }
|
||||
//
|
||||
// This registers several types under a more complex url, which ends up mapping
|
||||
// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
|
||||
// value for major).
|
||||
//
|
||||
// Once a type is registered, it can be marshaled to a proto.Any message simply
|
||||
// by calling `MarshalAny`, like this:
|
||||
//
|
||||
// foo := &Foo{Field1: "value1", Field2: "value2"}
|
||||
// anyFoo, err := typeurl.MarshalAny(foo)
|
||||
//
|
||||
// MarshalAny will resolve the correct URL for the type. If the type in
|
||||
// question implements the proto.Message interface, then it will be marshaled
|
||||
// as a proto message. Otherwise, it will be marshaled as json. This means that
|
||||
// typeurl will work on any arbitrary data, whether or not it has a proto
|
||||
// definition, as long as it can be serialized to json.
|
||||
//
|
||||
// To unmarshal, the process is simply inverse:
|
||||
//
|
||||
// iface, err := typeurl.UnmarshalAny(anyFoo)
|
||||
// foo := iface.(*Foo)
|
||||
//
|
||||
// The correct type is automatically chosen from the type registry, and the
|
||||
// returned interface can be cast straight to that type.
|
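The doc.go package documentation above also covers the proto path: when the value implements proto.Message, no Register call is needed, the type URL is taken from the message's full name, and the payload is proto-encoded rather than JSON. A sketch with a well-known type:

package main

import (
    "fmt"
    "time"

    typeurl "github.com/containerd/typeurl/v2"
    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    anyVal, err := typeurl.MarshalAny(durationpb.New(3 * time.Second))
    if err != nil {
        panic(err)
    }
    fmt.Println(anyVal.GetTypeUrl()) // google.protobuf.Duration

    out, err := typeurl.UnmarshalAny(anyVal)
    if err != nil {
        panic(err)
    }
    fmt.Println(out.(*durationpb.Duration).AsDuration()) // 3s
}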
307 e2e/vendor/github.com/containerd/typeurl/v2/types.go generated vendored Normal file
@ -0,0 +1,307 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package typeurl
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
var (
|
||||
mu sync.RWMutex
|
||||
registry = make(map[reflect.Type]string)
|
||||
handlers []handler
|
||||
)
|
||||
|
||||
type handler interface {
|
||||
Marshaller(interface{}) func() ([]byte, error)
|
||||
Unmarshaller(interface{}) func([]byte) error
|
||||
TypeURL(interface{}) string
|
||||
GetType(url string) reflect.Type
|
||||
}
|
||||
|
||||
// Definitions of common error types used throughout typeurl.
|
||||
//
|
||||
// These error types are used with errors.Wrap and errors.Wrapf to add context
|
||||
// to an error.
|
||||
//
|
||||
// To detect an error class, use errors.Is() functions to tell whether an
|
||||
// error is of this type.
|
||||
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
)
|
||||
|
||||
// Any contains an arbitrary protcol buffer message along with its type.
|
||||
//
|
||||
// While there is google.golang.org/protobuf/types/known/anypb.Any,
|
||||
// we'd like to have our own to hide the underlying protocol buffer
|
||||
// implementations from containerd clients.
|
||||
//
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#any
|
||||
type Any interface {
|
||||
// GetTypeUrl returns a URL/resource name that uniquely identifies
|
||||
// the type of the serialized protocol buffer message.
|
||||
GetTypeUrl() string
|
||||
|
||||
// GetValue returns a valid serialized protocol buffer of the type that
|
||||
// GetTypeUrl() indicates.
|
||||
GetValue() []byte
|
||||
}
|
||||
|
||||
type anyType struct {
|
||||
typeURL string
|
||||
value []byte
|
||||
}
|
||||
|
||||
func (a *anyType) GetTypeUrl() string {
|
||||
if a == nil {
|
||||
return ""
|
||||
}
|
||||
return a.typeURL
|
||||
}
|
||||
|
||||
func (a *anyType) GetValue() []byte {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
return a.value
|
||||
}
|
||||
|
||||
// Register a type with a base URL for JSON marshaling. When the MarshalAny and
|
||||
// UnmarshalAny functions are called they will treat the Any type value as JSON.
|
||||
// To use protocol buffers for handling the Any value the proto.Register
|
||||
// function should be used instead of this function.
|
||||
func Register(v interface{}, args ...string) {
|
||||
var (
|
||||
t = tryDereference(v)
|
||||
p = path.Join(args...)
|
||||
)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if et, ok := registry[t]; ok {
|
||||
if et != p {
|
||||
panic(fmt.Errorf("type registered with alternate path %q != %q", et, p))
|
||||
}
|
||||
return
|
||||
}
|
||||
registry[t] = p
|
||||
}
|
||||
|
||||
// TypeURL returns the type url for a registered type.
|
||||
func TypeURL(v interface{}) (string, error) {
|
||||
mu.RLock()
|
||||
u, ok := registry[tryDereference(v)]
|
||||
mu.RUnlock()
|
||||
if !ok {
|
||||
switch t := v.(type) {
|
||||
case proto.Message:
|
||||
return string(t.ProtoReflect().Descriptor().FullName()), nil
|
||||
default:
|
||||
for _, h := range handlers {
|
||||
if u := h.TypeURL(v); u != "" {
|
||||
return u, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
|
||||
}
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// Is returns true if the type of the Any is the same as v.
|
||||
func Is(any Any, v interface{}) bool {
|
||||
if any == nil {
|
||||
return false
|
||||
}
|
||||
// call to check that v is a pointer
|
||||
tryDereference(v)
|
||||
url, err := TypeURL(v)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return any.GetTypeUrl() == url
|
||||
}
|
||||
|
||||
// MarshalAny marshals the value v into an any with the correct TypeUrl.
|
||||
// If the provided object is already a proto.Any message, then it will be
|
||||
// returned verbatim. If it is of type proto.Message, it will be marshaled as a
|
||||
// protocol buffer. Otherwise, the object will be marshaled to json.
|
||||
func MarshalAny(v interface{}) (Any, error) {
|
||||
var marshal func(v interface{}) ([]byte, error)
|
||||
switch t := v.(type) {
|
||||
case Any:
|
||||
// avoid reserializing the type if we have an any.
|
||||
return t, nil
|
||||
case proto.Message:
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return proto.Marshal(t)
|
||||
}
|
||||
default:
|
||||
for _, h := range handlers {
|
||||
if m := h.Marshaller(v); m != nil {
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return m()
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if marshal == nil {
|
||||
marshal = json.Marshal
|
||||
}
|
||||
}
|
||||
|
||||
url, err := TypeURL(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &anyType{
|
||||
typeURL: url,
|
||||
value: data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UnmarshalAny unmarshals the any type into a concrete type.
|
||||
func UnmarshalAny(any Any) (interface{}, error) {
|
||||
return UnmarshalByTypeURL(any.GetTypeUrl(), any.GetValue())
|
||||
}
|
||||
|
||||
// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type.
|
||||
func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
|
||||
return unmarshal(typeURL, value, nil)
|
||||
}
|
||||
|
||||
// UnmarshalTo unmarshals the any type into a concrete type passed in the out
|
||||
// argument. It is identical to UnmarshalAny, but lets clients provide a
|
||||
// destination type through the out argument.
|
||||
func UnmarshalTo(any Any, out interface{}) error {
|
||||
return UnmarshalToByTypeURL(any.GetTypeUrl(), any.GetValue(), out)
|
||||
}
|
||||
|
||||
// UnmarshalToByTypeURL unmarshals the given type and value into a concrete type passed
|
||||
// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients
|
||||
// provide a destination type through the out argument.
|
||||
func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
|
||||
_, err := unmarshal(typeURL, value, out)
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any.
|
||||
func MarshalProto(from Any) *anypb.Any {
|
||||
if from == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pbany, ok := from.(*anypb.Any); ok {
|
||||
return pbany
|
||||
}
|
||||
|
||||
return &anypb.Any{
|
||||
TypeUrl: from.GetTypeUrl(),
|
||||
Value: from.GetValue(),
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any.
|
||||
func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
|
||||
anyType, err := MarshalAny(from)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return MarshalProto(anyType), nil
|
||||
}
|
||||
|
||||
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
|
||||
t, err := getTypeByUrl(typeURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
v = reflect.New(t).Interface()
|
||||
} else {
|
||||
// Validate interface type provided by client
|
||||
vURL, err := TypeURL(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if typeURL != vURL {
|
||||
return nil, fmt.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
|
||||
}
|
||||
}
|
||||
|
||||
pm, ok := v.(proto.Message)
|
||||
if ok {
|
||||
return v, proto.Unmarshal(value, pm)
|
||||
}
|
||||
|
||||
for _, h := range handlers {
|
||||
if unmarshal := h.Unmarshaller(v); unmarshal != nil {
|
||||
return v, unmarshal(value)
|
||||
}
|
||||
}
|
||||
|
||||
// fallback to json unmarshaller
|
||||
return v, json.Unmarshal(value, v)
|
||||
}
|
||||
|
||||
func getTypeByUrl(url string) (reflect.Type, error) {
|
||||
mu.RLock()
|
||||
for t, u := range registry {
|
||||
if u == url {
|
||||
mu.RUnlock()
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
mu.RUnlock()
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
if errors.Is(err, protoregistry.NotFound) {
|
||||
for _, h := range handlers {
|
||||
if t := h.GetType(url); t != nil {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
|
||||
}
|
||||
empty := mt.New().Interface()
|
||||
return reflect.TypeOf(empty).Elem(), nil
|
||||
}
|
||||
|
||||
func tryDereference(v interface{}) reflect.Type {
|
||||
t := reflect.TypeOf(v)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
// require check of pointer but dereference to register
|
||||
return t.Elem()
|
||||
}
|
||||
panic("v is not a pointer to a type")
|
||||
}
|
68 e2e/vendor/github.com/containerd/typeurl/v2/types_gogo.go generated vendored Normal file
@ -0,0 +1,68 @@
|
||||
//go:build !no_gogo
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package typeurl
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
gogoproto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
func init() {
|
||||
handlers = append(handlers, gogoHandler{})
|
||||
}
|
||||
|
||||
type gogoHandler struct{}
|
||||
|
||||
func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
|
||||
pm, ok := v.(gogoproto.Message)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return func() ([]byte, error) {
|
||||
return gogoproto.Marshal(pm)
|
||||
}
|
||||
}
|
||||
|
||||
func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
|
||||
pm, ok := v.(gogoproto.Message)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return func(dt []byte) error {
|
||||
return gogoproto.Unmarshal(dt, pm)
|
||||
}
|
||||
}
|
||||
|
||||
func (gogoHandler) TypeURL(v interface{}) string {
|
||||
pm, ok := v.(gogoproto.Message)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return gogoproto.MessageName(pm)
|
||||
}
|
||||
|
||||
func (gogoHandler) GetType(url string) reflect.Type {
|
||||
t := gogoproto.MessageType(url)
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return t.Elem()
|
||||
}
|
202 e2e/vendor/github.com/coreos/go-semver/LICENSE generated vendored
@ -1,202 +0,0 @@
5 e2e/vendor/github.com/coreos/go-semver/NOTICE generated vendored
@ -1,5 +0,0 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
296 e2e/vendor/github.com/coreos/go-semver/semver/semver.go generated vendored
@ -1,296 +0,0 @@
|
||||
// Copyright 2013-2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Semantic Versions http://semver.org
|
||||
package semver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Version struct {
|
||||
Major int64
|
||||
Minor int64
|
||||
Patch int64
|
||||
PreRelease PreRelease
|
||||
Metadata string
|
||||
}
|
||||
|
||||
type PreRelease string
|
||||
|
||||
func splitOff(input *string, delim string) (val string) {
|
||||
parts := strings.SplitN(*input, delim, 2)
|
||||
|
||||
if len(parts) == 2 {
|
||||
*input = parts[0]
|
||||
val = parts[1]
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func New(version string) *Version {
|
||||
return Must(NewVersion(version))
|
||||
}
|
||||
|
||||
func NewVersion(version string) (*Version, error) {
|
||||
v := Version{}
|
||||
|
||||
if err := v.Set(version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v, nil
|
||||
}
|
||||
|
||||
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
|
||||
func Must(v *Version, err error) *Version {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Set parses and updates v from the given version string. Implements flag.Value
|
||||
func (v *Version) Set(version string) error {
|
||||
metadata := splitOff(&version, "+")
|
||||
preRelease := PreRelease(splitOff(&version, "-"))
|
||||
dotParts := strings.SplitN(version, ".", 3)
|
||||
|
||||
if len(dotParts) != 3 {
|
||||
return fmt.Errorf("%s is not in dotted-tri format", version)
|
||||
}
|
||||
|
||||
if err := validateIdentifier(string(preRelease)); err != nil {
|
||||
return fmt.Errorf("failed to validate pre-release: %v", err)
|
||||
}
|
||||
|
||||
if err := validateIdentifier(metadata); err != nil {
|
||||
return fmt.Errorf("failed to validate metadata: %v", err)
|
||||
}
|
||||
|
||||
parsed := make([]int64, 3)
|
||||
|
||||
for i, v := range dotParts[:3] {
|
||||
val, err := strconv.ParseInt(v, 10, 64)
|
||||
parsed[i] = val
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v.Metadata = metadata
|
||||
v.PreRelease = preRelease
|
||||
v.Major = parsed[0]
|
||||
v.Minor = parsed[1]
|
||||
v.Patch = parsed[2]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
|
||||
|
||||
if v.PreRelease != "" {
|
||||
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
|
||||
}
|
||||
|
||||
if v.Metadata != "" {
|
||||
fmt.Fprintf(&buffer, "+%s", v.Metadata)
|
||||
}
|
||||
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var data string
|
||||
if err := unmarshal(&data); err != nil {
|
||||
return err
|
||||
}
|
||||
return v.Set(data)
|
||||
}
|
||||
|
||||
func (v Version) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + v.String() + `"`), nil
|
||||
}
|
||||
|
||||
func (v *Version) UnmarshalJSON(data []byte) error {
|
||||
l := len(data)
|
||||
if l == 0 || string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
if l < 2 || data[0] != '"' || data[l-1] != '"' {
|
||||
return errors.New("invalid semver string")
|
||||
}
|
||||
return v.Set(string(data[1 : l-1]))
|
||||
}
|
||||
|
||||
// Compare tests if v is less than, equal to, or greater than versionB,
|
||||
// returning -1, 0, or +1 respectively.
|
||||
func (v Version) Compare(versionB Version) int {
|
||||
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
return preReleaseCompare(v, versionB)
|
||||
}
|
||||
|
||||
// Equal tests if v is equal to versionB.
|
||||
func (v Version) Equal(versionB Version) bool {
|
||||
return v.Compare(versionB) == 0
|
||||
}
|
||||
|
||||
// LessThan tests if v is less than versionB.
|
||||
func (v Version) LessThan(versionB Version) bool {
|
||||
return v.Compare(versionB) < 0
|
||||
}
|
||||
|
||||
// Slice converts the comparable parts of the semver into a slice of integers.
|
||||
func (v Version) Slice() []int64 {
|
||||
return []int64{v.Major, v.Minor, v.Patch}
|
||||
}
|
||||
|
||||
func (p PreRelease) Slice() []string {
|
||||
preRelease := string(p)
|
||||
return strings.Split(preRelease, ".")
|
||||
}
|
||||
|
||||
func preReleaseCompare(versionA Version, versionB Version) int {
|
||||
a := versionA.PreRelease
|
||||
b := versionB.PreRelease
|
||||
|
||||
/* Handle the case where if two versions are otherwise equal it is the
|
||||
* one without a PreRelease that is greater */
|
||||
if len(a) == 0 && (len(b) > 0) {
|
||||
return 1
|
||||
} else if len(b) == 0 && (len(a) > 0) {
|
||||
return -1
|
||||
}
|
||||
|
||||
// If there is a prerelease, check and compare each part.
|
||||
return recursivePreReleaseCompare(a.Slice(), b.Slice())
|
||||
}
|
||||
|
||||
func recursiveCompare(versionA []int64, versionB []int64) int {
|
||||
if len(versionA) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
a := versionA[0]
|
||||
b := versionB[0]
|
||||
|
||||
if a > b {
|
||||
return 1
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
|
||||
return recursiveCompare(versionA[1:], versionB[1:])
|
||||
}
|
||||
|
||||
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
|
||||
// A larger set of pre-release fields has a higher precedence than a smaller set,
|
||||
// if all of the preceding identifiers are equal.
|
||||
if len(versionA) == 0 {
|
||||
if len(versionB) > 0 {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
} else if len(versionB) == 0 {
|
||||
// We're longer than versionB so return 1.
|
||||
return 1
|
||||
}
|
||||
|
||||
a := versionA[0]
|
||||
b := versionB[0]
|
||||
|
||||
aInt := false
|
||||
bInt := false
|
||||
|
||||
aI, err := strconv.Atoi(versionA[0])
|
||||
if err == nil {
|
||||
aInt = true
|
||||
}
|
||||
|
||||
bI, err := strconv.Atoi(versionB[0])
|
||||
if err == nil {
|
||||
bInt = true
|
||||
}
|
||||
|
||||
// Numeric identifiers always have lower precedence than non-numeric identifiers.
|
||||
if aInt && !bInt {
|
||||
return -1
|
||||
} else if !aInt && bInt {
|
||||
return 1
|
||||
}
|
||||
|
||||
// Handle Integer Comparison
|
||||
if aInt && bInt {
|
||||
if aI > bI {
|
||||
return 1
|
||||
} else if aI < bI {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// Handle String Comparison
|
||||
if a > b {
|
||||
return 1
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
|
||||
return recursivePreReleaseCompare(versionA[1:], versionB[1:])
|
||||
}
|
||||
|
||||
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
|
||||
func (v *Version) BumpMajor() {
|
||||
v.Major += 1
|
||||
v.Minor = 0
|
||||
v.Patch = 0
|
||||
v.PreRelease = PreRelease("")
|
||||
v.Metadata = ""
|
||||
}
|
||||
|
||||
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
|
||||
func (v *Version) BumpMinor() {
|
||||
v.Minor += 1
|
||||
v.Patch = 0
|
||||
v.PreRelease = PreRelease("")
|
||||
v.Metadata = ""
|
||||
}
|
||||
|
||||
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
|
||||
func (v *Version) BumpPatch() {
|
||||
v.Patch += 1
|
||||
v.PreRelease = PreRelease("")
|
||||
v.Metadata = ""
|
||||
}
|
||||
|
||||
// validateIdentifier makes sure the provided identifier satisfies semver spec
|
||||
func validateIdentifier(id string) error {
|
||||
if id != "" && !reIdentifier.MatchString(id) {
|
||||
return fmt.Errorf("%s is not a valid semver identifier", id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// reIdentifier is a regular expression used to check that pre-release and metadata
|
||||
// identifiers satisfy the spec requirements
|
||||
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
|
38 e2e/vendor/github.com/coreos/go-semver/semver/sort.go generated vendored
@ -1,38 +0,0 @@
|
||||
// Copyright 2013-2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package semver
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Versions []*Version
|
||||
|
||||
func (s Versions) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s Versions) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s Versions) Less(i, j int) bool {
|
||||
return s[i].LessThan(*s[j])
|
||||
}
|
||||
|
||||
// Sort sorts the given slice of Version
|
||||
func Sort(versions []*Version) {
|
||||
sort.Sort(Versions(versions))
|
||||
}
|
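sort.go simply wires Versions into sort.Interface. A short sketch of sorting a slice with it, under the same assumed import path as above:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	versions := []*semver.Version{
		semver.New("1.10.0"),
		semver.New("1.2.0"),
		semver.New("1.2.0-rc.1"),
	}
	// Sort uses Versions.Less, which delegates to Version.LessThan.
	semver.Sort(versions)
	for _, v := range versions {
		fmt.Println(v) // 1.2.0-rc.1, 1.2.0, 1.10.0
	}
}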
84  e2e/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go  (generated, vendored)
@@ -1,84 +0,0 @@
|
||||
// Copyright 2014 Docker, Inc.
|
||||
// Copyright 2015-2018 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
// Package daemon provides a Go implementation of the sd_notify protocol.
|
||||
// It can be used to inform systemd of service start-up completion, watchdog
|
||||
// events, and other status changes.
|
||||
//
|
||||
// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
// SdNotifyReady tells the service manager that service startup is finished
|
||||
// or the service finished loading its configuration.
|
||||
SdNotifyReady = "READY=1"
|
||||
|
||||
// SdNotifyStopping tells the service manager that the service is beginning
|
||||
// its shutdown.
|
||||
SdNotifyStopping = "STOPPING=1"
|
||||
|
||||
// SdNotifyReloading tells the service manager that this service is
|
||||
// reloading its configuration. Note that you must call SdNotifyReady when
|
||||
// it completed reloading.
|
||||
SdNotifyReloading = "RELOADING=1"
|
||||
|
||||
// SdNotifyWatchdog tells the service manager to update the watchdog
|
||||
// timestamp for the service.
|
||||
SdNotifyWatchdog = "WATCHDOG=1"
|
||||
)
|
||||
|
||||
// SdNotify sends a message to the init daemon. It is common to ignore the error.
|
||||
// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
|
||||
// will be unconditionally unset.
|
||||
//
|
||||
// It returns one of the following:
|
||||
// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
|
||||
// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
|
||||
// (true, nil) - notification supported, data has been sent
|
||||
func SdNotify(unsetEnvironment bool, state string) (bool, error) {
|
||||
socketAddr := &net.UnixAddr{
|
||||
Name: os.Getenv("NOTIFY_SOCKET"),
|
||||
Net: "unixgram",
|
||||
}
|
||||
|
||||
// NOTIFY_SOCKET not set
|
||||
if socketAddr.Name == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if unsetEnvironment {
|
||||
if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
|
||||
// Error connecting to NOTIFY_SOCKET
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
if _, err = conn.Write([]byte(state)); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
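The sd_notify helper above is also being dropped from the vendor tree. For reference, a service built against github.com/coreos/go-systemd/v22/daemon would typically signal readiness like this (a sketch; the error is commonly ignored when not running under systemd):

package main

import (
	"log"

	"github.com/coreos/go-systemd/v22/daemon"
)

func main() {
	// ... perform start-up work, bind sockets, etc. ...

	// Tell the service manager we are ready. (false, nil) simply means
	// NOTIFY_SOCKET is unset and no notification was sent.
	sent, err := daemon.SdNotify(false, daemon.SdNotifyReady)
	if err != nil {
		log.Printf("sd_notify failed: %v", err)
	} else if !sent {
		log.Print("not running under systemd notify supervision")
	}
}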
73  e2e/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go  (generated, vendored)
@@ -1,73 +0,0 @@
|
||||
// Copyright 2016 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SdWatchdogEnabled returns watchdog information for a service.
|
||||
// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every
|
||||
// time / 2.
|
||||
// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and
|
||||
// `WATCHDOG_PID` will be unconditionally unset.
|
||||
//
|
||||
// It returns one of the following:
|
||||
// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
|
||||
// (0, err) - an error happened (e.g. error converting time).
|
||||
// (time, nil) - watchdog is enabled and we can send ping. time is delay
|
||||
// before inactive service will be killed.
|
||||
func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
|
||||
wusec := os.Getenv("WATCHDOG_USEC")
|
||||
wpid := os.Getenv("WATCHDOG_PID")
|
||||
if unsetEnvironment {
|
||||
wusecErr := os.Unsetenv("WATCHDOG_USEC")
|
||||
wpidErr := os.Unsetenv("WATCHDOG_PID")
|
||||
if wusecErr != nil {
|
||||
return 0, wusecErr
|
||||
}
|
||||
if wpidErr != nil {
|
||||
return 0, wpidErr
|
||||
}
|
||||
}
|
||||
|
||||
if wusec == "" {
|
||||
return 0, nil
|
||||
}
|
||||
s, err := strconv.Atoi(wusec)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
|
||||
}
|
||||
if s <= 0 {
|
||||
return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
|
||||
}
|
||||
interval := time.Duration(s) * time.Microsecond
|
||||
|
||||
if wpid == "" {
|
||||
return interval, nil
|
||||
}
|
||||
p, err := strconv.Atoi(wpid)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
|
||||
}
|
||||
if os.Getpid() != p {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return interval, nil
|
||||
}
|
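SdWatchdogEnabled pairs with SdNotify(false, SdNotifyWatchdog); a hedged sketch of the usual ping loop at half the configured interval, as the package documentation recommends:

package main

import (
	"log"
	"time"

	"github.com/coreos/go-systemd/v22/daemon"
)

func main() {
	interval, err := daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		log.Printf("watchdog not enabled: %v", err)
		return
	}
	// Ping at interval/2 so the service is never killed by a late ping.
	for range time.Tick(interval / 2) {
		if _, err := daemon.SdNotify(false, daemon.SdNotifyWatchdog); err != nil {
			log.Printf("watchdog ping failed: %v", err)
		}
	}
}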
46  e2e/vendor/github.com/coreos/go-systemd/v22/journal/journal.go  (generated, vendored)
@@ -1,46 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal a C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Priority of a journal message
|
||||
type Priority int
|
||||
|
||||
const (
|
||||
PriEmerg Priority = iota
|
||||
PriAlert
|
||||
PriCrit
|
||||
PriErr
|
||||
PriWarning
|
||||
PriNotice
|
||||
PriInfo
|
||||
PriDebug
|
||||
)
|
||||
|
||||
// Print prints a message to the local systemd journal using Send().
|
||||
func Print(priority Priority, format string, a ...interface{}) error {
|
||||
return Send(fmt.Sprintf(format, a...), priority, nil)
|
||||
}
|
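journal.Print is a thin wrapper over Send; a minimal sketch (message text is illustrative):

package main

import "github.com/coreos/go-systemd/v22/journal"

func main() {
	if journal.Enabled() {
		// Formats the message and forwards it to Send with no extra fields.
		_ = journal.Print(journal.PriInfo, "service started in %s", "/srv/app")
	}
}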
267  e2e/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go  (generated, vendored)
@@ -1,267 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal a C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
// This can be overridden at build-time:
|
||||
// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
|
||||
journalSocket = "/run/systemd/journal/socket"
|
||||
|
||||
// unixConnPtr atomically holds the local unconnected Unix-domain socket.
|
||||
// Concrete safe pointer type: *net.UnixConn
|
||||
unixConnPtr unsafe.Pointer
|
||||
// onceConn ensures that unixConnPtr is initialized exactly once.
|
||||
onceConn sync.Once
|
||||
)
|
||||
|
||||
// Enabled checks whether the local systemd journal is available for logging.
|
||||
func Enabled() bool {
|
||||
if c := getOrInitConn(); c == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
conn, err := net.Dial("unixgram", journalSocket)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// StderrIsJournalStream returns whether the process stderr is connected
|
||||
// to the Journal's stream transport.
|
||||
//
|
||||
// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
|
||||
//
|
||||
// Returns true if JOURNAL_STREAM environment variable is present,
|
||||
// and stderr's device and inode numbers match it.
|
||||
//
|
||||
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
|
||||
// is present, but malformed, fstat syscall fails, etc.
|
||||
//
|
||||
// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
|
||||
func StderrIsJournalStream() (bool, error) {
|
||||
return fdIsJournalStream(syscall.Stderr)
|
||||
}
|
||||
|
||||
// StdoutIsJournalStream returns whether the process stdout is connected
|
||||
// to the Journal's stream transport.
|
||||
//
|
||||
// Returns true if JOURNAL_STREAM environment variable is present,
|
||||
// and stdout's device and inode numbers match it.
|
||||
//
|
||||
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
|
||||
// is present, but malformed, fstat syscall fails, etc.
|
||||
//
|
||||
// Most users should probably use [StderrIsJournalStream].
|
||||
func StdoutIsJournalStream() (bool, error) {
|
||||
return fdIsJournalStream(syscall.Stdout)
|
||||
}
|
||||
|
||||
func fdIsJournalStream(fd int) (bool, error) {
|
||||
journalStream := os.Getenv("JOURNAL_STREAM")
|
||||
if journalStream == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var expectedStat syscall.Stat_t
|
||||
_, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
|
||||
}
|
||||
|
||||
var stat syscall.Stat_t
|
||||
err = syscall.Fstat(fd, &stat)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
|
||||
return match, nil
|
||||
}
|
||||
|
||||
// Send a message to the local systemd journal. vars is a map of journald
|
||||
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||
// and underscores, but must not start with an underscore. Within these
|
||||
// restrictions, any arbitrary field name may be used. Some names have special
|
||||
// significance: see the journalctl documentation
|
||||
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
|
||||
// for more details. vars may be nil.
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
conn := getOrInitConn()
|
||||
if conn == nil {
|
||||
return errors.New("could not initialize socket to journald")
|
||||
}
|
||||
|
||||
socketAddr := &net.UnixAddr{
|
||||
Name: journalSocket,
|
||||
Net: "unixgram",
|
||||
}
|
||||
|
||||
data := new(bytes.Buffer)
|
||||
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
|
||||
appendVariable(data, "MESSAGE", message)
|
||||
for k, v := range vars {
|
||||
appendVariable(data, k, v)
|
||||
}
|
||||
|
||||
_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if !isSocketSpaceError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Large log entry, send it via tempfile and ancillary-fd.
|
||||
file, err := tempFd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
_, err = io.Copy(file, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rights := syscall.UnixRights(int(file.Fd()))
|
||||
_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
|
||||
func getOrInitConn() *net.UnixConn {
|
||||
conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
|
||||
if conn != nil {
|
||||
return conn
|
||||
}
|
||||
onceConn.Do(initConn)
|
||||
return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
|
||||
}
|
||||
|
||||
func appendVariable(w io.Writer, name, value string) {
|
||||
if err := validVarName(name); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
|
||||
}
|
||||
if strings.ContainsRune(value, '\n') {
|
||||
/* When the value contains a newline, we write:
|
||||
* - the variable name, followed by a newline
|
||||
* - the size (in 64bit little endian format)
|
||||
* - the data, followed by a newline
|
||||
*/
|
||||
fmt.Fprintln(w, name)
|
||||
binary.Write(w, binary.LittleEndian, uint64(len(value)))
|
||||
fmt.Fprintln(w, value)
|
||||
} else {
|
||||
/* just write the variable and value all on one line */
|
||||
fmt.Fprintf(w, "%s=%s\n", name, value)
|
||||
}
|
||||
}
|
||||
|
||||
// validVarName validates a variable name to make sure journald will accept it.
|
||||
// The variable name must be in uppercase and consist only of characters,
|
||||
// numbers and underscores, and may not begin with an underscore:
|
||||
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
|
||||
func validVarName(name string) error {
|
||||
if name == "" {
|
||||
return errors.New("Empty variable name")
|
||||
} else if name[0] == '_' {
|
||||
return errors.New("Variable name begins with an underscore")
|
||||
}
|
||||
|
||||
for _, c := range name {
|
||||
if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
|
||||
return errors.New("Variable name contains invalid characters")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// isSocketSpaceError checks whether the error is signaling
|
||||
// an "overlarge message" condition.
|
||||
func isSocketSpaceError(err error) bool {
|
||||
opErr, ok := err.(*net.OpError)
|
||||
if !ok || opErr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
sysErr, ok := opErr.Err.(*os.SyscallError)
|
||||
if !ok || sysErr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
|
||||
}
|
||||
|
||||
// tempFd creates a temporary, unlinked file under `/dev/shm`.
|
||||
func tempFd() (*os.File, error) {
|
||||
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = syscall.Unlink(file.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// initConn initializes the global `unixConnPtr` socket.
|
||||
// It is automatically called when needed.
|
||||
func initConn() {
|
||||
autobind, err := net.ResolveUnixAddr("unixgram", "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sock, err := net.ListenUnixgram("unixgram", autobind)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
|
||||
}
|
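Send accepts arbitrary uppercase journald fields alongside MESSAGE and PRIORITY; a hedged example (the field names and values here are illustrative, not required by the API):

package main

import (
	"log"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	// Field names must be uppercase letters, digits and underscores, and
	// must not start with an underscore (see validVarName above).
	err := journal.Send("volume attach failed", journal.PriErr, map[string]string{
		"UNIT":      "csi-driver.service", // illustrative values
		"VOLUME_ID": "pvc-1234",
	})
	if err != nil {
		log.Printf("journald not reachable: %v", err)
	}
}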
43  e2e/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go  (generated, vendored)
@@ -1,43 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal a C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
func Enabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
return errors.New("could not initialize socket to journald")
|
||||
}
|
||||
|
||||
func StderrIsJournalStream() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func StdoutIsJournalStream() (bool, error) {
|
||||
return false, nil
|
||||
}
|
82  e2e/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md  (generated, vendored)
@@ -6,6 +6,80 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
## [Unreleased] ##
|
||||
|
||||
## [0.4.1] - 2025-01-28 ##
|
||||
|
||||
### Fixed ###
|
||||
- The restrictions added for `root` paths passed to `SecureJoin` in 0.4.0 was
|
||||
found to be too strict and caused some regressions when folks tried to
|
||||
update, so this restriction has been relaxed to only return an error if the
|
||||
path contains a `..` component. We still recommend users use `filepath.Clean`
|
||||
(and even `filepath.EvalSymlinks`) on the `root` path they are using, but at
|
||||
least you will no longer be punished for "trivial" unclean paths.
|
||||
|
||||
## [0.4.0] - 2025-01-13 ##
|
||||
|
||||
### Breaking ####
|
||||
- `SecureJoin(VFS)` will now return an error if the provided `root` is not a
|
||||
`filepath.Clean`'d path.
|
||||
|
||||
While it is ultimately the responsibility of the caller to ensure the root is
|
||||
a safe path to use, passing a path like `/symlink/..` as a root would result
|
||||
in the `SecureJoin`'d path being placed in `/` even though `/symlink/..`
|
||||
might be a different directory, and so we should more strongly discourage
|
||||
such usage.
|
||||
|
||||
All major users of `securejoin.SecureJoin` already ensure that the paths they
|
||||
provide are safe (and this is ultimately a question of user error), but
|
||||
removing this foot-gun is probably a good idea. Of course, this is
|
||||
necessarily a breaking API change (though we expect no real users to be
|
||||
affected by it).
|
||||
|
||||
Thanks to [Erik Sjölund](https://github.com/eriksjolund), who initially
|
||||
reported this issue as a possible security issue.
|
||||
|
||||
- `MkdirAll` and `MkdirHandle` now take an `os.FileMode`-style mode argument
|
||||
instead of a raw `unix.S_*`-style mode argument, which may cause compile-time
|
||||
type errors depending on how you use `filepath-securejoin`. For most users,
|
||||
there will be no change in behaviour aside from the type change (as the
|
||||
bottom `0o777` bits are the same in both formats, and most users are probably
|
||||
only using those bits).
|
||||
|
||||
However, if you were using `unix.S_ISVTX` to set the sticky bit with
|
||||
`MkdirAll(Handle)` you will need to switch to `os.ModeSticky` otherwise you
|
||||
will get a runtime error with this update. In addition, the error message you
|
||||
will get from passing `unix.S_ISUID` and `unix.S_ISGID` will be different as
|
||||
they are treated as invalid bits now (note that previously passing said bits
|
||||
was also an error).
|
||||
|
||||
## [0.3.6] - 2024-12-17 ##
|
||||
|
||||
### Compatibility ###
|
||||
- The minimum Go version requirement for `filepath-securejoin` is now Go 1.18
|
||||
(we use generics internally).
|
||||
|
||||
For reference, `filepath-securejoin@v0.3.0` somewhat-arbitrarily bumped the
|
||||
Go version requirement to 1.21.
|
||||
|
||||
While we did make some use of Go 1.21 stdlib features (and in principle Go
|
||||
versions <= 1.21 are no longer even supported by upstream anymore), some
|
||||
downstreams have complained that the version bump has meant that they have to
|
||||
do workarounds when backporting fixes that use the new `filepath-securejoin`
|
||||
API onto old branches. This is not an ideal situation, but since using this
|
||||
library is probably better for most downstreams than a hand-rolled
|
||||
workaround, we now have compatibility shims that allow us to build on older
|
||||
Go versions.
|
||||
- Lower minimum version requirement for `golang.org/x/sys` to `v0.18.0` (we
|
||||
need the wrappers for `fsconfig(2)`), which should also make backporting
|
||||
patches to older branches easier.
|
||||
|
||||
## [0.3.5] - 2024-12-06 ##
|
||||
|
||||
### Fixed ###
|
||||
- `MkdirAll` will now no longer return an `EEXIST` error if two racing
|
||||
processes are creating the same directory. We will still verify that the path
|
||||
is a directory, but this will avoid spurious errors when multiple threads or
|
||||
programs are trying to `MkdirAll` the same path. opencontainers/runc#4543
|
||||
|
||||
## [0.3.4] - 2024-10-09 ##
|
||||
|
||||
### Fixed ###
|
||||
@ -164,8 +238,12 @@ This is our first release of `github.com/cyphar/filepath-securejoin`,
|
||||
containing a full implementation with a coverage of 93.5% (the only missing
|
||||
cases are the error cases, which are hard to mocktest at the moment).
|
||||
|
||||
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...HEAD
|
||||
[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4
|
||||
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD
|
||||
[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1
|
||||
[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0
|
||||
[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6
|
||||
[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5
|
||||
[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4
|
||||
[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3
|
||||
[0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2
|
||||
[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1
|
||||
|
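The 0.4.x entries above tighten what SecureJoin accepts as a root path. A hedged sketch of the intended call pattern (clean the root first, never let it be attacker-controlled; paths are illustrative):

package main

import (
	"fmt"
	"path/filepath"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// As of v0.4.1 a root containing ".." components is rejected, so clean it first.
	root := filepath.Clean("/var/lib/kubelet/pods/uid/volumes")
	joined, err := securejoin.SecureJoin(root, "../../../../etc/passwd")
	if err != nil {
		fmt.Println("join failed:", err)
		return
	}
	// The result is always scoped inside root, e.g. .../volumes/etc/passwd.
	fmt.Println(joined)
}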
2  e2e/vendor/github.com/cyphar/filepath-securejoin/VERSION  (generated, vendored)
@@ -1 +1 @@
0.3.4
0.4.1
18  e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go  (generated, vendored, Normal file)
@@ -0,0 +1,18 @@
//go:build linux && go1.20

// Copyright (C) 2024 SUSE LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package securejoin

import (
	"fmt"
)

// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap)
// is only guaranteed to give you baseErr.
func wrapBaseError(baseErr, extraErr error) error {
	return fmt.Errorf("%w: %w", extraErr, baseErr)
}
38  e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go  (generated, vendored, Normal file)
@@ -0,0 +1,38 @@
|
||||
//go:build linux && !go1.20
|
||||
|
||||
// Copyright (C) 2024 SUSE LLC. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package securejoin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type wrappedError struct {
|
||||
inner error
|
||||
isError error
|
||||
}
|
||||
|
||||
func (err wrappedError) Is(target error) bool {
|
||||
return err.isError == target
|
||||
}
|
||||
|
||||
func (err wrappedError) Unwrap() error {
|
||||
return err.inner
|
||||
}
|
||||
|
||||
func (err wrappedError) Error() string {
|
||||
return fmt.Sprintf("%v: %v", err.isError, err.inner)
|
||||
}
|
||||
|
||||
// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
|
||||
// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap)
|
||||
// is only guaranteed to give you baseErr.
|
||||
func wrapBaseError(baseErr, extraErr error) error {
|
||||
return wrappedError{
|
||||
inner: baseErr,
|
||||
isError: extraErr,
|
||||
}
|
||||
}
|
32  e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go  (generated, vendored, Normal file)
@@ -0,0 +1,32 @@
|
||||
//go:build linux && go1.21
|
||||
|
||||
// Copyright (C) 2024 SUSE LLC. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package securejoin
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S {
|
||||
return slices.DeleteFunc(slice, delFn)
|
||||
}
|
||||
|
||||
func slices_Contains[S ~[]E, E comparable](slice S, val E) bool {
|
||||
return slices.Contains(slice, val)
|
||||
}
|
||||
|
||||
func slices_Clone[S ~[]E, E any](slice S) S {
|
||||
return slices.Clone(slice)
|
||||
}
|
||||
|
||||
func sync_OnceValue[T any](f func() T) func() T {
|
||||
return sync.OnceValue(f)
|
||||
}
|
||||
|
||||
func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
|
||||
return sync.OnceValues(f)
|
||||
}
|
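The go1.21 build of these shims simply forwards to the stdlib; the point of sync.OnceValue is that the wrapped function runs exactly once and its result is cached. A small sketch of the behaviour the shims preserve (the feature-probe closure is illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	calls := 0
	probe := sync.OnceValue(func() bool {
		calls++
		return true // imagine an expensive feature probe, e.g. for openat2
	})

	fmt.Println(probe(), probe(), probe()) // true true true
	fmt.Println("probe ran", calls, "time(s)") // probe ran 1 time(s)
}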
124  e2e/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go  (generated, vendored, Normal file)
@@ -0,0 +1,124 @@
|
||||
//go:build linux && !go1.21
|
||||
|
||||
// Copyright (C) 2024 SUSE LLC. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package securejoin
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// These are very minimal implementations of functions that appear in Go 1.21's
|
||||
// stdlib, included so that we can build on older Go versions. Most are
|
||||
// borrowed directly from the stdlib, and a few are modified to be "obviously
|
||||
// correct" without needing to copy too many other helpers.
|
||||
|
||||
// clearSlice is equivalent to the builtin clear from Go 1.21.
|
||||
// Copied from the Go 1.24 stdlib implementation.
|
||||
func clearSlice[S ~[]E, E any](slice S) {
|
||||
var zero E
|
||||
for i := range slice {
|
||||
slice[i] = zero
|
||||
}
|
||||
}
|
||||
|
||||
// Copied from the Go 1.24 stdlib implementation.
|
||||
func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
|
||||
for i := range s {
|
||||
if f(s[i]) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Copied from the Go 1.24 stdlib implementation.
|
||||
func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
|
||||
i := slices_IndexFunc(s, del)
|
||||
if i == -1 {
|
||||
return s
|
||||
}
|
||||
// Don't start copying elements until we find one to delete.
|
||||
for j := i + 1; j < len(s); j++ {
|
||||
if v := s[j]; !del(v) {
|
||||
s[i] = v
|
||||
i++
|
||||
}
|
||||
}
|
||||
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
|
||||
return s[:i]
|
||||
}
|
||||
|
||||
// Similar to the stdlib slices.Contains, except that we don't have
|
||||
// slices.Index so we need to use slices.IndexFunc for this non-Func helper.
|
||||
func slices_Contains[S ~[]E, E comparable](s S, v E) bool {
|
||||
return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0
|
||||
}
|
||||
|
||||
// Copied from the Go 1.24 stdlib implementation.
|
||||
func slices_Clone[S ~[]E, E any](s S) S {
|
||||
// Preserve nil in case it matters.
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return append(S([]E{}), s...)
|
||||
}
|
||||
|
||||
// Copied from the Go 1.24 stdlib implementation.
|
||||
func sync_OnceValue[T any](f func() T) func() T {
|
||||
var (
|
||||
once sync.Once
|
||||
valid bool
|
||||
p any
|
||||
result T
|
||||
)
|
||||
g := func() {
|
||||
defer func() {
|
||||
p = recover()
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
}()
|
||||
result = f()
|
||||
f = nil
|
||||
valid = true
|
||||
}
|
||||
return func() T {
|
||||
once.Do(g)
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// Copied from the Go 1.24 stdlib implementation.
|
||||
func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
|
||||
var (
|
||||
once sync.Once
|
||||
valid bool
|
||||
p any
|
||||
r1 T1
|
||||
r2 T2
|
||||
)
|
||||
g := func() {
|
||||
defer func() {
|
||||
p = recover()
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
}()
|
||||
r1, r2 = f()
|
||||
f = nil
|
||||
valid = true
|
||||
}
|
||||
return func() (T1, T2) {
|
||||
once.Do(g)
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
return r1, r2
|
||||
}
|
||||
}
|
49  e2e/vendor/github.com/cyphar/filepath-securejoin/join.go  (generated, vendored)
@@ -1,5 +1,5 @@
|
||||
// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
|
||||
// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
|
||||
// Copyright (C) 2017-2025 SUSE LLC. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
@ -24,6 +24,31 @@ func IsNotExist(err error) bool {
|
||||
return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
|
||||
}
|
||||
|
||||
// errUnsafeRoot is returned if the user provides SecureJoinVFS with a path
|
||||
// that contains ".." components.
|
||||
var errUnsafeRoot = errors.New("root path provided to SecureJoin contains '..' components")
|
||||
|
||||
// stripVolume just gets rid of the Windows volume included in a path. Based on
|
||||
// some godbolt tests, the Go compiler is smart enough to make this a no-op on
|
||||
// Linux.
|
||||
func stripVolume(path string) string {
|
||||
return path[len(filepath.VolumeName(path)):]
|
||||
}
|
||||
|
||||
// hasDotDot checks if the path contains ".." components in a platform-agnostic
|
||||
// way.
|
||||
func hasDotDot(path string) bool {
|
||||
// If we are on Windows, strip any volume letters. It turns out that
|
||||
// C:..\foo may (or may not) be a valid pathname and we need to handle that
|
||||
// leading "..".
|
||||
path = stripVolume(path)
|
||||
// Look for "/../" in the path, but we need to handle leading and trailing
|
||||
// ".."s by adding separators. Doing this with filepath.Separator is ugly
|
||||
// so just convert to Unix-style "/" first.
|
||||
path = filepath.ToSlash(path)
|
||||
return strings.Contains("/"+path+"/", "/../")
|
||||
}
|
||||
|
||||
// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
|
||||
// that the returned path is guaranteed to be scoped inside the provided root
|
||||
// path (when evaluated). Any symbolic links in the path are evaluated with the
|
||||
@ -46,7 +71,22 @@ func IsNotExist(err error) bool {
|
||||
// provided via direct input or when evaluating symlinks. Therefore:
|
||||
//
|
||||
// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
|
||||
//
|
||||
// If the provided root is not [filepath.Clean] then an error will be returned,
|
||||
// as such root paths are bordering on somewhat unsafe and using such paths is
|
||||
// not best practice. We also strongly suggest that any root path is first
|
||||
// fully resolved using [filepath.EvalSymlinks] or otherwise constructed to
|
||||
// avoid containing symlink components. Of course, the root also *must not* be
|
||||
// attacker-controlled.
|
||||
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
|
||||
// The root path must not contain ".." components, otherwise when we join
|
||||
// the subpath we will end up with a weird path. We could work around this
|
||||
// in other ways but users shouldn't be giving us non-lexical root paths in
|
||||
// the first place.
|
||||
if hasDotDot(root) {
|
||||
return "", errUnsafeRoot
|
||||
}
|
||||
|
||||
// Use the os.* VFS implementation if none was specified.
|
||||
if vfs == nil {
|
||||
vfs = osVFS{}
|
||||
@ -59,9 +99,10 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
|
||||
linksWalked int
|
||||
)
|
||||
for remainingPath != "" {
|
||||
if v := filepath.VolumeName(remainingPath); v != "" {
|
||||
remainingPath = remainingPath[len(v):]
|
||||
}
|
||||
// On Windows, if we managed to end up at a path referencing a volume,
|
||||
// drop the volume to make sure we don't end up with broken paths or
|
||||
// escaping the root volume.
|
||||
remainingPath = stripVolume(remainingPath)
|
||||
|
||||
// Get the next path component.
|
||||
var part string
|
||||
|
3  e2e/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go  (generated, vendored)
@@ -12,7 +12,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@ -113,7 +112,7 @@ func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) erro
|
||||
return nil
|
||||
}
|
||||
// Split the link target and clean up any "" parts.
|
||||
linkTargetParts := slices.DeleteFunc(
|
||||
linkTargetParts := slices_DeleteFunc(
|
||||
strings.Split(linkTarget, "/"),
|
||||
func(part string) bool { return part == "" || part == "." })
|
||||
|
||||
|
65  e2e/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go  (generated, vendored)
@@ -11,7 +11,6 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@ -22,6 +21,33 @@ var (
|
||||
errPossibleAttack = errors.New("possible attack detected")
|
||||
)
|
||||
|
||||
// modePermExt is like os.ModePerm except that it also includes the set[ug]id
|
||||
// and sticky bits.
|
||||
const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
|
||||
|
||||
//nolint:cyclop // this function needs to handle a lot of cases
|
||||
func toUnixMode(mode os.FileMode) (uint32, error) {
|
||||
sysMode := uint32(mode.Perm())
|
||||
if mode&os.ModeSetuid != 0 {
|
||||
sysMode |= unix.S_ISUID
|
||||
}
|
||||
if mode&os.ModeSetgid != 0 {
|
||||
sysMode |= unix.S_ISGID
|
||||
}
|
||||
if mode&os.ModeSticky != 0 {
|
||||
sysMode |= unix.S_ISVTX
|
||||
}
|
||||
// We don't allow file type bits.
|
||||
if mode&os.ModeType != 0 {
|
||||
return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode)
|
||||
}
|
||||
// We don't allow other unknown modes.
|
||||
if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 {
|
||||
return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode)
|
||||
}
|
||||
return sysMode, nil
|
||||
}
|
||||
|
||||
// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use
|
||||
// in two respects:
|
||||
//
|
||||
@ -40,17 +66,17 @@ var (
|
||||
// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after
|
||||
// doing [MkdirAll]. If you intend to open the directory after creating it, you
|
||||
// should use MkdirAllHandle.
|
||||
func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) {
|
||||
// Make sure there are no os.FileMode bits set.
|
||||
if mode&^0o7777 != 0 {
|
||||
return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode)
|
||||
func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) {
|
||||
unixMode, err := toUnixMode(mode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid
|
||||
// bits. We could also silently ignore them but since we have very few
|
||||
// users it seems more prudent to return an error so users notice that
|
||||
// these bits will not be set.
|
||||
if mode&^0o1777 != 0 {
|
||||
return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode)
|
||||
if unixMode&^0o1777 != 0 {
|
||||
return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode)
|
||||
}
|
||||
|
||||
// Try to open as much of the path as possible.
|
||||
@ -93,7 +119,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
|
||||
}
|
||||
|
||||
remainingParts := strings.Split(remainingPath, string(filepath.Separator))
|
||||
if slices.Contains(remainingParts, "..") {
|
||||
if slices_Contains(remainingParts, "..") {
|
||||
// The path contained ".." components after the end of the "real"
|
||||
// components. We could try to safely resolve ".." here but that would
|
||||
// add a bunch of extra logic for something that it's not clear even
|
||||
@ -105,9 +131,6 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
|
||||
return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath)
|
||||
}
|
||||
|
||||
// Make sure the mode doesn't have any type bits.
|
||||
mode &^= unix.S_IFMT
|
||||
|
||||
// Create the remaining components.
|
||||
for _, part := range remainingParts {
|
||||
switch part {
|
||||
@ -119,11 +142,20 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
|
||||
// NOTE: mkdir(2) will not follow trailing symlinks, so we can safely
|
||||
// create the final component without worrying about symlink-exchange
|
||||
// attacks.
|
||||
if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil {
|
||||
//
|
||||
// If we get -EEXIST, it's possible that another program created the
|
||||
// directory at the same time as us. In that case, just continue on as
|
||||
// if we created it (if the created inode is not a directory, the
|
||||
// following open call will fail).
|
||||
if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) {
|
||||
err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
|
||||
// Make the error a bit nicer if the directory is dead.
|
||||
if err2 := isDeadInode(currentDir); err2 != nil {
|
||||
err = fmt.Errorf("%w (%w)", err, err2)
|
||||
if deadErr := isDeadInode(currentDir); deadErr != nil {
|
||||
// TODO: Once we bump the minimum Go version to 1.20, we can use
|
||||
// multiple %w verbs for this wrapping. For now we need to use a
|
||||
// compatibility shim for older Go versions.
|
||||
//err = fmt.Errorf("%w (%w)", err, deadErr)
|
||||
err = wrapBaseError(err, deadErr)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@ -188,10 +220,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
|
||||
// If you plan to open the directory after you have created it or want to use
|
||||
// an open directory handle as the root, you should use [MkdirAllHandle] instead.
|
||||
// This function is a wrapper around [MkdirAllHandle].
|
||||
//
|
||||
// NOTE: The mode argument must be set the unix mode bits (unix.S_I...), not
|
||||
// the Go generic mode bits ([os.FileMode]...).
|
||||
func MkdirAll(root, unsafePath string, mode int) error {
|
||||
func MkdirAll(root, unsafePath string, mode os.FileMode) error {
|
||||
rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
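With the 0.4.0 API shown in this hunk, MkdirAll takes an os.FileMode instead of raw unix.S_* bits. A hedged, Linux-only sketch (the paths are illustrative):

package main

import (
	"log"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// os.ModeSticky replaces the old unix.S_ISVTX argument; setuid/setgid
	// bits are still rejected because mkdirat(2) ignores them.
	err := securejoin.MkdirAll("/var/lib/kubelet", "plugins/csi/staging", 0o777|os.ModeSticky)
	if err != nil {
		log.Fatalf("MkdirAll: %v", err)
	}
}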
|
3
e2e/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
generated
vendored
3
e2e/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
generated
vendored
@ -12,12 +12,11 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var hasOpenat2 = sync.OnceValue(func() bool {
|
||||
var hasOpenat2 = sync_OnceValue(func() bool {
|
||||
fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
|
||||
Flags: unix.O_PATH | unix.O_CLOEXEC,
|
||||
Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
|
||||
|
30  e2e/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go  (generated, vendored)
@@ -12,7 +12,6 @@ import (
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@ -54,7 +53,7 @@ func verifyProcRoot(procRoot *os.File) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var hasNewMountApi = sync.OnceValue(func() bool {
|
||||
var hasNewMountApi = sync_OnceValue(func() bool {
|
||||
// All of the pieces of the new mount API we use (fsopen, fsconfig,
|
||||
// fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
|
||||
// just check for one of the syscalls and the others should also be
|
||||
@ -192,11 +191,11 @@ func doGetProcRoot() (*os.File, error) {
|
||||
return procRoot, err
|
||||
}
|
||||
|
||||
var getProcRoot = sync.OnceValues(func() (*os.File, error) {
|
||||
var getProcRoot = sync_OnceValues(func() (*os.File, error) {
|
||||
return doGetProcRoot()
|
||||
})
|
||||
|
||||
var hasProcThreadSelf = sync.OnceValue(func() bool {
|
||||
var hasProcThreadSelf = sync_OnceValue(func() bool {
|
||||
return unix.Access("/proc/thread-self/", unix.F_OK) == nil
|
||||
})
|
||||
|
||||
@ -265,12 +264,20 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread
|
||||
Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
|
||||
// TODO: Once we bump the minimum Go version to 1.20, we can use
|
||||
// multiple %w verbs for this wrapping. For now we need to use a
|
||||
// compatibility shim for older Go versions.
|
||||
//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
|
||||
return nil, nil, wrapBaseError(err, errUnsafeProcfs)
|
||||
}
|
||||
} else {
|
||||
handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
|
||||
// TODO: Once we bump the minimum Go version to 1.20, we can use
|
||||
// multiple %w verbs for this wrapping. For now we need to use a
|
||||
// compatibility shim for older Go versions.
|
||||
//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
|
||||
return nil, nil, wrapBaseError(err, errUnsafeProcfs)
|
||||
}
|
||||
defer func() {
|
||||
if Err != nil {
|
||||
@ -289,12 +296,17 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread
|
||||
return handle, runtime.UnlockOSThread, nil
|
||||
}
|
||||
|
||||
var hasStatxMountId = sync.OnceValue(func() bool {
|
||||
// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to
|
||||
// avoid bumping the requirement for a single constant we can just define it
|
||||
// ourselves.
|
||||
const STATX_MNT_ID_UNIQUE = 0x4000
|
||||
|
||||
var hasStatxMountId = sync_OnceValue(func() bool {
|
||||
var (
|
||||
stx unix.Statx_t
|
||||
// We don't care which mount ID we get. The kernel will give us the
|
||||
// unique one if it is supported.
|
||||
wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
|
||||
wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
|
||||
)
|
||||
err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
|
||||
return err == nil && stx.Mask&wantStxMask != 0
|
||||
@ -310,7 +322,7 @@ func getMountId(dir *os.File, path string) (uint64, error) {
|
||||
stx unix.Statx_t
|
||||
// We don't care which mount ID we get. The kernel will give us the
|
||||
// unique one if it is supported.
|
||||
wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
|
||||
wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
|
||||
)
|
||||
|
||||
err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
|
||||
|
3  e2e/vendor/github.com/golang/protobuf/AUTHORS  (generated, vendored)
@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3  e2e/vendor/github.com/golang/protobuf/CONTRIBUTORS  (generated, vendored)
@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
28  e2e/vendor/github.com/golang/protobuf/LICENSE  (generated, vendored)
@@ -1,28 +0,0 @@
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
324  e2e/vendor/github.com/golang/protobuf/proto/buffer.go  (generated, vendored)
@@ -1,324 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed32 = 5
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
)
|
||||
|
||||
// EncodeVarint returns the varint encoded bytes of v.
|
||||
func EncodeVarint(v uint64) []byte {
|
||||
return protowire.AppendVarint(nil, v)
|
||||
}
|
||||
|
||||
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||
// This is equal to len(EncodeVarint(v)).
|
||||
func SizeVarint(v uint64) int {
|
||||
return protowire.SizeVarint(v)
|
||||
}
|
||||
|
||||
// DecodeVarint parses a varint encoded integer from b,
|
||||
// returning the integer value and the length of the varint.
|
||||
// It returns (0, 0) if there is a parse error.
|
||||
func DecodeVarint(b []byte) (uint64, int) {
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return 0, 0
|
||||
}
|
||||
return v, n
|
||||
}
|
||||
|
||||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||
// It may be reused between invocations to reduce memory usage.
|
||||
type Buffer struct {
|
||||
buf []byte
|
||||
idx int
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer initialized with buf,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
return &Buffer{buf: buf}
|
||||
}
|
||||
|
||||
// SetDeterministic specifies whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||
b.deterministic = deterministic
|
||||
}
|
||||
|
||||
// SetBuf sets buf as the internal buffer,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func (b *Buffer) SetBuf(buf []byte) {
|
||||
b.buf = buf
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Reset clears the internal buffer of all written and unread data.
|
||||
func (b *Buffer) Reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Bytes returns the internal buffer.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf
|
||||
}
|
||||
|
||||
// Unread returns the unread portion of the buffer.
|
||||
func (b *Buffer) Unread() []byte {
|
||||
return b.buf[b.idx:]
|
||||
}
|
||||
|
||||
// Marshal appends the wire-format encoding of m to the buffer.
|
||||
func (b *Buffer) Marshal(m Message) error {
|
||||
var err error
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the wire-format message in the buffer and
|
||||
// places the decoded results in m.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) Unmarshal(m Message) error {
|
||||
err := UnmarshalMerge(b.Unread(), m)
|
||||
b.idx = len(b.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||
|
||||
func (m *unknownFields) String() string { panic("not implemented") }
|
||||
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||
|
||||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||
// to stdout. This is only intended for debugging.
|
||||
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||
m := MessageReflect(new(unknownFields))
|
||||
m.SetUnknown(b)
|
||||
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||
}
|
||||
|
||||
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||
b.buf = protowire.AppendVarint(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed64(v uint64) error {
|
||||
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
|
||||
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
||||
b.buf = protowire.AppendBytes(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes appends length-prefixed raw bytes to the buffer.
|
||||
// It does not validate whether v contains valid UTF-8.
|
||||
func (b *Buffer) EncodeStringBytes(v string) error {
|
||||
b.buf = protowire.AppendString(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
||||
func (b *Buffer) EncodeMessage(m Message) error {
|
||||
var err error
|
||||
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
||||
func (b *Buffer) DecodeVarint() (uint64, error) {
|
||||
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag32() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
|
||||
}
|
||||
|
||||
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
|
||||
func (b *Buffer) DecodeZigzag64() (uint64, error) {
|
||||
v, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
|
||||
}
|
||||
|
||||
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed32() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
|
||||
func (b *Buffer) DecodeFixed64() (uint64, error) {
|
||||
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return 0, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return uint64(v), nil
|
||||
}
|
||||
|
||||
// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
|
||||
// If alloc is specified, it returns a copy of the raw bytes
|
||||
// rather than a sub-slice of the buffer.
|
||||
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
||||
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return nil, protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
if alloc {
|
||||
v = append([]byte(nil), v...)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
|
||||
// It does not validate whether the raw bytes contain valid UTF-8.
|
||||
func (b *Buffer) DecodeStringBytes() (string, error) {
|
||||
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
||||
if n < 0 {
|
||||
return "", protowire.ParseError(n)
|
||||
}
|
||||
b.idx += n
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// DecodeMessage consumes a length-prefixed message from the buffer.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeMessage(m Message) error {
|
||||
v, err := b.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// DecodeGroup consumes a message group from the buffer.
|
||||
// It assumes that the start group marker has already been consumed and
|
||||
// consumes all bytes until (and including) the end group marker.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) DecodeGroup(m Message) error {
|
||||
v, n, err := consumeGroup(b.buf[b.idx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.idx += n
|
||||
return UnmarshalMerge(v, m)
|
||||
}
|
||||
|
||||
// consumeGroup parses b until it finds an end group marker, returning
|
||||
// the raw bytes of the message (excluding the end group marker) and
|
||||
// the total length of the message (including the end group marker).
|
||||
func consumeGroup(b []byte) ([]byte, int, error) {
|
||||
b0 := b
|
||||
depth := 1 // assume this follows a start group marker
|
||||
for {
|
||||
_, wtyp, tagLen := protowire.ConsumeTag(b)
|
||||
if tagLen < 0 {
|
||||
return nil, 0, protowire.ParseError(tagLen)
|
||||
}
|
||||
b = b[tagLen:]
|
||||
|
||||
var valLen int
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
_, valLen = protowire.ConsumeVarint(b)
|
||||
case protowire.Fixed32Type:
|
||||
_, valLen = protowire.ConsumeFixed32(b)
|
||||
case protowire.Fixed64Type:
|
||||
_, valLen = protowire.ConsumeFixed64(b)
|
||||
case protowire.BytesType:
|
||||
_, valLen = protowire.ConsumeBytes(b)
|
||||
case protowire.StartGroupType:
|
||||
depth++
|
||||
case protowire.EndGroupType:
|
||||
depth--
|
||||
default:
|
||||
return nil, 0, errors.New("proto: cannot parse reserved wire type")
|
||||
}
|
||||
if valLen < 0 {
|
||||
return nil, 0, protowire.ParseError(valLen)
|
||||
}
|
||||
b = b[valLen:]
|
||||
|
||||
if depth == 0 {
|
||||
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
|
||||
}
|
||||
}
|
||||
}
|
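As an aside, here is a minimal sketch of how these Buffer encode/decode helpers round-trip values. It assumes the deprecated github.com/golang/protobuf/proto package is imported as proto; NewBuffer belongs to the same package but is outside this hunk.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// Append an unsigned varint and a zig-zag encoded signed value.
	_ = b.EncodeVarint(300)
	v := int64(-42)
	_ = b.EncodeZigzag64(uint64(v))

	// Consume them back in the same order they were written.
	u, _ := b.DecodeVarint()   // 300
	z, _ := b.DecodeZigzag64() // zig-zag decodes back to two's complement
	fmt.Println(u, int64(z))   // 300 -42
}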
63 e2e/vendor/github.com/golang/protobuf/proto/defaults.go generated vendored
@ -1,63 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// SetDefaults sets unpopulated scalar fields to their default values.
|
||||
// Fields within a oneof are not set even if they have a default value.
|
||||
// SetDefaults is recursively called upon any populated message fields.
|
||||
func SetDefaults(m Message) {
|
||||
if m != nil {
|
||||
setDefaults(MessageReflect(m))
|
||||
}
|
||||
}
|
||||
|
||||
func setDefaults(m protoreflect.Message) {
|
||||
fds := m.Descriptor().Fields()
|
||||
for i := 0; i < fds.Len(); i++ {
|
||||
fd := fds.Get(i)
|
||||
if !m.Has(fd) {
|
||||
if fd.HasDefault() && fd.ContainingOneof() == nil {
|
||||
v := fd.Default()
|
||||
if fd.Kind() == protoreflect.BytesKind {
|
||||
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
setDefaults(m.Get(fd).Message())
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
setDefaults(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
setDefaults(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
113 e2e/vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored
@ -1,113 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
// Deprecated: No longer returned.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this type existed for internal-use only.
|
||||
type InternalMessageInfo struct{}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Size(m Message) int {
|
||||
return protoV2.Size(MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
|
||||
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
|
||||
}
|
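A short sketch of how the deprecated UnmarshalJSONEnum helper above handles both the string and the numeric JSON enum forms; the value map and the "State" enum name are made up for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Hypothetical name-to-number map, as generated code would register for an enum.
	names := map[string]int32{"UNKNOWN": 0, "RUNNING": 1, "STOPPED": 2}

	v, _ := proto.UnmarshalJSONEnum(names, []byte(`"RUNNING"`), "State") // new style: string
	n, _ := proto.UnmarshalJSONEnum(names, []byte(`2`), "State")         // old style: number
	fmt.Println(v, n)                                                    // 1 2
}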
58 e2e/vendor/github.com/golang/protobuf/proto/discard.go generated vendored
@ -1,58 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m != nil {
|
||||
discardUnknown(MessageReflect(m))
|
||||
}
|
||||
}
|
||||
|
||||
func discardUnknown(m protoreflect.Message) {
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
discardUnknown(m.Get(fd).Message())
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
discardUnknown(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
discardUnknown(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Discard unknown fields.
|
||||
if len(m.GetUnknown()) > 0 {
|
||||
m.SetUnknown(nil)
|
||||
}
|
||||
}
|
356 e2e/vendor/github.com/golang/protobuf/proto/extensions.go generated vendored
@ -1,356 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
type (
|
||||
// ExtensionDesc represents an extension descriptor and
|
||||
// is used to interact with an extension field in a message.
|
||||
//
|
||||
// Variables of this type are generated in code by protoc-gen-go.
|
||||
ExtensionDesc = protoimpl.ExtensionInfo
|
||||
|
||||
// ExtensionRange represents a range of message extensions.
|
||||
// Used in code generated by protoc-gen-go.
|
||||
ExtensionRange = protoiface.ExtensionRangeV1
|
||||
|
||||
// Deprecated: Do not use; this is an internal type.
|
||||
Extension = protoimpl.ExtensionFieldV1
|
||||
|
||||
// Deprecated: Do not use; this is an internal type.
|
||||
XXX_InternalExtensions = protoimpl.ExtensionFields
|
||||
)
|
||||
|
||||
// ErrMissingExtension reports whether the extension was not present.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
// HasExtension reports whether the extension field is present in m
|
||||
// either as an explicitly populated field or as an unknown field.
|
||||
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether any populated known field matches the field number.
|
||||
xtd := xt.TypeDescriptor()
|
||||
if isValidExtension(mr.Descriptor(), xtd) {
|
||||
has = mr.Has(xtd)
|
||||
} else {
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
has = int32(fd.Number()) == xt.Field
|
||||
return !has
|
||||
})
|
||||
}
|
||||
|
||||
// Check whether any unknown field matches the field number.
|
||||
for b := mr.GetUnknown(); !has && len(b) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b)
|
||||
has = int32(num) == xt.Field
|
||||
b = b[n:]
|
||||
}
|
||||
return has
|
||||
}
|
||||
|
||||
// ClearExtension removes the extension field from m
|
||||
// either as an explicitly populated field or as an unknown field.
|
||||
func ClearExtension(m Message, xt *ExtensionDesc) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
xtd := xt.TypeDescriptor()
|
||||
if isValidExtension(mr.Descriptor(), xtd) {
|
||||
mr.Clear(xtd)
|
||||
} else {
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
if int32(fd.Number()) == xt.Field {
|
||||
mr.Clear(fd)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
}
|
||||
|
||||
// ClearAllExtensions clears all extensions from m.
|
||||
// This includes populated fields and unknown fields in the extension range.
|
||||
func ClearAllExtensions(m Message) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
mr.Clear(fd)
|
||||
}
|
||||
return true
|
||||
})
|
||||
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from m.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes for the extension field.
|
||||
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
// Retrieve the unknown fields for this extension field.
|
||||
var bo protoreflect.RawFields
|
||||
for bi := mr.GetUnknown(); len(bi) > 0; {
|
||||
num, _, n := protowire.ConsumeField(bi)
|
||||
if int32(num) == xt.Field {
|
||||
bo = append(bo, bi[:n]...)
|
||||
}
|
||||
bi = bi[n:]
|
||||
}
|
||||
|
||||
// For type incomplete descriptors, only retrieve the unknown fields.
|
||||
if xt.ExtensionType == nil {
|
||||
return []byte(bo), nil
|
||||
}
|
||||
|
||||
// If the extension field only exists as unknown fields, unmarshal it.
|
||||
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
|
||||
xtd := xt.TypeDescriptor()
|
||||
if !isValidExtension(mr.Descriptor(), xtd) {
|
||||
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
|
||||
}
|
||||
if !mr.Has(xtd) && len(bo) > 0 {
|
||||
m2 := mr.New()
|
||||
if err := (proto.UnmarshalOptions{
|
||||
Resolver: extensionResolver{xt},
|
||||
}.Unmarshal(bo, m2.Interface())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m2.Has(xtd) {
|
||||
mr.Set(xtd, m2.Get(xtd))
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether the message has the extension field set or a default.
|
||||
var pv protoreflect.Value
|
||||
switch {
|
||||
case mr.Has(xtd):
|
||||
pv = mr.Get(xtd)
|
||||
case xtd.HasDefault():
|
||||
pv = xtd.Default()
|
||||
default:
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
v := xt.InterfaceOf(pv)
|
||||
rv := reflect.ValueOf(v)
|
||||
if isScalarKind(rv.Kind()) {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// extensionResolver is a custom extension resolver that stores a single
|
||||
// extension type that takes precedence over the global registry.
|
||||
type extensionResolver struct{ xt protoreflect.ExtensionType }
|
||||
|
||||
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
|
||||
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
|
||||
return r.xt, nil
|
||||
}
|
||||
return protoregistry.GlobalTypes.FindExtensionByName(field)
|
||||
}
|
||||
|
||||
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
|
||||
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
|
||||
return r.xt, nil
|
||||
}
|
||||
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
|
||||
}
|
||||
|
||||
// GetExtensions returns a list of the extension values present in m,
|
||||
// corresponding with the provided list of extension descriptors, xts.
|
||||
// If an extension is missing in m, the corresponding value is nil.
|
||||
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
vs := make([]interface{}, len(xts))
|
||||
for i, xt := range xts {
|
||||
v, err := GetExtension(m, xt)
|
||||
if err != nil {
|
||||
if err == ErrMissingExtension {
|
||||
continue
|
||||
}
|
||||
return vs, err
|
||||
}
|
||||
vs[i] = v
|
||||
}
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
// SetExtension sets an extension field in m to the provided value.
|
||||
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||
return errNotExtendable
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
|
||||
}
|
||||
if rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
|
||||
}
|
||||
if isScalarKind(rv.Elem().Kind()) {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
|
||||
xtd := xt.TypeDescriptor()
|
||||
if !isValidExtension(mr.Descriptor(), xtd) {
|
||||
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
|
||||
}
|
||||
mr.Set(xtd, xt.ValueOf(v))
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRawExtension inserts b into the unknown fields of m.
|
||||
//
|
||||
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
|
||||
func SetRawExtension(m Message, fnum int32, b []byte) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the raw field is valid.
|
||||
for b0 := b; len(b0) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b0)
|
||||
if int32(num) != fnum {
|
||||
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
|
||||
}
|
||||
b0 = b0[n:]
|
||||
}
|
||||
|
||||
ClearExtension(m, &ExtensionDesc{Field: fnum})
|
||||
mr.SetUnknown(append(mr.GetUnknown(), b...))
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a list of extension descriptors found in m,
|
||||
// containing descriptors for both populated extension fields in m and
|
||||
// also unknown fields of m that are in the extension range.
|
||||
// For the latter case, a type incomplete descriptor is provided where only
|
||||
// the ExtensionDesc.Field field is populated.
|
||||
// The order of the extension descriptors is undefined.
|
||||
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
// Collect a set of known extension descriptors.
|
||||
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
xt := fd.(protoreflect.ExtensionTypeDescriptor)
|
||||
if xd, ok := xt.Type().(*ExtensionDesc); ok {
|
||||
extDescs[fd.Number()] = xd
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Collect a set of unknown extension descriptors.
|
||||
extRanges := mr.Descriptor().ExtensionRanges()
|
||||
for b := mr.GetUnknown(); len(b) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b)
|
||||
if extRanges.Has(num) && extDescs[num] == nil {
|
||||
extDescs[num] = nil
|
||||
}
|
||||
b = b[n:]
|
||||
}
|
||||
|
||||
// Transpose the set of descriptors into a list.
|
||||
var xts []*ExtensionDesc
|
||||
for num, xt := range extDescs {
|
||||
if xt == nil {
|
||||
xt = &ExtensionDesc{Field: int32(num)}
|
||||
}
|
||||
xts = append(xts, xt)
|
||||
}
|
||||
return xts, nil
|
||||
}
|
||||
|
||||
// isValidExtension reports whether xtd is a valid extension descriptor for md.
|
||||
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
|
||||
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
|
||||
}
|
||||
|
||||
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
|
||||
// This function exists for historical reasons since the representation of
|
||||
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
|
||||
func isScalarKind(k reflect.Kind) bool {
|
||||
switch k {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// clearUnknown removes unknown fields from m where remover.Has reports true.
|
||||
func clearUnknown(m protoreflect.Message, remover interface {
|
||||
Has(protoreflect.FieldNumber) bool
|
||||
}) {
|
||||
var bo protoreflect.RawFields
|
||||
for bi := m.GetUnknown(); len(bi) > 0; {
|
||||
num, _, n := protowire.ConsumeField(bi)
|
||||
if !remover.Has(num) {
|
||||
bo = append(bo, bi[:n]...)
|
||||
}
|
||||
bi = bi[n:]
|
||||
}
|
||||
if bi := m.GetUnknown(); len(bi) != len(bo) {
|
||||
m.SetUnknown(bo)
|
||||
}
|
||||
}
|
||||
|
||||
type fieldNum protoreflect.FieldNumber
|
||||
|
||||
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
|
||||
return protoreflect.FieldNumber(n1) == n2
|
||||
}
|
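For orientation, a hedged sketch of the extension accessors above in use. The pb package, its MyMessage type, and the generated E_Nickname descriptor are hypothetical stand-ins for what protoc-gen-go would emit for a proto2 message with an extension range.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	// Hypothetical generated package: MyMessage declares an extension range
	// and E_Nickname is its generated *proto.ExtensionDesc for an optional string.
	pb "example.com/hypothetical/pb"
)

func main() {
	m := &pb.MyMessage{}

	// Populate the extension field, then read it back through the descriptor.
	if err := proto.SetExtension(m, pb.E_Nickname, proto.String("ceph")); err != nil {
		panic(err)
	}
	if proto.HasExtension(m, pb.E_Nickname) {
		v, _ := proto.GetExtension(m, pb.E_Nickname)
		fmt.Println(*(v.(*string))) // "ceph"
	}
	proto.ClearExtension(m, pb.E_Nickname)
}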
306 e2e/vendor/github.com/golang/protobuf/proto/properties.go generated vendored
@ -1,306 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// StructProperties represents protocol buffer type information for a
|
||||
// generated protobuf message in the open-struct API.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type StructProperties struct {
|
||||
// Prop are the properties for each field.
|
||||
//
|
||||
// Fields belonging to a oneof are stored in OneofTypes instead, with a
|
||||
// single Properties representing the parent oneof held here.
|
||||
//
|
||||
// The order of Prop matches the order of fields in the Go struct.
|
||||
// Struct fields that are not related to protobufs have a "XXX_" prefix
|
||||
// in the Properties.Name and must be ignored by the user.
|
||||
Prop []*Properties
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the protobuf field name.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// Properties represents the type information for a protobuf message field.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type Properties struct {
|
||||
// Name is a placeholder name with little meaningful semantic value.
|
||||
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
|
||||
Name string
|
||||
// OrigName is the protobuf field name or oneof name.
|
||||
OrigName string
|
||||
// JSONName is the JSON name for the protobuf field.
|
||||
JSONName string
|
||||
// Enum is a placeholder name for enums.
|
||||
// For historical reasons, this is neither the Go name for the enum,
|
||||
// nor the protobuf name for the enum.
|
||||
Enum string // Deprecated: Do not use.
|
||||
// Weak contains the full name of the weakly referenced message.
|
||||
Weak string
|
||||
// Wire is a string representation of the wire type.
|
||||
Wire string
|
||||
// WireType is the protobuf wire type for the field.
|
||||
WireType int
|
||||
// Tag is the protobuf field number.
|
||||
Tag int
|
||||
// Required reports whether this is a required field.
|
||||
Required bool
|
||||
// Optional reports whether this is an optional field.
|
||||
Optional bool
|
||||
// Repeated reports whether this is a repeated field.
|
||||
Repeated bool
|
||||
// Packed reports whether this is a packed repeated field of scalars.
|
||||
Packed bool
|
||||
// Proto3 reports whether this field operates under the proto3 syntax.
|
||||
Proto3 bool
|
||||
// Oneof reports whether this field belongs within a oneof.
|
||||
Oneof bool
|
||||
|
||||
// Default is the default value in string form.
|
||||
Default string
|
||||
// HasDefault reports whether the field has a default value.
|
||||
HasDefault bool
|
||||
|
||||
// MapKeyProp is the properties for the key field for a map field.
|
||||
MapKeyProp *Properties
|
||||
// MapValProp is the properties for the value field for a map field.
|
||||
MapValProp *Properties
|
||||
}
|
||||
|
||||
// OneofProperties represents the type information for a protobuf oneof.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type OneofProperties struct {
|
||||
// Type is a pointer to the generated wrapper type for the field value.
|
||||
// This is nil for messages that are not in the open-struct API.
|
||||
Type reflect.Type
|
||||
// Field is the index into StructProperties.Prop for the containing oneof.
|
||||
Field int
|
||||
// Prop is the properties for the field.
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += "," + strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != "" {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if len(p.Weak) > 0 {
|
||||
s += ",weak=" + p.Weak
|
||||
}
|
||||
if p.Proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.Oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(tag string) {
|
||||
// For example: "bytes,49,opt,name=foo,def=hello!"
|
||||
for len(tag) > 0 {
|
||||
i := strings.IndexByte(tag, ',')
|
||||
if i < 0 {
|
||||
i = len(tag)
|
||||
}
|
||||
switch s := tag[:i]; {
|
||||
case strings.HasPrefix(s, "name="):
|
||||
p.OrigName = s[len("name="):]
|
||||
case strings.HasPrefix(s, "json="):
|
||||
p.JSONName = s[len("json="):]
|
||||
case strings.HasPrefix(s, "enum="):
|
||||
p.Enum = s[len("enum="):]
|
||||
case strings.HasPrefix(s, "weak="):
|
||||
p.Weak = s[len("weak="):]
|
||||
case strings.Trim(s, "0123456789") == "":
|
||||
n, _ := strconv.ParseUint(s, 10, 32)
|
||||
p.Tag = int(n)
|
||||
case s == "opt":
|
||||
p.Optional = true
|
||||
case s == "req":
|
||||
p.Required = true
|
||||
case s == "rep":
|
||||
p.Repeated = true
|
||||
case s == "varint" || s == "zigzag32" || s == "zigzag64":
|
||||
p.Wire = s
|
||||
p.WireType = WireVarint
|
||||
case s == "fixed32":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed32
|
||||
case s == "fixed64":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed64
|
||||
case s == "bytes":
|
||||
p.Wire = s
|
||||
p.WireType = WireBytes
|
||||
case s == "group":
|
||||
p.Wire = s
|
||||
p.WireType = WireStartGroup
|
||||
case s == "packed":
|
||||
p.Packed = true
|
||||
case s == "proto3":
|
||||
p.Proto3 = true
|
||||
case s == "oneof":
|
||||
p.Oneof = true
|
||||
case strings.HasPrefix(s, "def="):
|
||||
// The default tag is special in that everything afterwards is the
|
||||
// default regardless of the presence of commas.
|
||||
p.HasDefault = true
|
||||
p.Default, i = tag[len("def="):], len(tag)
|
||||
}
|
||||
tag = strings.TrimPrefix(tag[i:], ",")
|
||||
}
|
||||
}
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
|
||||
if typ != nil && typ.Kind() == reflect.Map {
|
||||
p.MapKeyProp = new(Properties)
|
||||
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
|
||||
p.MapValProp = new(Properties)
|
||||
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
|
||||
}
|
||||
}
|
||||
|
||||
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t,
|
||||
// which must be a generated protocol buffer message in the open-struct API,
|
||||
// where protobuf message fields are represented by exported Go struct fields.
|
||||
//
|
||||
// Deprecated: Use protobuf reflection instead.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if p, ok := propertiesCache.Load(t); ok {
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
|
||||
func newProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
|
||||
var hasOneof bool
|
||||
prop := new(StructProperties)
|
||||
|
||||
// Construct a list of properties for each field in the struct.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
p := new(Properties)
|
||||
f := t.Field(i)
|
||||
tagField := f.Tag.Get("protobuf")
|
||||
p.Init(f.Type, f.Name, tagField, &f)
|
||||
|
||||
tagOneof := f.Tag.Get("protobuf_oneof")
|
||||
if tagOneof != "" {
|
||||
hasOneof = true
|
||||
p.OrigName = tagOneof
|
||||
}
|
||||
|
||||
// Rename unrelated struct fields with the "XXX_" prefix since so much
|
||||
// user code simply checks for this to exclude special fields.
|
||||
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
|
||||
p.Name = "XXX_" + p.Name
|
||||
p.OrigName = "XXX_" + p.OrigName
|
||||
} else if p.Weak != "" {
|
||||
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
|
||||
}
|
||||
|
||||
prop.Prop = append(prop.Prop, p)
|
||||
}
|
||||
|
||||
// Construct a mapping of oneof field names to properties.
|
||||
if hasOneof {
|
||||
var oneofWrappers []interface{}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
|
||||
}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
|
||||
}
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
|
||||
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
|
||||
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
|
||||
}
|
||||
}
|
||||
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, wrapper := range oneofWrappers {
|
||||
p := &OneofProperties{
|
||||
Type: reflect.ValueOf(wrapper).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
f := p.Type.Elem().Field(0)
|
||||
p.Prop.Name = f.Name
|
||||
p.Prop.Parse(f.Tag.Get("protobuf"))
|
||||
|
||||
// Determine the struct field that contains this oneof.
|
||||
// Each wrapper is assignable to exactly one parent field.
|
||||
var foundOneof bool
|
||||
for i := 0; i < t.NumField() && !foundOneof; i++ {
|
||||
if p.Type.AssignableTo(t.Field(i).Type) {
|
||||
p.Field = i
|
||||
foundOneof = true
|
||||
}
|
||||
}
|
||||
if !foundOneof {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
prop.OneofTypes[p.Prop.OrigName] = p
|
||||
}
|
||||
}
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
||||
func (sp *StructProperties) Less(i, j int) bool { return false }
|
||||
func (sp *StructProperties) Swap(i, j int) { return }
|
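As a quick illustration of the tag grammar Parse accepts, a minimal sketch; the tag string is an arbitrary example in the form emitted by protoc-gen-go.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var p proto.Properties
	p.Parse("bytes,3,opt,name=user_name,json=userName,proto3")

	fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName, p.JSONName, p.Proto3)
	// bytes 3 true user_name userName true
}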
167 e2e/vendor/github.com/golang/protobuf/proto/proto.go generated vendored
@ -1,167 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package proto provides functionality for handling protocol buffer messages.
|
||||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
||||
// message and the binary wire format.
|
||||
//
|
||||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
||||
// more information.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
ProtoPackageIsVersion1 = true
|
||||
ProtoPackageIsVersion2 = true
|
||||
ProtoPackageIsVersion3 = true
|
||||
ProtoPackageIsVersion4 = true
|
||||
)
|
||||
|
||||
// GeneratedEnum is any enum type generated by protoc-gen-go
|
||||
// which is a named int32 kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedEnum interface{}
|
||||
|
||||
// GeneratedMessage is any message type generated by protoc-gen-go
|
||||
// which is a pointer to a named struct kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedMessage interface{}
|
||||
|
||||
// Message is a protocol buffer message.
|
||||
//
|
||||
// This is the v1 version of the message interface and is marginally better
|
||||
// than an empty interface as it lacks any method to programmatically interact
|
||||
// with the contents of the message.
|
||||
//
|
||||
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
|
||||
// exposes protobuf reflection as a first-class feature of the interface.
|
||||
//
|
||||
// To convert a v1 message to a v2 message, use the MessageV2 function.
|
||||
// To convert a v2 message to a v1 message, use the MessageV1 function.
|
||||
type Message = protoiface.MessageV1
|
||||
|
||||
// MessageV1 converts either a v1 or v2 message to a v1 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
|
||||
return protoimpl.X.ProtoMessageV1Of(m)
|
||||
}
|
||||
|
||||
// MessageV2 converts either a v1 or v2 message to a v2 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV2(m GeneratedMessage) protoV2.Message {
|
||||
return protoimpl.X.ProtoMessageV2Of(m)
|
||||
}
|
||||
|
||||
// MessageReflect returns a reflective view for a message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageReflect(m Message) protoreflect.Message {
|
||||
return protoimpl.X.MessageOf(m)
|
||||
}
|
||||
|
||||
// Marshaler is implemented by messages that can marshal themselves.
|
||||
// This interface is used by the following functions: Size, Marshal,
|
||||
// Buffer.Marshal, and Buffer.EncodeMessage.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Marshaler interface {
|
||||
// Marshal formats the encoded bytes of the message.
|
||||
// It should be deterministic and emit valid protobuf wire data.
|
||||
// The caller takes ownership of the returned buffer.
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// Unmarshaler is implemented by messages that can unmarshal themselves.
|
||||
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
|
||||
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Unmarshaler interface {
|
||||
// Unmarshal parses the encoded bytes of the protobuf wire input.
|
||||
// The provided buffer is only valid for the duration of the method call.
|
||||
// It should not reset the receiver message.
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Merger is implemented by messages that can merge themselves.
|
||||
// This interface is used by the following functions: Clone and Merge.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Merger interface {
|
||||
// Merge merges the contents of src into the receiver message.
|
||||
// It clones all data structures in src such that it aliases no mutable
|
||||
// memory referenced by src.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// RequiredNotSetError is an error type returned when
|
||||
// marshaling or unmarshaling a message with missing required fields.
|
||||
type RequiredNotSetError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.err != nil {
|
||||
return e.err.Error()
|
||||
}
|
||||
return "proto: required field not set"
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func checkRequiredNotSet(m protoV2.Message) error {
|
||||
if err := protoV2.CheckInitialized(m); err != nil {
|
||||
return &RequiredNotSetError{err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of src.
|
||||
func Clone(src Message) Message {
|
||||
return MessageV1(protoV2.Clone(MessageV2(src)))
|
||||
}
|
||||
|
||||
// Merge merges src into dst, which must be messages of the same type.
|
||||
//
|
||||
// Populated scalar fields in src are copied to dst, while populated
|
||||
// singular messages in src are merged into dst by recursively calling Merge.
|
||||
// The elements of every list field in src are appended to the corresponding
|
||||
// list fields in dst. The entries of every map field in src are copied into
|
||||
// the corresponding map field in dst, possibly replacing existing entries.
|
||||
// The unknown fields of src are appended to the unknown fields of dst.
|
||||
func Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Equal reports whether two messages are equal.
|
||||
// If two messages marshal to the same bytes under deterministic serialization,
|
||||
// then Equal is guaranteed to report true.
|
||||
//
|
||||
// Two messages are equal if they are the same protobuf message type,
|
||||
// have the same set of populated known and extension field values,
|
||||
// and the same set of unknown fields values.
|
||||
//
|
||||
// Scalar values are compared with the equivalent of the == operator in Go,
|
||||
// except bytes values which are compared using bytes.Equal and
|
||||
// floating point values which specially treat NaNs as equal.
|
||||
// Message values are compared by recursively calling Equal.
|
||||
// Lists are equal if each element value is also equal.
|
||||
// Maps are equal if they have the same set of keys, where the pair of values
|
||||
// for each key is also equal.
|
||||
func Equal(x, y Message) bool {
|
||||
return protoV2.Equal(MessageV2(x), MessageV2(y))
|
||||
}
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
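To make the v1/v2 relationship described above concrete, a hedged sketch of bridging between this deprecated package and google.golang.org/protobuf; the well-known wrappers type is used only to keep the example self-contained.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m2 := wrapperspb.String("hello")
	m1 := proto.MessageV1(m2) // wrap the v2 message so the deprecated v1 helpers apply

	clone := proto.Clone(m1)
	fmt.Println(proto.Equal(m1, clone))                    // true
	fmt.Println(protoV2.Equal(proto.MessageV2(clone), m2)) // true
}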
317 e2e/vendor/github.com/golang/protobuf/proto/registry.go generated vendored
@ -1,317 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protodesc"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// filePath is the path to the proto source file.
|
||||
type filePath = string // e.g., "google/protobuf/descriptor.proto"
|
||||
|
||||
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
|
||||
type fileDescGZIP = []byte
|
||||
|
||||
var fileCache sync.Map // map[filePath]fileDescGZIP
|
||||
|
||||
// RegisterFile is called from generated code to register the compressed
|
||||
// FileDescriptorProto with the file path for a proto source file.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
|
||||
func RegisterFile(s filePath, d fileDescGZIP) {
|
||||
// Decompress the descriptor.
|
||||
zr, err := gzip.NewReader(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
b, err := ioutil.ReadAll(zr)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
|
||||
// Construct a protoreflect.FileDescriptor from the raw descriptor.
|
||||
// Note that DescBuilder.Build automatically registers the constructed
|
||||
// file descriptor with the v2 registry.
|
||||
protoimpl.DescBuilder{RawDescriptor: b}.Build()
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
fileCache.Store(s, d)
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto given the file path
|
||||
// for a proto source file. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
|
||||
func FileDescriptor(s filePath) fileDescGZIP {
|
||||
if v, ok := fileCache.Load(s); ok {
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
|
||||
// Find the descriptor in the v2 registry.
|
||||
var b []byte
|
||||
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
|
||||
b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
|
||||
}
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
if len(b) > 0 {
|
||||
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// enumName is the name of an enum. For historical reasons, the enum name is
|
||||
// neither the full Go name nor the full protobuf name of the enum.
|
||||
// The name is the dot-separated combination of just the proto package that the
|
||||
// enum is declared within followed by the Go type name of the generated enum.
|
||||
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
|
||||
|
||||
// enumsByName maps enum values by name to their numeric counterpart.
|
||||
type enumsByName = map[string]int32
|
||||
|
||||
// enumsByNumber maps enum values by number to their name counterpart.
|
||||
type enumsByNumber = map[int32]string
|
||||
|
||||
var enumCache sync.Map // map[enumName]enumsByName
|
||||
var numFilesCache sync.Map // map[protoreflect.FullName]int
|
||||
|
||||
// RegisterEnum is called from the generated code to register the mapping of
|
||||
// enum value names to enum numbers for the enum identified by s.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
|
||||
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
|
||||
if _, ok := enumCache.Load(s); ok {
|
||||
panic("proto: duplicate enum registered: " + s)
|
||||
}
|
||||
enumCache.Store(s, m)
|
||||
|
||||
// This does not forward registration to the v2 registry since this API
|
||||
// lacks sufficient information to construct a complete v2 enum descriptor.
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from enum value names to enum numbers for
|
||||
// the enum of the given name. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
|
||||
func EnumValueMap(s enumName) enumsByName {
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
|
||||
// Check whether the cache is stale. If the number of files in the current
|
||||
// package differs, then it means that some enums may have been recently
|
||||
// registered upstream that we do not know about.
|
||||
var protoPkg protoreflect.FullName
|
||||
if i := strings.LastIndexByte(s, '.'); i >= 0 {
|
||||
protoPkg = protoreflect.FullName(s[:i])
|
||||
}
|
||||
v, _ := numFilesCache.Load(protoPkg)
|
||||
numFiles, _ := v.(int)
|
||||
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
|
||||
return nil // cache is up-to-date; was not found earlier
|
||||
}
|
||||
|
||||
// Update the enum cache for all enums declared in the given proto package.
|
||||
numFiles = 0
|
||||
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
|
||||
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
|
||||
name := protoimpl.X.LegacyEnumName(ed)
|
||||
if _, ok := enumCache.Load(name); !ok {
|
||||
m := make(enumsByName)
|
||||
evs := ed.Values()
|
||||
for i := evs.Len() - 1; i >= 0; i-- {
|
||||
ev := evs.Get(i)
|
||||
m[string(ev.Name())] = int32(ev.Number())
|
||||
}
|
||||
enumCache.LoadOrStore(name, m)
|
||||
}
|
||||
})
|
||||
numFiles++
|
||||
return true
|
||||
})
|
||||
numFilesCache.Store(protoPkg, numFiles)
|
||||
|
||||
// Check cache again for enum map.
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// walkEnums recursively walks all enums declared in d.
|
||||
func walkEnums(d interface {
|
||||
Enums() protoreflect.EnumDescriptors
|
||||
Messages() protoreflect.MessageDescriptors
|
||||
}, f func(protoreflect.EnumDescriptor)) {
|
||||
eds := d.Enums()
|
||||
for i := eds.Len() - 1; i >= 0; i-- {
|
||||
f(eds.Get(i))
|
||||
}
|
||||
mds := d.Messages()
|
||||
for i := mds.Len() - 1; i >= 0; i-- {
|
||||
walkEnums(mds.Get(i), f)
|
||||
}
|
||||
}
|
||||
|
||||
// messageName is the full name of protobuf message.
|
||||
type messageName = string
|
||||
|
||||
var messageTypeCache sync.Map // map[messageName]reflect.Type
|
||||
|
||||
// RegisterType is called from generated code to register the message Go type
|
||||
// for a message of the given name.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
|
||||
func RegisterType(m Message, s messageName) {
|
||||
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
|
||||
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
messageTypeCache.Store(s, reflect.TypeOf(m))
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code to register the Go map type
|
||||
// for a protobuf message representing a map entry.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMapType(m interface{}, s messageName) {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("invalid map kind: %v", t))
|
||||
	}
	if _, ok := messageTypeCache.Load(s); ok {
		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
	}
	messageTypeCache.Store(s, t)
}

// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
	if v, ok := messageTypeCache.Load(s); ok {
		return v.(reflect.Type)
	}

	// Derive the message type from the v2 registry.
	var t reflect.Type
	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
		t = messageGoType(mt)
	}

	// If we could not get a concrete type, it is possible that it is a
	// pseudo-message for a map entry.
	if t == nil {
		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
			kt := goTypeForField(md.Fields().ByNumber(1))
			vt := goTypeForField(md.Fields().ByNumber(2))
			t = reflect.MapOf(kt, vt)
		}
	}

	// Locally cache the message type for the given name.
	if t != nil {
		v, _ := messageTypeCache.LoadOrStore(s, t)
		return v.(reflect.Type)
	}
	return nil
}

func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
	switch k := fd.Kind(); k {
	case protoreflect.EnumKind:
		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
			return enumGoType(et)
		}
		return reflect.TypeOf(protoreflect.EnumNumber(0))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
			return messageGoType(mt)
		}
		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
	default:
		return reflect.TypeOf(fd.Default().Interface())
	}
}

func enumGoType(et protoreflect.EnumType) reflect.Type {
	return reflect.TypeOf(et.New(0))
}

func messageGoType(mt protoreflect.MessageType) reflect.Type {
	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}

// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
	if m == nil {
		return ""
	}
	if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
		return m.XXX_MessageName()
	}
	return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}

// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
		panic(err)
	}
}

type extensionsByNumber = map[int32]*ExtensionDesc

var extensionCache sync.Map // map[messageName]extensionsByNumber

// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
	// Check whether the cache is stale. If the number of extensions for
	// the given message differs, then it means that some extensions were
	// recently registered upstream that we do not know about.
	s := MessageName(m)
	v, _ := extensionCache.Load(s)
	xs, _ := v.(extensionsByNumber)
	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
		return xs // cache is up-to-date
	}

	// Cache is stale, re-compute the extensions map.
	xs = make(extensionsByNumber)
	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
		if xd, ok := xt.(*ExtensionDesc); ok {
			xs[int32(xt.TypeDescriptor().Number())] = xd
		} else {
			// TODO: This implies that the protoreflect.ExtensionType is a
			// custom type not generated by protoc-gen-go. We could try and
			// convert the type to an ExtensionDesc.
		}
		return true
	})
	extensionCache.Store(s, xs)
	return xs
}
e2e/vendor/github.com/golang/protobuf/proto/text_decode.go (801 lines, generated, vendored)
@@ -1,801 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextUnmarshalV2 = false
|
||||
|
||||
// ParseError is returned by UnmarshalText.
|
||||
type ParseError struct {
|
||||
Message string
|
||||
|
||||
// Deprecated: Do not use.
|
||||
Line, Offset int
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() string {
|
||||
if wrapTextUnmarshalV2 {
|
||||
return e.Message
|
||||
}
|
||||
if e.Line == 1 {
|
||||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
||||
}
|
||||
|
||||
// UnmarshalText parses a proto text formatted string into m.
|
||||
func UnmarshalText(s string, m Message) error {
|
||||
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
||||
return u.UnmarshalText([]byte(s))
|
||||
}
|
||||
|
||||
m.Reset()
|
||||
mi := MessageV2(m)
|
||||
|
||||
if wrapTextUnmarshalV2 {
|
||||
err := prototext.UnmarshalOptions{
|
||||
AllowPartial: true,
|
||||
}.Unmarshal([]byte(s), mi)
|
||||
if err != nil {
|
||||
return &ParseError{Message: err.Error()}
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
} else {
|
||||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
}
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
seen := make(map[protoreflect.FieldNumber]bool)
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := protoreflect.Name(tok.value)
|
||||
fd := fds.ByName(name)
|
||||
switch {
|
||||
case fd == nil:
|
||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||
fd = gd
|
||||
}
|
||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||
fd = nil
|
||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||
fd = nil
|
||||
}
|
||||
if fd == nil {
|
||||
typeName := string(md.FullName())
|
||||
if m, ok := m.Interface().(Message); ok {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
typeName = t.Elem().String()
|
||||
}
|
||||
}
|
||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||
}
|
||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||
}
|
||||
seen[fd.Number()] = true
|
||||
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||
name, err := p.consumeExtensionOrAnyName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||
if err != nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||
}
|
||||
m2 := mt.New()
|
||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||
}
|
||||
|
||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||
valFD := m.Descriptor().Fields().ByName("value")
|
||||
if seen[urlFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||
}
|
||||
if seen[valFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||
}
|
||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||
seen[urlFD.Number()] = true
|
||||
seen[valFD.Number()] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
xname := protoreflect.FullName(name)
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
return p.errorf("unrecognized extension %q", name)
|
||||
}
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
v, err = p.unmarshalValue(v, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return p.consumeOptionalSeparator()
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := v.List()
|
||||
var err error
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order.
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
keyFD := fd.MapKey()
|
||||
valFD := fd.MapValue()
|
||||
|
||||
mv := v.Map()
|
||||
kv := keyFD.Default()
|
||||
vv := mv.NewValue()
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
var err error
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
mv.Set(kv.MapKey(), vv)
|
||||
return v, nil
|
||||
default:
|
||||
p.back()
|
||||
return p.unmarshalSingularValue(v, fd)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
return protoreflect.ValueOfBool(true), nil
|
||||
case "false", "0", "f", "False":
|
||||
return protoreflect.ValueOfBool(false), nil
|
||||
}
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that uses
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that uses
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||
}
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||
}
|
||||
case protoreflect.FloatKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||
}
|
||||
case protoreflect.DoubleKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||
}
|
||||
case protoreflect.BytesKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||
}
|
||||
case protoreflect.EnumKind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||
}
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||
if vd != nil {
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
err := p.unmarshalMessage(v.Message(), terminator)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
if fd.Message() == nil {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||
// the following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in unmarshalMessage to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(rune(i)), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
e2e/vendor/github.com/golang/protobuf/proto/text_encode.go (560 lines, generated, vendored)
@@ -1,560 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextMarshalV2 = false
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line)
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes the proto text format of m to w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||
b, err := tm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Text returns a proto text formatted string of m.
|
||||
func (tm *TextMarshaler) Text(m Message) string {
|
||||
b, _ := tm.marshal(m)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return []byte("<nil>"), nil
|
||||
}
|
||||
|
||||
if wrapTextMarshalV2 {
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
return m.MarshalText()
|
||||
}
|
||||
|
||||
opts := prototext.MarshalOptions{
|
||||
AllowPartial: true,
|
||||
EmitUnknown: true,
|
||||
}
|
||||
if !tm.Compact {
|
||||
opts.Indent = " "
|
||||
}
|
||||
if !tm.ExpandAny {
|
||||
opts.Resolver = (*protoregistry.Types)(nil)
|
||||
}
|
||||
return opts.Marshal(mr.Interface())
|
||||
} else {
|
||||
w := &textWriter{
|
||||
compact: tm.Compact,
|
||||
expandAny: tm.ExpandAny,
|
||||
complete: true,
|
||||
}
|
||||
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.Write(b)
|
||||
return w.buf, nil
|
||||
}
|
||||
|
||||
err := w.writeMessage(mr)
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// MarshalText writes the proto text format of m to w.
|
||||
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// MarshalTextString returns a proto text formatted string of m.
|
||||
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
||||
|
||||
// CompactText writes the compact proto text format of m to w.
|
||||
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// CompactTextString returns a compact proto text formatted string of m.
|
||||
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
compact bool // same as TextMarshaler.Compact
|
||||
expandAny bool // same as TextMarshaler.ExpandAny
|
||||
complete bool // whether the current position is a complete line
|
||||
indent int // indentation level; never negative
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, p...)
|
||||
w.complete = false
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
w.buf = append(w.buf, ' ')
|
||||
n++
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
if i+1 < len(frags) {
|
||||
w.buf = append(w.buf, '\n')
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, c)
|
||||
w.complete = c == '\n'
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
|
||||
if fd.Kind() != protoreflect.GroupKind {
|
||||
w.buf = append(w.buf, fd.Name()...)
|
||||
w.WriteByte(':')
|
||||
} else {
|
||||
// Use message type name for group field name.
|
||||
w.buf = append(w.buf, fd.Message().Name()...)
|
||||
}
|
||||
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
||||
md := m.Descriptor()
|
||||
fdURL := md.Fields().ByName("type_url")
|
||||
fdVal := md.Fields().ByName("value")
|
||||
|
||||
url := m.Get(fdURL).String()
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b := m.Get(fdVal).Bytes()
|
||||
m2 := mt.New()
|
||||
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
if requiresQuotes(url) {
|
||||
w.writeQuotedString(url)
|
||||
} else {
|
||||
w.Write([]byte(url))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.indent++
|
||||
}
|
||||
if err := w.writeMessage(m2); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.indent--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeMessage(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if w.expandAny && md.FullName() == "google.protobuf.Any" {
|
||||
if canExpand, err := w.writeProto3Any(m); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fds := md.Fields()
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
if fd == nil || !m.Has(fd) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := m.Get(fd).List()
|
||||
for j := 0; j < lv.Len(); j++ {
|
||||
w.writeName(fd)
|
||||
v := lv.Get(j)
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := m.Get(fd).Map()
|
||||
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
for _, entry := range entries {
|
||||
w.writeName(fd)
|
||||
w.WriteByte('<')
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
w.writeName(kfd)
|
||||
if err := w.writeSingularValue(entry.key, kfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.writeName(vfd)
|
||||
if err := w.writeSingularValue(entry.val, vfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.indent--
|
||||
w.WriteByte('>')
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
default:
|
||||
w.writeName(fd)
|
||||
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
if b := m.GetUnknown(); len(b) > 0 {
|
||||
w.writeUnknownFields(b)
|
||||
}
|
||||
return w.writeExtensions(m)
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.FloatKind, protoreflect.DoubleKind:
|
||||
switch vf := v.Float(); {
|
||||
case math.IsInf(vf, +1):
|
||||
w.Write(posInf)
|
||||
case math.IsInf(vf, -1):
|
||||
w.Write(negInf)
|
||||
case math.IsNaN(vf):
|
||||
w.Write(nan)
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
// NOTE: This does not validate UTF-8 for historical reasons.
|
||||
w.writeQuotedString(string(v.String()))
|
||||
case protoreflect.BytesKind:
|
||||
w.writeQuotedString(string(v.Bytes()))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var bra, ket byte = '<', '>'
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
w.WriteByte(bra)
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
m := v.Message()
|
||||
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
|
||||
b, err := m2.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.Write(b)
|
||||
} else {
|
||||
w.writeMessage(m)
|
||||
}
|
||||
w.indent--
|
||||
w.WriteByte(ket)
|
||||
case protoreflect.EnumKind:
|
||||
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
|
||||
fmt.Fprint(w, ev.Name())
|
||||
} else {
|
||||
fmt.Fprint(w, v.Enum())
|
||||
}
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
||||
func (w *textWriter) writeQuotedString(s string) {
|
||||
w.WriteByte('"')
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
w.buf = append(w.buf, `\n`...)
|
||||
case '\r':
|
||||
w.buf = append(w.buf, `\r`...)
|
||||
case '\t':
|
||||
w.buf = append(w.buf, `\t`...)
|
||||
case '"':
|
||||
w.buf = append(w.buf, `\"`...)
|
||||
case '\\':
|
||||
w.buf = append(w.buf, `\\`...)
|
||||
default:
|
||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
||||
w.buf = append(w.buf, c)
|
||||
} else {
|
||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.WriteByte('"')
|
||||
}
|
||||
|
||||
func (w *textWriter) writeUnknownFields(b []byte) {
|
||||
if !w.compact {
|
||||
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
|
||||
}
|
||||
|
||||
for len(b) > 0 {
|
||||
num, wtyp, n := protowire.ConsumeTag(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
if wtyp == protowire.EndGroupType {
|
||||
w.indent--
|
||||
w.Write(endBraceNewline)
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w, num)
|
||||
if wtyp != protowire.StartGroupType {
|
||||
w.WriteByte(':')
|
||||
}
|
||||
if !w.compact || wtyp == protowire.StartGroupType {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed32Type:
|
||||
v, n := protowire.ConsumeFixed32(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed64Type:
|
||||
v, n := protowire.ConsumeFixed64(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.BytesType:
|
||||
v, n := protowire.ConsumeBytes(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprintf(w, "%q", v)
|
||||
case protowire.StartGroupType:
|
||||
w.WriteByte('{')
|
||||
w.indent++
|
||||
default:
|
||||
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
// writeExtensions writes all the extensions in m.
|
||||
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if md.ExtensionRanges().Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
// For message set, use the name of the message as the extension name.
|
||||
name := string(ext.desc.FullName())
|
||||
if isMessageSet(ext.desc.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
if !ext.desc.IsList() {
|
||||
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lv := ext.val.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
fmt.Fprintf(w, "[%s]:", name)
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
for i := 0; i < w.indent*2; i++ {
|
||||
w.buf = append(w.buf, ' ')
|
||||
}
|
||||
w.complete = false
|
||||
}
|
e2e/vendor/github.com/golang/protobuf/proto/wire.go (78 lines, generated, vendored)
@@ -1,78 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/runtime/protoiface"
)

// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
	if m == nil {
		return 0
	}
	mi := MessageV2(m)
	return protoV2.Size(mi)
}

// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
	b, err := marshalAppend(nil, m, false)
	if b == nil {
		b = zeroBytes
	}
	return b, err
}

var zeroBytes = make([]byte, 0, 0)

func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	if len(buf) == len(nbuf) {
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}

// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
	m.Reset()
	return UnmarshalMerge(b, m)
}

// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
	mi := MessageV2(m)
	out, err := protoV2.UnmarshalOptions{
		AllowPartial: true,
		Merge:        true,
	}.UnmarshalState(protoiface.UnmarshalInput{
		Buf:     b,
		Message: mi.ProtoReflect(),
	})
	if err != nil {
		return err
	}
	if out.Flags&protoiface.UnmarshalInitialized > 0 {
		return nil
	}
	return checkRequiredNotSet(mi)
}
e2e/vendor/github.com/golang/protobuf/proto/wrappers.go (34 lines, generated, vendored)
@@ -1,34 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
e2e/vendor/github.com/google/btree/README.md (10 lines, generated, vendored)
@@ -1,10 +0,0 @@
# BTree implementation for Go

This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.

The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.

See http://godoc.org/github.com/google/btree for documentation.
e2e/vendor/github.com/google/btree/btree.go (893 lines, generated, vendored)
@@ -1,893 +0,0 @@
|
||||
// Copyright 2014 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
|
||||
// Package btree implements in-memory B-Trees of arbitrary degree.
|
||||
//
|
||||
// btree implements an in-memory B-Tree for use as an ordered data structure.
|
||||
// It is not meant for persistent storage solutions.
|
||||
//
|
||||
// It has a flatter structure than an equivalent red-black or other binary tree,
|
||||
// which in some cases yields better memory usage and/or performance.
|
||||
// See some discussion on the matter here:
|
||||
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
|
||||
// Note, though, that this project is in no way related to the C++ B-Tree
|
||||
// implementation written about there.
|
||||
//
|
||||
// Within this tree, each node contains a slice of items and a (possibly nil)
|
||||
// slice of children. For basic numeric values or raw structs, this can cause
|
||||
// efficiency differences when compared to equivalent C++ template code that
|
||||
// stores values in arrays within the node:
|
||||
// * Due to the overhead of storing values as interfaces (each
|
||||
// value needs to be stored as the value itself, then 2 words for the
|
||||
// interface pointing to that value and its type), resulting in higher
|
||||
// memory use.
|
||||
// * Since interfaces can point to values anywhere in memory, values are
|
||||
// most likely not stored in contiguous blocks, resulting in a higher
|
||||
// number of cache misses.
|
||||
// These issues don't tend to matter, though, when working with strings or other
|
||||
// heap-allocated structures, since C++-equivalent structures also must store
|
||||
// pointers and also distribute their values across the heap.
|
||||
//
|
||||
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
|
||||
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
|
||||
// widely used ordered tree implementation in the Go ecosystem currently.
|
||||
// Its functions, therefore, exactly mirror those of
|
||||
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
|
||||
// support storing multiple equivalent values.
|
||||
package btree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Item represents a single object in the tree.
|
||||
type Item interface {
|
||||
// Less tests whether the current item is less than the given argument.
|
||||
//
|
||||
// This must provide a strict weak ordering.
|
||||
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
|
||||
// hold one of either a or b in the tree).
|
||||
Less(than Item) bool
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultFreeListSize = 32
|
||||
)
|
||||
|
||||
var (
|
||||
nilItems = make(items, 16)
|
||||
nilChildren = make(children, 16)
|
||||
)
|
||||
|
||||
// FreeList represents a free list of btree nodes. By default each
|
||||
// BTree has its own FreeList, but multiple BTrees can share the same
|
||||
// FreeList.
|
||||
// Two Btrees using the same freelist are safe for concurrent write access.
|
||||
type FreeList struct {
|
||||
mu sync.Mutex
|
||||
freelist []*node
|
||||
}
|
||||
|
||||
// NewFreeList creates a new free list.
|
||||
// size is the maximum size of the returned free list.
|
||||
func NewFreeList(size int) *FreeList {
|
||||
return &FreeList{freelist: make([]*node, 0, size)}
|
||||
}
|
||||
|
||||
func (f *FreeList) newNode() (n *node) {
|
||||
f.mu.Lock()
|
||||
index := len(f.freelist) - 1
|
||||
if index < 0 {
|
||||
f.mu.Unlock()
|
||||
return new(node)
|
||||
}
|
||||
n = f.freelist[index]
|
||||
f.freelist[index] = nil
|
||||
f.freelist = f.freelist[:index]
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// freeNode adds the given node to the list, returning true if it was added
|
||||
// and false if it was discarded.
|
||||
func (f *FreeList) freeNode(n *node) (out bool) {
|
||||
f.mu.Lock()
|
||||
if len(f.freelist) < cap(f.freelist) {
|
||||
f.freelist = append(f.freelist, n)
|
||||
out = true
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
|
||||
// the tree. When this function returns false, iteration will stop and the
|
||||
// associated Ascend* function will immediately return.
|
||||
type ItemIterator func(i Item) bool
|
||||
|
||||
// New creates a new B-Tree with the given degree.
|
||||
//
|
||||
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
|
||||
// and 2-4 children).
|
||||
func New(degree int) *BTree {
|
||||
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
|
||||
}
|
||||
|
||||
// NewWithFreeList creates a new B-Tree that uses the given node free list.
|
||||
func NewWithFreeList(degree int, f *FreeList) *BTree {
|
||||
if degree <= 1 {
|
||||
panic("bad degree")
|
||||
}
|
||||
return &BTree{
|
||||
degree: degree,
|
||||
cow: &copyOnWriteContext{freelist: f},
|
||||
}
|
||||
}
|
||||
|
||||
// items stores items in a node.
|
||||
type items []Item
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *items) insertAt(index int, item Item) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = item
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *items) removeAt(index int) Item {
|
||||
item := (*s)[index]
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
(*s)[len(*s)-1] = nil
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *items) pop() (out Item) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// truncate truncates this instance at index so that it contains only the
|
||||
// first index items. index must be less than or equal to length.
|
||||
func (s *items) truncate(index int) {
|
||||
var toClear items
|
||||
*s, toClear = (*s)[:index], (*s)[index:]
|
||||
for len(toClear) > 0 {
|
||||
toClear = toClear[copy(toClear, nilItems):]
|
||||
}
|
||||
}
|
||||
|
||||
// find returns the index where the given item should be inserted into this
|
||||
// list. 'found' is true if the item already exists in the list at the given
|
||||
// index.
|
||||
func (s items) find(item Item) (index int, found bool) {
|
||||
i := sort.Search(len(s), func(i int) bool {
|
||||
return item.Less(s[i])
|
||||
})
|
||||
if i > 0 && !s[i-1].Less(item) {
|
||||
return i - 1, true
|
||||
}
|
||||
return i, false
|
||||
}
|
||||
|
||||
// children stores child nodes in a node.
type children []*node

// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
	*s = append(*s, nil)
	if index < len(*s) {
		copy((*s)[index+1:], (*s)[index:])
	}
	(*s)[index] = n
}

// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
	n := (*s)[index]
	copy((*s)[index:], (*s)[index+1:])
	(*s)[len(*s)-1] = nil
	*s = (*s)[:len(*s)-1]
	return n
}

// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
	index := len(*s) - 1
	out = (*s)[index]
	(*s)[index] = nil
	*s = (*s)[:index]
	return
}

// truncate truncates this instance at index so that it contains only the
// first index children. index must be less than or equal to length.
func (s *children) truncate(index int) {
	var toClear children
	*s, toClear = (*s)[:index], (*s)[index:]
	for len(toClear) > 0 {
		toClear = toClear[copy(toClear, nilChildren):]
	}
}

// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
//   * len(children) == 0, len(items) unconstrained
//   * len(children) == len(items) + 1
type node struct {
	items    items
	children children
	cow      *copyOnWriteContext
}

func (n *node) mutableFor(cow *copyOnWriteContext) *node {
	if n.cow == cow {
		return n
	}
	out := cow.newNode()
	if cap(out.items) >= len(n.items) {
		out.items = out.items[:len(n.items)]
	} else {
		out.items = make(items, len(n.items), cap(n.items))
	}
	copy(out.items, n.items)
	// Copy children
	if cap(out.children) >= len(n.children) {
		out.children = out.children[:len(n.children)]
	} else {
		out.children = make(children, len(n.children), cap(n.children))
	}
	copy(out.children, n.children)
	return out
}

func (n *node) mutableChild(i int) *node {
	c := n.children[i].mutableFor(n.cow)
	n.children[i] = c
	return c
}

// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
	item := n.items[i]
	next := n.cow.newNode()
	next.items = append(next.items, n.items[i+1:]...)
	n.items.truncate(i)
	if len(n.children) > 0 {
		next.children = append(next.children, n.children[i+1:]...)
		n.children.truncate(i + 1)
	}
	return item, next
}

// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
	if len(n.children[i].items) < maxItems {
		return false
	}
	first := n.mutableChild(i)
	item, second := first.split(maxItems / 2)
	n.items.insertAt(i, item)
	n.children.insertAt(i+1, second)
	return true
}
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
// found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
	i, found := n.items.find(item)
	if found {
		out := n.items[i]
		n.items[i] = item
		return out
	}
	if len(n.children) == 0 {
		n.items.insertAt(i, item)
		return nil
	}
	if n.maybeSplitChild(i, maxItems) {
		inTree := n.items[i]
		switch {
		case item.Less(inTree):
			// no change, we want first split node
		case inTree.Less(item):
			i++ // we want second split node
		default:
			out := n.items[i]
			n.items[i] = item
			return out
		}
	}
	return n.mutableChild(i).insert(item, maxItems)
}

// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
	i, found := n.items.find(key)
	if found {
		return n.items[i]
	} else if len(n.children) > 0 {
		return n.children[i].get(key)
	}
	return nil
}

// min returns the first item in the subtree.
func min(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[0]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[0]
}

// max returns the last item in the subtree.
func max(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[len(n.children)-1]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[len(n.items)-1]
}

// toRemove details what item to remove in a node.remove call.
type toRemove int

const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)

// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			return n.items.pop()
		}
		i = len(n.items)
	case removeMin:
		if len(n.children) == 0 {
			return n.items.removeAt(0)
		}
		i = 0
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	if len(n.children[i].items) <= minItems {
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	child := n.mutableChild(i)
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call. Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i - 1)
		stolenItem := stealFrom.items.pop()
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i + 1)
		stolenItem := stealFrom.items.removeAt(0)
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		if i >= len(n.items) {
			i--
		}
		child := n.mutableChild(i)
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		n.cow.freeNode(mergeChild)
	}
	return n.remove(item, minItems, typ)
}

type direction int

const (
	descend = direction(-1)
	ascend  = direction(+1)
)
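The steal-left, steal-right, and merge branches above are all reachable through the public API. A small hedged sketch (assuming the standard github.com/google/btree package): a degree-2 tree keeps minItems at 1, so deletions regularly trigger these rebalancing paths.

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2) // minItems() == 1, so removals often steal or merge
	for i := 0; i < 8; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}
	t.Delete(btree.Int(3))
	t.DeleteMin()
	t.DeleteMax()
	fmt.Println(t.Len()) // 5
}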
// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
// "greaterThan" or "lessThan" query.
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
	var ok, found bool
	var index int
	switch dir {
	case ascend:
		if start != nil {
			index, _ = n.items.find(start)
		}
		for i := index; i < len(n.items); i++ {
			if len(n.children) > 0 {
				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
				hit = true
				continue
			}
			hit = true
			if stop != nil && !n.items[i].Less(stop) {
				return hit, false
			}
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		if len(n.children) > 0 {
			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	case descend:
		if start != nil {
			index, found = n.items.find(start)
			if !found {
				index = index - 1
			}
		} else {
			index = len(n.items) - 1
		}
		for i := index; i >= 0; i-- {
			if start != nil && !n.items[i].Less(start) {
				if !includeStart || hit || start.Less(n.items[i]) {
					continue
				}
			}
			if len(n.children) > 0 {
				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			if stop != nil && !stop.Less(n.items[i]) {
				return hit, false // continue
			}
			hit = true
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		if len(n.children) > 0 {
			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	}
	return hit, true
}
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}

// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
	degree int
	length int
	root   *node
	cow    *copyOnWriteContext
}

// copyOnWriteContext pointers determine node ownership... a tree with a write
// context equivalent to a node's write context is allowed to modify that node.
// A tree whose write context does not match a node's is not allowed to modify
// it, and must create a new, writable copy (IE: it's a Clone).
//
// When doing any write operation, we maintain the invariant that the current
// node's context is equal to the context of the tree that requested the write.
// We do this by, before we descend into any node, creating a copy with the
// correct context if the contexts don't match.
//
// Since the node we're currently visiting on any write has the requesting
// tree's context, that node is modifiable in place. Children of that node may
// not share context, but before we descend into them, we'll make a mutable
// copy.
type copyOnWriteContext struct {
	freelist *FreeList
}
// Clone clones the btree, lazily. Clone should not be called concurrently,
// but the original tree (t) and the new tree (t2) can be used concurrently
// once the Clone call completes.
//
// The internal tree structure of t is marked read-only and shared between t and
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
// whenever one of t's original nodes would have been modified. Read operations
// should have no performance degradation. Write operations for both t and t2
// will initially experience minor slow-downs caused by additional allocs and
// copies due to the aforementioned copy-on-write logic, but should converge to
// the original performance characteristics of the original tree.
func (t *BTree) Clone() (t2 *BTree) {
	// Create two entirely new copy-on-write contexts.
	// This operation effectively creates three trees:
	//   the original, shared nodes (old t.cow)
	//   the new t.cow nodes
	//   the new out.cow nodes
	cow1, cow2 := *t.cow, *t.cow
	out := *t
	t.cow = &cow1
	out.cow = &cow2
	return &out
}
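A brief sketch of the copy-on-write behaviour described above, through the package's public API (illustrative only; New and Int are declared elsewhere in this file):

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2)
	for i := 0; i < 10; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}
	t2 := t.Clone()                    // shares every node with t until one side writes
	t2.ReplaceOrInsert(btree.Int(100)) // copies only the nodes on the insertion path
	fmt.Println(t.Len(), t2.Len())     // 10 11: t is unaffected by writes to t2
}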
// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
	return t.degree*2 - 1
}

// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
	return t.degree - 1
}

func (c *copyOnWriteContext) newNode() (n *node) {
	n = c.freelist.newNode()
	n.cow = c
	return
}

type freeType int

const (
	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
	ftStored                       // node was stored in the freelist for later use
	ftNotOwned                     // node was ignored by COW, since it's owned by another one
)

// freeNode frees a node within a given COW context, if it's owned by that
// context. It returns what happened to the node (see freeType const
// documentation).
func (c *copyOnWriteContext) freeNode(n *node) freeType {
	if n.cow == c {
		// clear to allow GC
		n.items.truncate(0)
		n.children.truncate(0)
		n.cow = nil
		if c.freelist.freeNode(n) {
			return ftStored
		} else {
			return ftFreelistFull
		}
	} else {
		return ftNotOwned
	}
}

// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("nil item being added to BTree")
	}
	if t.root == nil {
		t.root = t.cow.newNode()
		t.root.items = append(t.root.items, item)
		t.length++
		return nil
	} else {
		t.root = t.root.mutableFor(t.cow)
		if len(t.root.items) >= t.maxItems() {
			item2, second := t.root.split(t.maxItems() / 2)
			oldroot := t.root
			t.root = t.cow.newNode()
			t.root.items = append(t.root.items, item2)
			t.root.children = append(t.root.children, oldroot, second)
		}
	}
	out := t.root.insert(item, t.maxItems())
	if out == nil {
		t.length++
	}
	return out
}

// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
	return t.deleteItem(item, removeItem)
}

// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
	return t.deleteItem(nil, removeMin)
}

// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
	return t.deleteItem(nil, removeMax)
}

func (t *BTree) deleteItem(item Item, typ toRemove) Item {
	if t.root == nil || len(t.root.items) == 0 {
		return nil
	}
	t.root = t.root.mutableFor(t.cow)
	out := t.root.remove(item, t.minItems(), typ)
	if len(t.root.items) == 0 && len(t.root.children) > 0 {
		oldroot := t.root
		t.root = t.root.children[0]
		t.cow.freeNode(oldroot)
	}
	if out != nil {
		t.length--
	}
	return out
}

// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
}

// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, nil, pivot, false, false, iterator)
}

// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, pivot, nil, true, false, iterator)
}

// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, nil, nil, false, false, iterator)
}

// DescendRange calls the iterator for every value in the tree within the range
// [lessOrEqual, greaterThan), until iterator returns false.
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
}

// DescendLessOrEqual calls the iterator for every value in the tree within the range
// [pivot, first], until iterator returns false.
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, pivot, nil, true, false, iterator)
}

// DescendGreaterThan calls the iterator for every value in the tree within
// the range [last, pivot), until iterator returns false.
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, nil, pivot, false, false, iterator)
}

// Descend calls the iterator for every value in the tree within the range
// [last, first], until iterator returns false.
func (t *BTree) Descend(iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, nil, nil, false, false, iterator)
}

// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
	if t.root == nil {
		return nil
	}
	return t.root.get(key)
}

// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
	return min(t.root)
}

// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
	return max(t.root)
}

// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
	return t.Get(key) != nil
}

// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
	return t.length
}
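The Ascend*/Descend* wrappers above all delegate to node.iterate with different start/stop/includeStart combinations. A short usage sketch (assuming the package's New constructor, declared elsewhere in this file):

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2)
	for i := 0; i < 5; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}
	// half-open range [1, 3): visits 1 and 2
	t.AscendRange(btree.Int(1), btree.Int(3), func(i btree.Item) bool {
		fmt.Println(i)
		return true // returning false would stop the iteration early
	})
}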
// Clear removes all items from the btree. If addNodesToFreelist is true,
// t's nodes are added to its freelist as part of this call, until the freelist
// is full. Otherwise, the root node is simply dereferenced and the subtree
// left to Go's normal GC processes.
//
// This can be much faster than calling Delete on all elements, because that
// requires finding/removing each element in the tree and updating the tree
// accordingly. It also is somewhat faster than creating a new tree to replace
// the old one, because nodes from the old tree are reclaimed into the freelist
// for use by the new one, instead of being lost to the garbage collector.
//
// This call takes:
//   O(1): when addNodesToFreelist is false, this is a single operation.
//   O(1): when the freelist is already full, it breaks out immediately
//   O(freelist size): when the freelist is empty and the nodes are all owned
//     by this tree, nodes are added to the freelist until full.
//   O(tree size): when all nodes are owned by another tree, all nodes are
//     iterated over looking for nodes to add to the freelist, and due to
//     ownership, none are.
func (t *BTree) Clear(addNodesToFreelist bool) {
	if t.root != nil && addNodesToFreelist {
		t.root.reset(t.cow)
	}
	t.root, t.length = nil, 0
}

// reset returns a subtree to the freelist. It breaks out immediately if the
// freelist is full, since the only benefit of iterating is to fill that
// freelist up. Returns true if parent reset call should continue.
func (n *node) reset(c *copyOnWriteContext) bool {
	for _, child := range n.children {
		if !child.reset(c) {
			return false
		}
	}
	return c.freeNode(n) != ftFreelistFull
}

// Int implements the Item interface for integers.
type Int int

// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
	return a < b.(Int)
}
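As a usage note on the Clear documentation above, Clear(true) pairs naturally with a shared free list. A hedged sketch, assuming the package's NewFreeList and NewWithFreeList constructors (declared earlier in this file, outside the lines shown here):

package main

import "github.com/google/btree"

func main() {
	fl := btree.NewFreeList(32)       // assumed constructor from this package
	t := btree.NewWithFreeList(2, fl) // tree that recycles nodes through fl
	for i := 0; i < 100; i++ {
		t.ReplaceOrInsert(btree.Int(i))
	}
	t.Clear(true) // nodes go back to fl until it is full, ready for reuse
}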
e2e/vendor/github.com/google/btree/btree_generic.go (generated, vendored): 1083 changes (diff suppressed because it is too large)
e2e/vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored): 2 changes
@ -24,7 +24,7 @@ import (
"time"

"github.com/karrick/godirwalk"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/pkg/errors"
"golang.org/x/sys/unix"

e2e/vendor/github.com/google/cadvisor/container/containerd/client.go (generated, vendored): 8 changes
@ -26,7 +26,7 @@ import (
tasksapi "github.com/containerd/containerd/api/services/tasks/v1"
versionapi "github.com/containerd/containerd/api/services/version/v1"
tasktypes "github.com/containerd/containerd/api/types/task"
"github.com/containerd/errdefs"
"github.com/containerd/errdefs/pkg/errgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials/insecure"
@ -114,7 +114,7 @@ func (c *client) LoadContainer(ctx context.Context, id string) (*containers.Cont
ID: id,
})
if err != nil {
return nil, errdefs.FromGRPC(err)
return nil, errgrpc.ToNative(err)
}
return containerFromProto(r.Container), nil
}
@ -124,7 +124,7 @@ func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) {
ContainerID: id,
})
if err != nil {
return 0, errdefs.FromGRPC(err)
return 0, errgrpc.ToNative(err)
}
if response.Process.Status == tasktypes.Status_UNKNOWN {
return 0, ErrTaskIsInUnknownState
@ -135,7 +135,7 @@ func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) {
func (c *client) Version(ctx context.Context) (string, error) {
response, err := c.versionService.Version(ctx, &emptypb.Empty{})
if err != nil {
return "", errdefs.FromGRPC(err)
return "", errgrpc.ToNative(err)
}
return response.Version, nil
}
e2e/vendor/github.com/google/cadvisor/container/containerd/handler.go (generated, vendored): 2 changes
@ -23,7 +23,7 @@ import (
"time"

"github.com/containerd/errdefs"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/net/context"

e2e/vendor/github.com/google/cadvisor/container/crio/handler.go (generated, vendored): 2 changes
@ -21,7 +21,7 @@ import (
"strconv"
"strings"

"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"

"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/common"
e2e/vendor/github.com/google/cadvisor/container/factory.go (generated, vendored): 2 changes
@ -66,6 +66,7 @@ const (
ResctrlMetrics MetricKind = "resctrl"
CPUSetMetrics MetricKind = "cpuset"
OOMMetrics MetricKind = "oom_event"
PressureMetrics MetricKind = "pressure"
)

// AllMetrics represents all kinds of metrics that cAdvisor supported.
@ -91,6 +92,7 @@ var AllMetrics = MetricSet{
ResctrlMetrics: struct{}{},
CPUSetMetrics: struct{}{},
OOMMetrics: struct{}{},
PressureMetrics: struct{}{},
}

// AllNetworkMetrics represents all network metrics that cAdvisor supports.
e2e/vendor/github.com/google/cadvisor/container/libcontainer/handler.go (generated, vendored): 35 changes
@ -28,8 +28,8 @@ import (
"strings"
"time"

"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fs2"
"k8s.io/klog/v2"

"github.com/google/cadvisor/container"
@ -717,10 +717,7 @@ func scanUDPStats(r io.Reader) (info.UdpStat, error) {
return stats, scanner.Err()
}

listening := uint64(0)
dropped := uint64(0)
rxQueued := uint64(0)
txQueued := uint64(0)
var listening, dropped, rxQueued, txQueued uint64

for scanner.Scan() {
line := scanner.Text()
@ -733,8 +730,11 @@ func scanUDPStats(r io.Reader) (info.UdpStat, error) {
continue
}

rx, tx := uint64(0), uint64(0)
fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
var rx, tx uint64
_, err := fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
if err != nil {
continue
}
rxQueued += rx
txQueued += tx

@ -771,6 +771,7 @@ func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime
setPSIStats(s.CpuStats.PSI, &ret.Cpu.PSI)

if !withPerCPU {
return
@ -792,6 +793,7 @@ func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
ret.DiskIo.IoWaitTime = diskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
ret.DiskIo.IoMerged = diskStatsCopy(s.BlkioStats.IoMergedRecursive)
ret.DiskIo.IoTime = diskStatsCopy(s.BlkioStats.IoTimeRecursive)
setPSIStats(s.BlkioStats.PSI, &ret.DiskIo.PSI)
}

func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
@ -799,6 +801,7 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
ret.Memory.KernelUsage = s.MemoryStats.KernelUsage.Usage
setPSIStats(s.MemoryStats.PSI, &ret.Memory.PSI)

if cgroups.IsCgroup2UnifiedMode() {
ret.Memory.Cache = s.MemoryStats.Stats["file"]
@ -884,6 +887,22 @@ func setHugepageStats(s *cgroups.Stats, ret *info.ContainerStats) {
}
}

func setPSIData(d *cgroups.PSIData, ret *info.PSIData) {
if d != nil {
ret.Total = d.Total
ret.Avg10 = d.Avg10
ret.Avg60 = d.Avg60
ret.Avg300 = d.Avg300
}
}

func setPSIStats(s *cgroups.PSIStats, ret *info.PSIStats) {
if s != nil {
setPSIData(&s.Full, &ret.Full)
setPSIData(&s.Some, &ret.Some)
}
}

// read from pids path not cpu
func setThreadsStats(s *cgroups.Stats, ret *info.ContainerStats) {
if s != nil {
e2e/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored): 15 changes
@ -17,15 +17,12 @@ package libcontainer
import (
"fmt"

"github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1"

"github.com/opencontainers/runc/libcontainer/cgroups"

"github.com/google/cadvisor/container"

fs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
fs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
configs "github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/cgroups"
fs "github.com/opencontainers/cgroups/fs"
fs2 "github.com/opencontainers/cgroups/fs2"
"k8s.io/klog/v2"
)

@ -157,9 +154,9 @@ func diskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStat
}

func NewCgroupManager(name string, paths map[string]string) (cgroups.Manager, error) {
config := &configs.Cgroup{
config := &cgroups.Cgroup{
Name: name,
Resources: &configs.Resources{},
Resources: &cgroups.Resources{},
}
if cgroups.IsCgroup2UnifiedMode() {
path := paths[""]
e2e/vendor/github.com/google/cadvisor/container/raw/handler.go (generated, vendored): 2 changes
@ -24,8 +24,8 @@ import (
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/machine"
"github.com/opencontainers/runc/libcontainer/cgroups"

"github.com/opencontainers/cgroups"
"k8s.io/klog/v2"
)

e2e/vendor/github.com/google/cadvisor/info/v1/container.go (generated, vendored): 24 changes
@ -261,6 +261,26 @@ func (ci *ContainerInfo) StatsEndTime() time.Time {
return ret
}

// PSI statistics for an individual resource.
type PSIStats struct {
// PSI data for all tasks of in the cgroup.
Full PSIData `json:"full,omitempty"`
// PSI data for some tasks in the cgroup.
Some PSIData `json:"some,omitempty"`
}

type PSIData struct {
// Total time duration for tasks in the cgroup have waited due to congestion.
// Unit: nanoseconds.
Total uint64 `json:"total"`
// The average (in %) tasks have waited due to congestion over a 10 second window.
Avg10 float64 `json:"avg10"`
// The average (in %) tasks have waited due to congestion over a 60 second window.
Avg60 float64 `json:"avg60"`
// The average (in %) tasks have waited due to congestion over a 300 second window.
Avg300 float64 `json:"avg300"`
}

// This mirrors kernel internal structure.
type LoadStats struct {
// Number of sleeping tasks.
@ -335,6 +355,7 @@ type CpuStats struct {
LoadAverage int32 `json:"load_average"`
// from LoadStats.NrUninterruptible
LoadDAverage int32 `json:"load_d_average"`
PSI PSIStats `json:"psi"`
}

type PerDiskStats struct {
@ -353,6 +374,7 @@ type DiskIoStats struct {
IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"`
IoMerged []PerDiskStats `json:"io_merged,omitempty"`
IoTime []PerDiskStats `json:"io_time,omitempty"`
PSI PSIStats `json:"psi"`
}

type HugetlbStats struct {
@ -411,6 +433,8 @@ type MemoryStats struct {

ContainerData MemoryStatsMemoryData `json:"container_data,omitempty"`
HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`

PSI PSIStats `json:"psi"`
}

type CPUSetStats struct {
e2e/vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored): 6 changes
@ -45,7 +45,7 @@ import (
"github.com/google/cadvisor/version"
"github.com/google/cadvisor/watcher"

"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"

"k8s.io/klog/v2"
"k8s.io/utils/clock"
@ -221,7 +221,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, HousekeepingConfi
return nil, err
}

newManager.resctrlManager, err = resctrl.NewManager(resctrlInterval, resctrl.Setup, machineInfo.CPUVendorID, inHostNamespace)
newManager.resctrlManager, err = resctrl.NewManager(resctrlInterval, machineInfo.CPUVendorID, inHostNamespace)
if err != nil {
klog.V(4).Infof("Cannot gather resctrl metrics: %v", err)
}
@ -265,7 +265,7 @@ type manager struct {
eventsChannel chan watcher.ContainerEvent
collectorHTTPClient *http.Client
perfManager stats.Manager
resctrlManager resctrl.Manager
resctrlManager resctrl.ResControlManager
// List of raw container cgroup path prefix whitelist.
rawContainerCgroupPathPrefixWhiteList []string
// List of container env prefix whitelist, the matched container envs would be collected into metrics as extra labels.
e2e/vendor/github.com/google/cadvisor/resctrl/collector.go (generated, vendored): 171 changes
@ -1,171 +0,0 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Collector of resctrl for a container.
|
||||
package resctrl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
const noInterval = 0
|
||||
|
||||
type collector struct {
|
||||
id string
|
||||
interval time.Duration
|
||||
getContainerPids func() ([]string, error)
|
||||
resctrlPath string
|
||||
running bool
|
||||
destroyed bool
|
||||
numberOfNUMANodes int
|
||||
vendorID string
|
||||
mu sync.Mutex
|
||||
inHostNamespace bool
|
||||
}
|
||||
|
||||
func newCollector(id string, getContainerPids func() ([]string, error), interval time.Duration, numberOfNUMANodes int, vendorID string, inHostNamespace bool) *collector {
|
||||
return &collector{id: id, interval: interval, getContainerPids: getContainerPids, numberOfNUMANodes: numberOfNUMANodes,
|
||||
vendorID: vendorID, mu: sync.Mutex{}, inHostNamespace: inHostNamespace}
|
||||
}
|
||||
|
||||
func (c *collector) setup() error {
|
||||
var err error
|
||||
c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
|
||||
|
||||
if c.interval != noInterval {
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
|
||||
} else {
|
||||
c.running = true
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(c.interval)
|
||||
c.mu.Lock()
|
||||
if c.destroyed {
|
||||
break
|
||||
}
|
||||
klog.V(5).Infof("Trying to check %q containers control group.", c.id)
|
||||
if c.running {
|
||||
err = c.checkMonitoringGroup()
|
||||
if err != nil {
|
||||
c.running = false
|
||||
klog.Errorf("Failed to check %q resctrl collector control group: %s \n Trying again in next intervals.", c.id, err)
|
||||
}
|
||||
} else {
|
||||
c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
|
||||
if err != nil {
|
||||
c.running = false
|
||||
klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
// There is no interval set, if setup fail, stop.
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to setup container %q resctrl collector: %w", c.id, err)
|
||||
}
|
||||
c.running = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) checkMonitoringGroup() error {
|
||||
newPath, err := prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't obtain mon_group path: %v", err)
|
||||
}
|
||||
|
||||
// Check if container moved between control groups.
|
||||
if newPath != c.resctrlPath {
|
||||
err = c.clear()
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't clear previous monitoring group: %w", err)
|
||||
}
|
||||
c.resctrlPath = newPath
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) UpdateStats(stats *info.ContainerStats) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.running {
|
||||
stats.Resctrl = info.ResctrlStats{}
|
||||
|
||||
resctrlStats, err := getIntelRDTStatsFrom(c.resctrlPath, c.vendorID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, c.numberOfNUMANodes)
|
||||
stats.Resctrl.Cache = make([]info.CacheStats, 0, c.numberOfNUMANodes)
|
||||
|
||||
for _, numaNodeStats := range *resctrlStats.MBMStats {
|
||||
stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth,
|
||||
info.MemoryBandwidthStats{
|
||||
TotalBytes: numaNodeStats.MBMTotalBytes,
|
||||
LocalBytes: numaNodeStats.MBMLocalBytes,
|
||||
})
|
||||
}
|
||||
|
||||
for _, numaNodeStats := range *resctrlStats.CMTStats {
|
||||
stats.Resctrl.Cache = append(stats.Resctrl.Cache,
|
||||
info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) Destroy() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.running = false
|
||||
err := c.clear()
|
||||
if err != nil {
|
||||
klog.Errorf("trying to destroy %q resctrl collector but: %v", c.id, err)
|
||||
}
|
||||
c.destroyed = true
|
||||
}
|
||||
|
||||
func (c *collector) clear() error {
|
||||
// Not allowed to remove root or undefined resctrl directory.
|
||||
if c.id != rootContainer && c.resctrlPath != "" {
|
||||
// Remove only own prepared mon group.
|
||||
if strings.HasPrefix(filepath.Base(c.resctrlPath), monGroupPrefix) {
|
||||
err := os.RemoveAll(c.resctrlPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't clear mon_group: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
e2e/vendor/github.com/google/cadvisor/resctrl/factory.go (generated, vendored, new file): 58 changes
@ -0,0 +1,58 @@
// Copyright 2025 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resctrl

import (
"fmt"
"sync"
"time"

"github.com/google/cadvisor/stats"

"k8s.io/klog/v2"
)

type ResControlManager interface {
Destroy()
GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error)
}

// All registered auth provider plugins.
var pluginsLock sync.Mutex
var plugins = make(map[string]ResControlManagerPlugin)

type ResControlManagerPlugin interface {
NewManager(interval time.Duration, vendorID string, inHostNamespace bool) (ResControlManager, error)
}

func RegisterPlugin(name string, plugin ResControlManagerPlugin) error {
pluginsLock.Lock()
defer pluginsLock.Unlock()
if _, found := plugins[name]; found {
return fmt.Errorf("ResControlManagerPlugin %q was registered twice", name)
}
klog.V(4).Infof("Registered ResControlManagerPlugin %q", name)
plugins[name] = plugin
return nil
}

func NewManager(interval time.Duration, vendorID string, inHostNamespace bool) (ResControlManager, error) {
pluginsLock.Lock()
defer pluginsLock.Unlock()
for _, plugin := range plugins {
return plugin.NewManager(interval, vendorID, inHostNamespace)
}
return nil, fmt.Errorf("unable to find plugins for resctrl manager")
}
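The new factory decouples cAdvisor's manager from a concrete resctrl implementation: NewManager hands back whichever ResControlManagerPlugin registered itself. A hedged sketch of what a registration might look like; the package name, plugin type, and "example" key below are assumptions for illustration, not part of this commit:

package myresctrl

import (
	"fmt"
	"time"

	"github.com/google/cadvisor/resctrl"

	"k8s.io/klog/v2"
)

type plugin struct{}

// NewManager satisfies resctrl.ResControlManagerPlugin; a real implementation
// would return a manager that builds per-container resctrl collectors.
func (plugin) NewManager(interval time.Duration, vendorID string, inHostNamespace bool) (resctrl.ResControlManager, error) {
	return nil, fmt.Errorf("not implemented in this sketch")
}

func init() {
	if err := resctrl.RegisterPlugin("example", plugin{}); err != nil {
		klog.Warningf("resctrl plugin registration failed: %v", err)
	}
}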
e2e/vendor/github.com/google/cadvisor/resctrl/manager.go (generated, vendored): 79 changes
@ -1,79 +0,0 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Manager of resctrl for containers.
|
||||
package resctrl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/cadvisor/container/raw"
|
||||
"github.com/google/cadvisor/stats"
|
||||
)
|
||||
|
||||
type Manager interface {
|
||||
Destroy()
|
||||
GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error)
|
||||
}
|
||||
|
||||
type manager struct {
|
||||
stats.NoopDestroy
|
||||
interval time.Duration
|
||||
vendorID string
|
||||
inHostNamespace bool
|
||||
}
|
||||
|
||||
func (m *manager) GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error) {
|
||||
collector := newCollector(containerName, getContainerPids, m.interval, numberOfNUMANodes, m.vendorID, m.inHostNamespace)
|
||||
err := collector.setup()
|
||||
if err != nil {
|
||||
return &stats.NoopCollector{}, err
|
||||
}
|
||||
|
||||
return collector, nil
|
||||
}
|
||||
|
||||
func NewManager(interval time.Duration, setup func() error, vendorID string, inHostNamespace bool) (Manager, error) {
|
||||
err := setup()
|
||||
if err != nil {
|
||||
return &NoopManager{}, err
|
||||
}
|
||||
|
||||
if !isResctrlInitialized {
|
||||
return &NoopManager{}, errors.New("the resctrl isn't initialized")
|
||||
}
|
||||
if !(enabledCMT || enabledMBM) {
|
||||
return &NoopManager{}, errors.New("there are no monitoring features available")
|
||||
}
|
||||
|
||||
if !*raw.DockerOnly {
|
||||
klog.Warning("--docker_only should be set when collecting Resctrl metrics! See the runtime docs.")
|
||||
}
|
||||
|
||||
return &manager{interval: interval, vendorID: vendorID, inHostNamespace: inHostNamespace}, nil
|
||||
}
|
||||
|
||||
type NoopManager struct {
|
||||
stats.NoopDestroy
|
||||
}
|
||||
|
||||
func (np *NoopManager) GetCollector(_ string, _ func() ([]string, error), _ int) (stats.Collector, error) {
|
||||
return &stats.NoopCollector{}, nil
|
||||
}
|
e2e/vendor/github.com/google/cadvisor/resctrl/utils.go (generated, vendored): 369 changes
@ -1,369 +0,0 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
// Copyright 2021 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Utilities.
|
||||
package resctrl
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
|
||||
"github.com/opencontainers/runc/libcontainer/intelrdt"
|
||||
)
|
||||
|
||||
const (
|
||||
cpuCgroup = "cpu"
|
||||
rootContainer = "/"
|
||||
monitoringGroupDir = "mon_groups"
|
||||
processTask = "task"
|
||||
cpusFileName = "cpus"
|
||||
cpusListFileName = "cpus_list"
|
||||
schemataFileName = "schemata"
|
||||
tasksFileName = "tasks"
|
||||
modeFileName = "mode"
|
||||
sizeFileName = "size"
|
||||
infoDirName = "info"
|
||||
monDataDirName = "mon_data"
|
||||
monGroupsDirName = "mon_groups"
|
||||
noPidsPassedError = "there are no pids passed"
|
||||
noContainerNameError = "there are no container name passed"
|
||||
noControlGroupFoundError = "couldn't find control group matching container"
|
||||
llcOccupancyFileName = "llc_occupancy"
|
||||
mbmLocalBytesFileName = "mbm_local_bytes"
|
||||
mbmTotalBytesFileName = "mbm_total_bytes"
|
||||
containerPrefix = '/'
|
||||
minContainerNameLen = 2 // "/<container_name>" e.g. "/a"
|
||||
unavailable = "Unavailable"
|
||||
monGroupPrefix = "cadvisor"
|
||||
)
|
||||
|
||||
var (
|
||||
rootResctrl = ""
|
||||
pidsPath = ""
|
||||
processPath = "/proc"
|
||||
enabledMBM = false
|
||||
enabledCMT = false
|
||||
isResctrlInitialized = false
|
||||
groupDirectories = map[string]struct{}{
|
||||
cpusFileName: {},
|
||||
cpusListFileName: {},
|
||||
infoDirName: {},
|
||||
monDataDirName: {},
|
||||
monGroupsDirName: {},
|
||||
schemataFileName: {},
|
||||
tasksFileName: {},
|
||||
modeFileName: {},
|
||||
sizeFileName: {},
|
||||
}
|
||||
)
|
||||
|
||||
func Setup() error {
|
||||
var err error
|
||||
rootResctrl, err = intelrdt.Root()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize resctrl: %v", err)
|
||||
}
|
||||
|
||||
if cgroups.IsCgroup2UnifiedMode() {
|
||||
pidsPath = fs2.UnifiedMountpoint
|
||||
} else {
|
||||
pidsPath = filepath.Join(fs2.UnifiedMountpoint, cpuCgroup)
|
||||
}
|
||||
|
||||
enabledMBM = intelrdt.IsMBMEnabled()
|
||||
enabledCMT = intelrdt.IsCMTEnabled()
|
||||
|
||||
isResctrlInitialized = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func prepareMonitoringGroup(containerName string, getContainerPids func() ([]string, error), inHostNamespace bool) (string, error) {
|
||||
if containerName == rootContainer {
|
||||
return rootResctrl, nil
|
||||
}
|
||||
|
||||
pids, err := getContainerPids()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(pids) == 0 {
|
||||
return "", fmt.Errorf("couldn't obtain %q container pids: there is no pids in cgroup", containerName)
|
||||
}
|
||||
|
||||
// Firstly, find the control group to which the container belongs.
|
||||
// Consider the root group.
|
||||
controlGroupPath, err := findGroup(rootResctrl, pids, true, false)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%q %q: %q", noControlGroupFoundError, containerName, err)
|
||||
}
|
||||
if controlGroupPath == "" {
|
||||
return "", fmt.Errorf("%q %q", noControlGroupFoundError, containerName)
|
||||
}
|
||||
|
||||
// Check if there is any monitoring group.
|
||||
monGroupPath, err := findGroup(filepath.Join(controlGroupPath, monGroupsDirName), pids, false, true)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("couldn't find monitoring group matching %q container: %v", containerName, err)
|
||||
}
|
||||
|
||||
// Prepare new one if not exists.
|
||||
if monGroupPath == "" {
|
||||
// Remove leading prefix.
|
||||
// e.g. /my/container -> my/container
|
||||
if len(containerName) >= minContainerNameLen && containerName[0] == containerPrefix {
|
||||
containerName = containerName[1:]
|
||||
}
|
||||
|
||||
// Add own prefix and use `-` instead `/`.
|
||||
// e.g. my/container -> cadvisor-my-container
|
||||
properContainerName := fmt.Sprintf("%s-%s", monGroupPrefix, strings.Replace(containerName, "/", "-", -1))
|
||||
monGroupPath = filepath.Join(controlGroupPath, monitoringGroupDir, properContainerName)
|
||||
|
||||
err = os.MkdirAll(monGroupPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("couldn't create monitoring group directory for %q container: %w", containerName, err)
|
||||
}
|
||||
|
||||
if !inHostNamespace {
|
||||
processPath = "/rootfs/proc"
|
||||
}
|
||||
|
||||
for _, pid := range pids {
|
||||
processThreads, err := getAllProcessThreads(filepath.Join(processPath, pid, processTask))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, thread := range processThreads {
|
||||
err = intelrdt.WriteIntelRdtTasks(monGroupPath, thread)
|
||||
if err != nil {
|
||||
secondError := os.Remove(monGroupPath)
|
||||
if secondError != nil {
|
||||
return "", fmt.Errorf(
|
||||
"coudn't assign pids to %q container monitoring group: %w \n couldn't clear %q monitoring group: %v",
|
||||
containerName, err, containerName, secondError)
|
||||
}
|
||||
return "", fmt.Errorf("coudn't assign pids to %q container monitoring group: %w", containerName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return monGroupPath, nil
|
||||
}
|
||||
|
||||
func getPids(containerName string) ([]int, error) {
|
||||
if len(containerName) == 0 {
|
||||
// No container name passed.
|
||||
return nil, fmt.Errorf(noContainerNameError)
|
||||
}
|
||||
pids, err := cgroups.GetAllPids(filepath.Join(pidsPath, containerName))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't obtain pids for %q container: %v", containerName, err)
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
// getAllProcessThreads obtains all available processes from directory.
|
||||
// e.g. ls /proc/4215/task/ -> 4215, 4216, 4217, 4218
|
||||
// func will return [4215, 4216, 4217, 4218].
|
||||
func getAllProcessThreads(path string) ([]int, error) {
|
||||
processThreads := make([]int, 0)
|
||||
|
||||
threadDirs, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return processThreads, err
|
||||
}
|
||||
|
||||
for _, dir := range threadDirs {
|
||||
pid, err := strconv.Atoi(dir.Name())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't parse %q dir: %v", dir.Name(), err)
|
||||
}
|
||||
processThreads = append(processThreads, pid)
|
||||
}
|
||||
|
||||
    return processThreads, nil
}

// findGroup returns the path of a control/monitoring group in which the pids are.
func findGroup(group string, pids []string, includeGroup bool, exclusive bool) (string, error) {
    if len(pids) == 0 {
        return "", fmt.Errorf(noPidsPassedError)
    }

    availablePaths := make([]string, 0)
    if includeGroup {
        availablePaths = append(availablePaths, group)
    }

    files, err := os.ReadDir(group)
    for _, file := range files {
        if _, ok := groupDirectories[file.Name()]; !ok {
            availablePaths = append(availablePaths, filepath.Join(group, file.Name()))
        }
    }
    if err != nil {
        return "", fmt.Errorf("couldn't obtain groups paths: %w", err)
    }

    for _, path := range availablePaths {
        groupFound, err := arePIDsInGroup(path, pids, exclusive)
        if err != nil {
            return "", err
        }
        if groupFound {
            return path, nil
        }
    }

    return "", nil
}

// arePIDsInGroup returns true if all of the pids are within control group.
func arePIDsInGroup(path string, pids []string, exclusive bool) (bool, error) {
    if len(pids) == 0 {
        return false, fmt.Errorf("couldn't obtain pids from %q path: %v", path, noPidsPassedError)
    }

    tasks, err := readTasksFile(filepath.Join(path, tasksFileName))
    if err != nil {
        return false, err
    }

    any := false
    for _, pid := range pids {
        _, ok := tasks[pid]
        if !ok {
            // There are missing pids within group.
            if any {
                return false, fmt.Errorf("there should be all pids in group")
            }
            return false, nil
        }
        any = true
    }

    // Check if there should be only passed pids in group.
    if exclusive {
        if len(tasks) != len(pids) {
            return false, fmt.Errorf("group should have container pids only")
        }
    }

    return true, nil
}

// readTasksFile returns pids map from given tasks path.
func readTasksFile(tasksPath string) (map[string]struct{}, error) {
    tasks := make(map[string]struct{})

    tasksFile, err := os.Open(tasksPath)
    if err != nil {
        return tasks, fmt.Errorf("couldn't read tasks file from %q path: %w", tasksPath, err)
    }
    defer tasksFile.Close()

    scanner := bufio.NewScanner(tasksFile)
    for scanner.Scan() {
        tasks[scanner.Text()] = struct{}{}
    }

    if err := scanner.Err(); err != nil {
        return tasks, fmt.Errorf("couldn't obtain pids from %q path: %w", tasksPath, err)
    }

    return tasks, nil
}

func readStatFrom(path string, vendorID string) (uint64, error) {
    context, err := os.ReadFile(path)
    if err != nil {
        return 0, err
    }

    contextString := string(bytes.TrimSpace(context))

    if contextString == unavailable {
        err := fmt.Errorf("\"Unavailable\" value from file %q", path)
        if vendorID == "AuthenticAMD" {
            kernelBugzillaLink := "https://bugzilla.kernel.org/show_bug.cgi?id=213311"
            err = fmt.Errorf("%v, possible bug: %q", err, kernelBugzillaLink)
        }
        return 0, err
    }

    stat, err := strconv.ParseUint(contextString, 10, 64)
    if err != nil {
        return stat, fmt.Errorf("unable to parse %q as a uint from file %q", string(context), path)
    }

    return stat, nil
}

func getIntelRDTStatsFrom(path string, vendorID string) (intelrdt.Stats, error) {
    stats := intelrdt.Stats{}

    statsDirectories, err := filepath.Glob(filepath.Join(path, monDataDirName, "*"))
    if err != nil {
        return stats, err
    }

    if len(statsDirectories) == 0 {
        return stats, fmt.Errorf("there is no mon_data stats directories: %q", path)
    }

    var cmtStats []intelrdt.CMTNumaNodeStats
    var mbmStats []intelrdt.MBMNumaNodeStats

    for _, dir := range statsDirectories {
        if enabledCMT {
            llcOccupancy, err := readStatFrom(filepath.Join(dir, llcOccupancyFileName), vendorID)
            if err != nil {
                return stats, err
            }
            cmtStats = append(cmtStats, intelrdt.CMTNumaNodeStats{LLCOccupancy: llcOccupancy})
        }
        if enabledMBM {
            mbmTotalBytes, err := readStatFrom(filepath.Join(dir, mbmTotalBytesFileName), vendorID)
            if err != nil {
                return stats, err
            }
            mbmLocalBytes, err := readStatFrom(filepath.Join(dir, mbmLocalBytesFileName), vendorID)
            if err != nil {
                return stats, err
            }
            mbmStats = append(mbmStats, intelrdt.MBMNumaNodeStats{
                MBMTotalBytes: mbmTotalBytes,
                MBMLocalBytes: mbmLocalBytes,
            })
        }
    }

    stats.CMTStats = &cmtStats
    stats.MBMStats = &mbmStats

    return stats, nil
}
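
The helpers above follow a simple pattern: read a group's `tasks` file into a set, then test whether every PID of interest is a member. The following standalone sketch shows the same readTasksFile/arePIDsInGroup idea in isolation; it is not part of the vendored package, and the resctrl path and PID used in `main` are illustrative assumptions only.

```go
// Minimal sketch of the tasks-file membership check used above.
package main

import (
    "bufio"
    "fmt"
    "os"
)

// pidsInTasksFile reports whether every pid in pids is listed in the given
// resctrl-style tasks file (one task ID per line).
func pidsInTasksFile(tasksPath string, pids []string) (bool, error) {
    f, err := os.Open(tasksPath)
    if err != nil {
        return false, fmt.Errorf("couldn't read tasks file from %q: %w", tasksPath, err)
    }
    defer f.Close()

    tasks := make(map[string]struct{})
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        tasks[scanner.Text()] = struct{}{}
    }
    if err := scanner.Err(); err != nil {
        return false, err
    }

    for _, pid := range pids {
        if _, ok := tasks[pid]; !ok {
            return false, nil // at least one pid is missing from the group
        }
    }
    return true, nil
}

func main() {
    // Path and PID are placeholders for illustration.
    ok, err := pidsInTasksFile("/sys/fs/resctrl/tasks", []string{"1234"})
    fmt.Println(ok, err)
}
```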
8 e2e/vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go generated vendored
@@ -47,7 +47,7 @@ type netlinkMessage struct {
func (m netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
    rawmsg.Header = m.Header
    w := bytes.NewBuffer([]byte{})
    binary.Write(w, Endian, m.GenHeader)
    _ = binary.Write(w, Endian, m.GenHeader)
    w.Write(m.Data)
    rawmsg.Data = w.Bytes()
    return rawmsg
@@ -94,13 +94,13 @@ func addAttribute(buf *bytes.Buffer, attrType uint16, data interface{}, dataSize
        Type: attrType,
    }
    attr.Len += uint16(dataSize)
    binary.Write(buf, Endian, attr)
    _ = binary.Write(buf, Endian, attr)
    switch data := data.(type) {
    case string:
        binary.Write(buf, Endian, []byte(data))
        _ = binary.Write(buf, Endian, []byte(data))
        buf.WriteByte(0) // terminate
    default:
        binary.Write(buf, Endian, data)
        _ = binary.Write(buf, Endian, data)
    }
    for i := 0; i < padding(int(attr.Len), syscall.NLMSG_ALIGNTO); i++ {
        buf.WriteByte(0)
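
The two hunks above only change bare `binary.Write` calls into `_ = binary.Write(...)`: a write of a fixed-size value into an in-memory `bytes.Buffer` cannot fail at runtime, so the error is discarded explicitly to keep errcheck-style linters quiet rather than to change behaviour. A minimal standalone illustration (not part of the vendored file):

```go
package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    var buf bytes.Buffer
    // Explicitly ignore the error: encoding a fixed-size value into an
    // in-memory buffer cannot fail, but linters still want the error handled.
    _ = binary.Write(&buf, binary.LittleEndian, uint32(42))
    fmt.Printf("% x\n", buf.Bytes())
}
```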
201 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore generated vendored
@@ -1,201 +0,0 @@
(file removed in full: a stock .ignore-plugin template covering Go build artifacts plus Windows, Kate, SublimeText, Linux, JetBrains, Xcode and Eclipse editor/IDE files)
25 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml generated vendored
@@ -1,25 +0,0 @@
sudo: false
language: go
# * github.com/grpc/grpc-go still supports go1.6
#   - When we drop support for go1.6 we can remove golang.org/x/net/context
#     below as it is part of the Go std library since go1.7
# * github.com/prometheus/client_golang already requires at least go1.7 since
#   September 2017
go:
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - 1.9.x
  - 1.10.x
  - master

install:
  - go get github.com/prometheus/client_golang/prometheus
  - go get google.golang.org/grpc
  - go get golang.org/x/net/context
  - go get github.com/stretchr/testify
script:
  - make test

after_success:
  - bash <(curl -s https://codecov.io/bash)
24 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md generated vendored
@@ -1,24 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04

### Added

* Provide metrics object as `prometheus.Collector`, for conventional metric registration.
* Support non-default/global Prometheus registry.
* Allow configuring counters with `prometheus.CounterOpts`.

### Changed

* Remove usage of deprecated `grpc.Code()`.
* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`.

---

This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases).
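
The v1.2.0 entries above note that the metrics object is exposed as a `prometheus.Collector` and that non-default registries are supported. A hypothetical registration sketch, based on the `NewClientMetrics` constructor shown later in this diff (the registry setup itself is an assumption, not code from this repository):

```go
package main

import (
    grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Use a private registry instead of the package's global one.
    reg := prometheus.NewRegistry()

    // ClientMetrics implements prometheus.Collector (Describe/Collect),
    // so it can be registered like any other collector.
    clientMetrics := grpc_prometheus.NewClientMetrics()
    reg.MustRegister(clientMetrics)
}
```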
201 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE generated vendored
@@ -1,201 +0,0 @@
(file removed in full: the unmodified Apache License, Version 2.0 text, http://www.apache.org/licenses/LICENSE-2.0)
247 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md generated vendored
@@ -1,247 +0,0 @@
(file removed in full: the go-grpc-prometheus README, documenting server- and client-side interceptor setup, the grpc_server_*/grpc_client_* counters with their grpc_service/grpc_method/grpc_type/grpc_code labels, the optional grpc_server_handling_seconds histogram, and example Prometheus queries for request rates, error rates, stream sizes and latency quantiles)
39 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go generated vendored
@@ -1,39 +0,0 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.

// gRPC Prometheus monitoring interceptors for client-side gRPC.

package grpc_prometheus

import (
    prom "github.com/prometheus/client_golang/prometheus"
)

var (
    // DefaultClientMetrics is the default instance of ClientMetrics. It is
    // intended to be used in conjunction the default Prometheus metrics
    // registry.
    DefaultClientMetrics = NewClientMetrics()

    // UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
    UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()

    // StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
    StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
)

func init() {
    prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
    prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
    prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
    prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
}

// EnableClientHandlingTimeHistogram turns on recording of handling time of
// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
// query. This function acts on the DefaultClientMetrics variable and the
// default Prometheus metrics registry.
func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
    DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
    prom.Register(DefaultClientMetrics.clientHandledHistogram)
}
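
client.go above wires the package-level interceptors to the default registry in `init`. A hypothetical client-side wiring sketch using those interceptors; the target address and the insecure-credentials choice are placeholders, not taken from this repository:

```go
package main

import (
    "log"

    grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    conn, err := grpc.Dial(
        "localhost:9090", // placeholder address
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
        grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}
```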
170 e2e/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go generated vendored
@@ -1,170 +0,0 @@
package grpc_prometheus

import (
    "io"

    prom "github.com/prometheus/client_golang/prometheus"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// ClientMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC client.
type ClientMetrics struct {
    clientStartedCounter    *prom.CounterVec
    clientHandledCounter    *prom.CounterVec
    clientStreamMsgReceived *prom.CounterVec
    clientStreamMsgSent     *prom.CounterVec

    clientHandledHistogramEnabled bool
    clientHandledHistogramOpts    prom.HistogramOpts
    clientHandledHistogram        *prom.HistogramVec
}

// NewClientMetrics returns a ClientMetrics object. Use a new instance of
// ClientMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
    opts := counterOptions(counterOpts)
    return &ClientMetrics{
        clientStartedCounter: prom.NewCounterVec(
            opts.apply(prom.CounterOpts{
                Name: "grpc_client_started_total",
                Help: "Total number of RPCs started on the client.",
            }), []string{"grpc_type", "grpc_service", "grpc_method"}),

        clientHandledCounter: prom.NewCounterVec(
            opts.apply(prom.CounterOpts{
                Name: "grpc_client_handled_total",
                Help: "Total number of RPCs completed by the client, regardless of success or failure.",
            }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),

        clientStreamMsgReceived: prom.NewCounterVec(
            opts.apply(prom.CounterOpts{
                Name: "grpc_client_msg_received_total",
                Help: "Total number of RPC stream messages received by the client.",
            }), []string{"grpc_type", "grpc_service", "grpc_method"}),

        clientStreamMsgSent: prom.NewCounterVec(
            opts.apply(prom.CounterOpts{
                Name: "grpc_client_msg_sent_total",
                Help: "Total number of gRPC stream messages sent by the client.",
            }), []string{"grpc_type", "grpc_service", "grpc_method"}),

        clientHandledHistogramEnabled: false,
        clientHandledHistogramOpts: prom.HistogramOpts{
            Name:    "grpc_client_handling_seconds",
            Help:    "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
            Buckets: prom.DefBuckets,
        },
        clientHandledHistogram: nil,
    }
}

// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
    m.clientStartedCounter.Describe(ch)
    m.clientHandledCounter.Describe(ch)
    m.clientStreamMsgReceived.Describe(ch)
    m.clientStreamMsgSent.Describe(ch)
    if m.clientHandledHistogramEnabled {
        m.clientHandledHistogram.Describe(ch)
    }
}

// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
    m.clientStartedCounter.Collect(ch)
    m.clientHandledCounter.Collect(ch)
    m.clientStreamMsgReceived.Collect(ch)
    m.clientStreamMsgSent.Collect(ch)
    if m.clientHandledHistogramEnabled {
        m.clientHandledHistogram.Collect(ch)
    }
}

// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
// Histogram metrics can be very expensive for Prometheus to retain and query.
func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
    for _, o := range opts {
        o(&m.clientHandledHistogramOpts)
    }
    if !m.clientHandledHistogramEnabled {
        m.clientHandledHistogram = prom.NewHistogramVec(
            m.clientHandledHistogramOpts,
            []string{"grpc_type", "grpc_service", "grpc_method"},
        )
    }
    m.clientHandledHistogramEnabled = true
}

// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
    return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        monitor := newClientReporter(m, Unary, method)
        monitor.SentMessage()
        err := invoker(ctx, method, req, reply, cc, opts...)
        if err != nil {
            monitor.ReceivedMessage()
        }
        st, _ := status.FromError(err)
        monitor.Handled(st.Code())
        return err
    }
}

// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
    return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
        monitor := newClientReporter(m, clientStreamType(desc), method)
        clientStream, err := streamer(ctx, desc, cc, method, opts...)
        if err != nil {
            st, _ := status.FromError(err)
            monitor.Handled(st.Code())
            return nil, err
        }
        return &monitoredClientStream{clientStream, monitor}, nil
    }
}

func clientStreamType(desc *grpc.StreamDesc) grpcType {
    if desc.ClientStreams && !desc.ServerStreams {
        return ClientStream
    } else if !desc.ClientStreams && desc.ServerStreams {
        return ServerStream
    }
    return BidiStream
}

// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
type monitoredClientStream struct {
    grpc.ClientStream
    monitor *clientReporter
}

func (s *monitoredClientStream) SendMsg(m interface{}) error {
    err := s.ClientStream.SendMsg(m)
    if err == nil {
        s.monitor.SentMessage()
    }
    return err
}

func (s *monitoredClientStream) RecvMsg(m interface{}) error {
    err := s.ClientStream.RecvMsg(m)
    if err == nil {
        s.monitor.ReceivedMessage()
    } else if err == io.EOF {
        s.monitor.Handled(codes.OK)
    } else {
        st, _ := status.FromError(err)
        s.monitor.Handled(st.Code())
    }
    return err
}
Some files were not shown because too many files have changed in this diff.