Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-12-18 11:00:25 +00:00)
rebase: bump sigs.k8s.io/controller-runtime
Bumps the k8s-dependencies group with 1 update in the / directory: [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime).

Updates `sigs.k8s.io/controller-runtime` from 0.17.3 to 0.18.2
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.17.3...v0.18.2)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
commit c1ee11261e (parent c8af2b638a)
go.mod (27 lines changed)

@@ -1,7 +1,6 @@
 module github.com/ceph/ceph-csi
 
 go 1.22
 
 toolchain go1.22.2
 
 require (
@@ -22,8 +21,8 @@ require (
 	github.com/kubernetes-csi/csi-lib-utils v0.17.0
 	github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0
 	github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1
-	github.com/onsi/ginkgo/v2 v2.15.0
-	github.com/onsi/gomega v1.31.1
+	github.com/onsi/ginkgo/v2 v2.17.1
+	github.com/onsi/gomega v1.32.0
 	github.com/pkg/xattr v0.4.9
 	github.com/prometheus/client_golang v1.18.0
 	github.com/stretchr/testify v1.9.0
@@ -35,8 +34,8 @@ require (
 	//
 	// when updating k8s.io/kubernetes, make sure to update the replace section too
 	//
-	k8s.io/api v0.29.3
-	k8s.io/apimachinery v0.29.3
+	k8s.io/api v0.30.0
+	k8s.io/apimachinery v0.30.0
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/cloud-provider v0.29.3
 	k8s.io/klog/v2 v2.120.1
@@ -44,7 +43,7 @@ require (
 	k8s.io/mount-utils v0.29.3
 	k8s.io/pod-security-admission v0.29.3
 	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
-	sigs.k8s.io/controller-runtime v0.17.3
+	sigs.k8s.io/controller-runtime v0.18.2
 )
 
 require (
@@ -79,7 +78,7 @@ require (
 	github.com/distribution/reference v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.8.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/gemalto/flume v0.13.0 // indirect
@@ -94,7 +93,7 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/cel-go v0.17.7 // indirect
+	github.com/google/cel-go v0.17.8 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
@@ -166,7 +165,7 @@ require (
 	golang.org/x/term v0.20.0 // indirect
 	golang.org/x/text v0.15.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.16.1 // indirect
+	golang.org/x/tools v0.18.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
@@ -176,16 +175,16 @@ require (
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.29.2 // indirect
-	k8s.io/apiserver v0.29.3 // indirect
-	k8s.io/component-base v0.29.3 // indirect
+	k8s.io/apiextensions-apiserver v0.30.0 // indirect
+	k8s.io/apiserver v0.30.0 // indirect
+	k8s.io/component-base v0.30.0 // indirect
 	k8s.io/component-helpers v0.29.3 // indirect
 	k8s.io/controller-manager v0.29.3 // indirect
 	k8s.io/kms v0.29.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/kubectl v0.0.0 // indirect
 	k8s.io/kubelet v0.0.0 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
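For orientation only (not part of the commit): controller-runtime v0.18.x is the release line that pairs with the k8s.io v0.30 modules raised above. A minimal, hypothetical consumer of the bumped module might look like the following sketch; the ctrl alias and the calls used are the library's documented entry points, while the program itself is invented.

package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// A manager is the usual controller-runtime entry point; nothing here is
	// ceph-csi specific.
	ctrl.SetLogger(zap.New())

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		ctrl.Log.Error(err, "unable to create manager")
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		ctrl.Log.Error(err, "manager exited")
		os.Exit(1)
	}
}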
go.sum (28 lines changed)

@@ -966,8 +966,8 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
-github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
@@ -1115,8 +1115,9 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
 github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
+github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
+github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
 github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/fscrypt v0.3.6-0.20240502174735-068b9f8f5dec h1:bXRTgu+1I882EvvYVEUwehBVahYfqeX9Qqb9eUyPs/g=
 github.com/google/fscrypt v0.3.6-0.20240502174735-068b9f8f5dec/go.mod h1:HyY8Z/kUPrnIKAwuhjrn2tSTM5/s9zfRRTqRMG0mHks=
@@ -1441,8 +1442,8 @@ github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3Ro
 github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
 github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
 github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
 github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -1462,8 +1463,8 @@ github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRah
 github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
 github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
 github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
-github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
+github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
+github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
@@ -2208,8 +2209,9 @@ golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
 golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
 golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
-golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
 golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2621,8 +2623,9 @@ k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ=
 k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0=
 k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
 k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
 k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
 k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us=
 k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4=
 k8s.io/kubelet v0.29.3 h1:X9h0ZHzc+eUeNTaksbN0ItHyvGhQ7Z0HPjnQD2oHdwU=
@@ -2693,11 +2696,12 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
 sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
-sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk=
-sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
+sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q=
+sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go (13 lines changed, generated, vendored)

@@ -167,6 +167,19 @@ func Marshal(v any) ([]byte, error) {
 	return buf, nil
 }
 
+func MarshalEscaped(v any, escape bool) ([]byte, error) {
+	e := newEncodeState()
+	defer encodeStatePool.Put(e)
+
+	err := e.marshal(v, encOpts{escapeHTML: escape})
+	if err != nil {
+		return nil, err
+	}
+	buf := append([]byte(nil), e.Bytes()...)
+
+	return buf, nil
+}
+
 // MarshalIndent is like Marshal but applies Indent to format the output.
 // Each JSON element in the output will begin on a new line beginning with prefix
 // followed by one or more copies of indent according to the indentation nesting.
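The MarshalEscaped helper added above lives in an internal package and cannot be imported directly. Its escape flag mirrors the HTML-escaping switch the standard library exposes through json.Encoder.SetEscapeHTML, which the following standalone sketch illustrates.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	v := map[string]string{"q": "a<b>&c"}

	// Default marshalling HTML-escapes <, > and &.
	escaped, _ := json.Marshal(v)
	fmt.Println(string(escaped)) // {"q":"a\u003cb\u003e\u0026c"}

	// Rough equivalent of MarshalEscaped(v, false): leave those bytes alone.
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	_ = enc.Encode(v)
	fmt.Print(buf.String()) // {"q":"a<b>&c"}
}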
vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go (24 lines changed, generated, vendored)

@@ -6,7 +6,7 @@ package json
 
 import (
 	"bytes"
-	"errors"
+	"encoding/json"
 	"io"
 )
 
@@ -259,27 +259,7 @@ func (enc *Encoder) SetEscapeHTML(on bool) {
 // RawMessage is a raw encoded JSON value.
 // It implements Marshaler and Unmarshaler and can
 // be used to delay JSON decoding or precompute a JSON encoding.
-type RawMessage []byte
+type RawMessage = json.RawMessage
 
-// MarshalJSON returns m as the JSON encoding of m.
-func (m RawMessage) MarshalJSON() ([]byte, error) {
-	if m == nil {
-		return []byte("null"), nil
-	}
-	return m, nil
-}
-
-// UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
-	if m == nil {
-		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
-	}
-	*m = append((*m)[0:0], data...)
-	return nil
-}
-
-var _ Marshaler = (*RawMessage)(nil)
-var _ Unmarshaler = (*RawMessage)(nil)
-
 // A Token holds a value of one of these types:
 //
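Aliasing RawMessage to encoding/json.RawMessage gives callers the standard library's delayed-decoding behavior. A small illustration with the standard library only (not taken from the diff):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var envelope struct {
		Kind string          `json:"kind"`
		Spec json.RawMessage `json:"spec"` // decoding of this field is delayed
	}
	_ = json.Unmarshal([]byte(`{"kind":"noop","spec":{"x":1}}`), &envelope)
	fmt.Println(envelope.Kind, string(envelope.Spec)) // noop {"x":1}
}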
vendor/github.com/evanphx/json-patch/v5/merge.go (52 lines changed, generated, vendored)

@@ -10,26 +10,26 @@ import (
 	"github.com/evanphx/json-patch/v5/internal/json"
 )
 
-func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
-	curDoc, err := cur.intoDoc()
+func merge(cur, patch *lazyNode, mergeMerge bool, options *ApplyOptions) *lazyNode {
+	curDoc, err := cur.intoDoc(options)
 
 	if err != nil {
-		pruneNulls(patch)
+		pruneNulls(patch, options)
 		return patch
 	}
 
-	patchDoc, err := patch.intoDoc()
+	patchDoc, err := patch.intoDoc(options)
 
 	if err != nil {
 		return patch
 	}
 
-	mergeDocs(curDoc, patchDoc, mergeMerge)
+	mergeDocs(curDoc, patchDoc, mergeMerge, options)
 
 	return cur
 }
 
-func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool, options *ApplyOptions) {
 	for k, v := range patch.obj {
 		if v == nil {
 			if mergeMerge {
@@ -45,55 +45,55 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
 				}
 				doc.obj[k] = nil
 			} else {
-				_ = doc.remove(k, &ApplyOptions{})
+				_ = doc.remove(k, options)
 			}
 		} else {
 			cur, ok := doc.obj[k]
 
 			if !ok || cur == nil {
 				if !mergeMerge {
-					pruneNulls(v)
+					pruneNulls(v, options)
 				}
-				_ = doc.set(k, v, &ApplyOptions{})
+				_ = doc.set(k, v, options)
 			} else {
-				_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
+				_ = doc.set(k, merge(cur, v, mergeMerge, options), options)
 			}
 		}
 	}
 }
 
-func pruneNulls(n *lazyNode) {
-	sub, err := n.intoDoc()
+func pruneNulls(n *lazyNode, options *ApplyOptions) {
+	sub, err := n.intoDoc(options)
 
 	if err == nil {
-		pruneDocNulls(sub)
+		pruneDocNulls(sub, options)
 	} else {
 		ary, err := n.intoAry()
 
 		if err == nil {
-			pruneAryNulls(ary)
+			pruneAryNulls(ary, options)
 		}
 	}
 }
 
-func pruneDocNulls(doc *partialDoc) *partialDoc {
+func pruneDocNulls(doc *partialDoc, options *ApplyOptions) *partialDoc {
 	for k, v := range doc.obj {
 		if v == nil {
 			_ = doc.remove(k, &ApplyOptions{})
 		} else {
-			pruneNulls(v)
+			pruneNulls(v, options)
 		}
 	}
 
 	return doc
 }
 
-func pruneAryNulls(ary *partialArray) *partialArray {
+func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray {
 	newAry := []*lazyNode{}
 
 	for _, v := range ary.nodes {
 		if v != nil {
-			pruneNulls(v)
+			pruneNulls(v, options)
 		}
 		newAry = append(newAry, v)
 	}
@@ -128,11 +128,17 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 		return nil, errBadJSONPatch
 	}
 
-	doc := &partialDoc{}
+	options := NewApplyOptions()
 
+	doc := &partialDoc{
+		opts: options,
+	}
+
 	docErr := doc.UnmarshalJSON(docData)
 
-	patch := &partialDoc{}
+	patch := &partialDoc{
+		opts: options,
+	}
 
 	patchErr := patch.UnmarshalJSON(patchData)
 
@@ -158,7 +164,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 		if mergeMerge {
 			doc = patch
 		} else {
-			doc = pruneDocNulls(patch)
+			doc = pruneDocNulls(patch, options)
 		}
 	} else {
 		patchAry := &partialArray{}
@@ -172,7 +178,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 			return nil, errBadJSONPatch
 		}
 
-		pruneAryNulls(patchAry)
+		pruneAryNulls(patchAry, options)
 
 		out, patchErr := json.Marshal(patchAry.nodes)
 
@@ -183,7 +189,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 			return out, nil
 		}
 	} else {
-		mergeDocs(doc, patch, mergeMerge)
+		mergeDocs(doc, patch, mergeMerge, options)
 	}
 
 	return json.Marshal(doc)
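The options value now threaded through merge, mergeDocs and the prune helpers is created by the library's exported entry points such as MergePatch. A hedged usage sketch of that public API; the document and patch values are invented, and the printed result is indicative only.

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name":"ceph-csi","stale":"remove-me"}`)
	patch := []byte(`{"stale":null,"replicas":3}`)

	// RFC 7386 merge patch: a null value removes the key, other values are set.
	out, err := jsonpatch.MergePatch(doc, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"name":"ceph-csi","replicas":3}
}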
vendor/github.com/evanphx/json-patch/v5/patch.go (68 lines changed, generated, vendored)

@@ -38,6 +38,8 @@ var (
 	ErrInvalid = errors.New("invalid state detected")
 	ErrInvalidIndex = errors.New("invalid index referenced")
 
+	ErrExpectedObject = errors.New("invalid value, expected object")
+
 	rawJSONArray = []byte("[]")
 	rawJSONObject = []byte("{}")
 	rawJSONNull = []byte("null")
@@ -60,6 +62,8 @@ type partialDoc struct {
 	self *lazyNode
 	keys []string
 	obj map[string]*lazyNode
+
+	opts *ApplyOptions
 }
 
 type partialArray struct {
@@ -90,6 +94,8 @@ type ApplyOptions struct {
 	// EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation.
 	// Default to false.
 	EnsurePathExistsOnAdd bool
+
+	EscapeHTML bool
 }
 
 // NewApplyOptions creates a default set of options for calls to ApplyWithOptions.
@@ -99,6 +105,7 @@ func NewApplyOptions() *ApplyOptions {
 		AccumulatedCopySizeLimit: AccumulatedCopySizeLimit,
 		AllowMissingPathOnRemove: false,
 		EnsurePathExistsOnAdd: false,
+		EscapeHTML: true,
 	}
 }
 
@@ -134,16 +141,28 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
 }
 
 func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
+	if n.obj == nil {
+		return ErrExpectedObject
+	}
+
 	if err := buf.WriteByte('{'); err != nil {
 		return err
 	}
+	escaped := true
+
+	// n.opts should always be set, but in case we missed a case,
+	// guard.
+	if n.opts != nil {
+		escaped = n.opts.EscapeHTML
+	}
+
 	for i, k := range n.keys {
 		if i > 0 {
 			if err := buf.WriteByte(','); err != nil {
 				return err
 			}
 		}
-		key, err := json.Marshal(k)
+		key, err := json.MarshalEscaped(k, escaped)
 		if err != nil {
 			return err
 		}
@@ -153,7 +172,7 @@ func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
 		if err := buf.WriteByte(':'); err != nil {
 			return err
 		}
-		value, err := json.Marshal(n.obj[k])
+		value, err := json.MarshalEscaped(n.obj[k], escaped)
 		if err != nil {
 			return err
 		}
@@ -194,11 +213,11 @@ func (n *partialArray) RedirectMarshalJSON() (interface{}, error) {
 	return n.nodes, nil
 }
 
-func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+func deepCopy(src *lazyNode, options *ApplyOptions) (*lazyNode, int, error) {
 	if src == nil {
 		return nil, 0, nil
 	}
-	a, err := json.Marshal(src)
+	a, err := json.MarshalEscaped(src, options.EscapeHTML)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -216,7 +235,7 @@ func (n *lazyNode) nextByte() byte {
 	return s[0]
 }
 
-func (n *lazyNode) intoDoc() (*partialDoc, error) {
+func (n *lazyNode) intoDoc(options *ApplyOptions) (*partialDoc, error) {
 	if n.which == eDoc {
 		return n.doc, nil
 	}
@@ -235,6 +254,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
 		return nil, ErrInvalid
 	}
 
+	n.doc.opts = options
 	if err != nil {
 		return nil, err
 	}
@@ -545,7 +565,7 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
 				return nil, ""
 			}
 		} else {
-			doc, err = next.intoDoc()
+			doc, err = next.intoDoc(options)
 
 			if err != nil {
 				return nil, ""
@@ -557,6 +577,10 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
 }
 
 func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error {
+	if d.obj == nil {
+		return ErrExpectedObject
+	}
+
 	found := false
 	for _, k := range d.keys {
 		if k == key {
@@ -579,6 +603,11 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
 	if key == "" {
 		return d.self, nil
 	}
+
+	if d.obj == nil {
+		return nil, ErrExpectedObject
+	}
+
 	v, ok := d.obj[key]
 	if !ok {
 		return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
@@ -587,6 +616,10 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
 }
 
 func (d *partialDoc) remove(key string, options *ApplyOptions) error {
+	if d.obj == nil {
+		return ErrExpectedObject
+	}
+
 	_, ok := d.obj[key]
 	if !ok {
 		if options.AllowMissingPathOnRemove {
@@ -750,6 +783,7 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
 	} else {
 		pd = &partialDoc{
 			self: val,
+			opts: options,
 		}
 	}
 
@@ -855,7 +889,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 				newNode := newLazyNode(newRawMessage(rawJSONObject))
 
 				doc.add(part, newNode, options)
-				doc, err = newNode.intoDoc()
+				doc, err = newNode.intoDoc(options)
 				if err != nil {
 					return err
 				}
@@ -868,7 +902,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 				return err
 			}
 		} else {
-			doc, err = target.intoDoc()
+			doc, err = target.intoDoc(options)
 
 			if err != nil {
 				return err
@@ -954,6 +988,8 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
 			if !val.tryAry() {
 				return errors.Wrapf(err, "replace operation value must be object or array")
 			}
+		} else {
+			val.doc.opts = options
 		}
 	}
 
@@ -1115,7 +1151,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
 		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
 	}
 
-	valCopy, sz, err := deepCopy(val)
+	valCopy, sz, err := deepCopy(val, options)
 	if err != nil {
 		return errors.Wrapf(err, "error while performing deep copy")
 	}
@@ -1202,6 +1238,7 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
 	} else {
 		pd = &partialDoc{
 			self: self,
+			opts: options,
 		}
 	}
 
@@ -1238,11 +1275,18 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
 		}
 	}
 
-	if indent != "" {
-		return json.MarshalIndent(pd, "", indent)
+	data, err := json.MarshalEscaped(pd, options.EscapeHTML)
+	if err != nil {
+		return nil, err
 	}
 
-	return json.Marshal(pd)
+	if indent == "" {
+		return data, nil
+	}
+
+	var buf bytes.Buffer
+	json.Indent(&buf, data, "", indent)
+	return buf.Bytes(), nil
 }
 
 // From http://tools.ietf.org/html/rfc6901#section-4 :
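Taken together, the patch.go changes add an EscapeHTML field to ApplyOptions, defaulting to true in NewApplyOptions above. A sketch of how a caller might opt out of HTML escaping; DecodePatch, NewApplyOptions and ApplyWithOptions are the library's existing exported API, while the document and patch here are invented.

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"link":"old"}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op":"replace","path":"/link","value":"https://example.com/?a=1&b=2"}]`))
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.EscapeHTML = false // new field; defaults to true, matching the old behavior

	out, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the & should stay literal instead of becoming \u0026
}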
vendor/github.com/google/cel-go/checker/cost.go (3 lines changed, generated, vendored)

@@ -520,6 +520,9 @@ func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate {
 	c.iterRanges.pop(comp.GetIterVar())
 	sum = sum.Add(c.cost(comp.Result))
 	rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange()))
+
+	c.computedSizes[e.GetId()] = rangeCnt
+
 	rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
 	sum = sum.Add(rangeCost)
 
vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md (35 lines changed, generated, vendored)

@@ -1,3 +1,38 @@
+## 2.17.1
+
+### Fixes
+- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d]
+
+## 2.17.0
+
+### Features
+
+- add `--github-output` for nicer output in github actions [e8a2056]
+
+### Maintenance
+
+- fix typo in core_dsl.go [977bc6f]
+- Fix typo in docs [e297e7b]
+
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
 ## 2.15.0
 
 ### Features
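The --github-output flag noted under 2.17.0 corresponds to the reporter's GithubOutput setting used later in this diff. A hypothetical suite bootstrap that flips the same switch in code instead of on the command line (sketch, not from the commit):

package mypkg_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestMyPkg(t *testing.T) {
	RegisterFailHandler(Fail)
	suiteConfig, reporterConfig := GinkgoConfiguration()
	reporterConfig.GithubOutput = true // same switch the --github-output flag sets
	RunSpecs(t, "MyPkg Suite", suiteConfig, reporterConfig)
}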
vendor/github.com/onsi/ginkgo/v2/core_dsl.go (17 lines changed, generated, vendored)

@@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
 
 	err = global.Suite.BuildTree()
 	exitIfErr(err)
-	suitePath, err := os.Getwd()
+	suitePath, err := getwd()
 	exitIfErr(err)
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
@@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels {
 	return suiteLabels
 }
 
+func getwd() (string, error) {
+	if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+		// Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+		// is shared between two different directories with the same test code.
+		return os.Getwd()
+	}
+	return "", nil
+}
+
 /*
 PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
 See http://onsi.github.io/ginkgo/#previewing-specs for more information.
@@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report {
 
 	err = global.Suite.BuildTree()
 	exitIfErr(err)
-	suitePath, err := os.Getwd()
+	suitePath, err := getwd()
 	exitIfErr(err)
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
@@ -783,8 +792,8 @@ DeferCleanup can be passed:
 For example:
 
 	BeforeEach(func() {
-		DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO"))
-		os.SetEnv("FOO", "BAR")
+		DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO"))
+		os.Setenv("FOO", "BAR")
 	})
 
 will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
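The corrected DeferCleanup doc string reflects the usual save-and-restore pattern for environment variables. A short hedged example along those lines; the suite and spec names are invented.

package mypkg_test

import (
	"os"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("configuration", func() {
	BeforeEach(func() {
		// Restore FOO after the spec, then point it at BAR for the spec body.
		DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO"))
		os.Setenv("FOO", "BAR")
	})

	It("sees the per-spec value", func() {
		Expect(os.Getenv("FOO")).To(Equal("BAR"))
	})
})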
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go (129 lines changed, generated, vendored, new file)

@@ -0,0 +1,129 @@
+// Copyright (c) 2015, Wade Simmons
+// All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile
+
+// this file was originally taken from the gocovmerge project
+// see also: https://go.shabbyrobe.org/gocovmerge
+package internal
+
+import (
+	"fmt"
+	"io"
+	"sort"
+
+	"golang.org/x/tools/cover"
+)
+
+func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+	i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+	if i < len(profiles) && profiles[i].FileName == p.FileName {
+		MergeCoverProfiles(profiles[i], p)
+	} else {
+		profiles = append(profiles, nil)
+		copy(profiles[i+1:], profiles[i:])
+		profiles[i] = p
+	}
+	return profiles
+}
+
+func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
+	if len(profiles) == 0 {
+		return nil
+	}
+	if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+		return err
+	}
+	for _, p := range profiles {
+		for _, b := range p.Blocks {
+			if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
+	if into.Mode != merge.Mode {
+		return fmt.Errorf("cannot merge profiles with different modes")
+	}
+	// Since the blocks are sorted, we can keep track of where the last block
+	// was inserted and only look at the blocks after that as targets for merge
+	startIndex := 0
+	for _, b := range merge.Blocks {
+		var err error
+		startIndex, err = mergeProfileBlock(into, b, startIndex)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+	sortFunc := func(i int) bool {
+		pi := p.Blocks[i+startIndex]
+		return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+	}
+
+	i := 0
+	if sortFunc(i) != true {
+		i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+	}
+
+	i += startIndex
+	if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+		if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+			return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+		}
+		switch p.Mode {
+		case "set":
+			p.Blocks[i].Count |= pb.Count
+		case "count", "atomic":
+			p.Blocks[i].Count += pb.Count
+		default:
+			return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+		}
+
+	} else {
+		if i > 0 {
+			pa := p.Blocks[i-1]
+			if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+				return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+			}
+		}
+		if i < len(p.Blocks)-1 {
+			pa := p.Blocks[i+1]
+			if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+				return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+			}
+		}
+		p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+		copy(p.Blocks[i+1:], p.Blocks[i:])
+		p.Blocks[i] = pb
+	}
+
+	return i + 1, nil
+}
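AddCoverProfile, DumpCoverProfiles and MergeCoverProfiles are internal to the ginkgo CLI and cannot be imported by user code; they build on the golang.org/x/tools/cover types. The sketch below only demonstrates that underlying cover.ParseProfiles side; the profile file name is hypothetical.

package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/cover"
)

func main() {
	// "coverprofile.out" is a made-up file name.
	profiles, err := cover.ParseProfiles("coverprofile.out")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, p := range profiles {
		covered, total := 0, 0
		for _, b := range p.Blocks {
			total += b.NumStmt
			if b.Count > 0 {
				covered += b.NumStmt
			}
		}
		fmt.Printf("%s: %d/%d statements covered\n", p.FileName, covered, total)
	}
}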
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go (38 lines changed, generated, vendored)

@@ -1,7 +1,6 @@
 package internal
 
 import (
-	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
@@ -12,6 +11,7 @@ import (
 	"github.com/google/pprof/profile"
 	"github.com/onsi/ginkgo/v2/reporters"
 	"github.com/onsi/ginkgo/v2/types"
+	"golang.org/x/tools/cover"
 )
 
 func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
 	return messages, nil
 }
 
-// loads each profile, combines them, deletes them, stores them in destination
+// loads each profile, merges them, deletes them, stores them in destination
 func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
-	combined := &bytes.Buffer{}
-	modeRegex := regexp.MustCompile(`^mode: .*\n`)
-	for i, profile := range profiles {
-		contents, err := os.ReadFile(profile)
+	var merged []*cover.Profile
+	for _, file := range profiles {
+		parsedProfiles, err := cover.ParseProfiles(file)
 		if err != nil {
-			return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
+			return err
 		}
-		os.Remove(profile)
-
-		// remove the cover mode line from every file
-		// except the first one
-		if i > 0 {
-			contents = modeRegex.ReplaceAll(contents, []byte{})
+		os.Remove(file)
+		for _, p := range parsedProfiles {
+			merged = AddCoverProfile(merged, p)
 		}
-
-		_, err = combined.Write(contents)
-
-		// Add a newline to the end of every file if missing.
-		if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
-			_, err = combined.Write([]byte("\n"))
-		}
-
-		if err != nil {
-			return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
-		}
 	}
-
-	err := os.WriteFile(destination, combined.Bytes(), 0666)
+	dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
 	if err != nil {
-		return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
+		return err
 	}
+	err = DumpCoverProfiles(merged, dst)
+	if err != nil {
+		return err
+	}
 	return nil
 }
vendor/github.com/onsi/ginkgo/v2/internal/node.go (21 lines changed, generated, vendored)

@@ -5,9 +5,8 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
-	"time"
-
 	"sync"
+	"time"
 
 	"github.com/onsi/ginkgo/v2/types"
 )
@@ -44,8 +43,8 @@ type Node struct {
 	SynchronizedAfterSuiteProc1Body func(SpecContext)
 	SynchronizedAfterSuiteProc1BodyHasContext bool
 
-	ReportEachBody func(types.SpecReport)
-	ReportSuiteBody func(types.Report)
+	ReportEachBody func(SpecContext, types.SpecReport)
+	ReportSuiteBody func(SpecContext, types.Report)
 
 	MarkedFocus bool
 	MarkedPending bool
@@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 		node.Body = func(SpecContext) { body() }
 	} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
 		if node.ReportEachBody == nil {
-			node.ReportEachBody = arg.(func(types.SpecReport))
+			if fn, ok := arg.(func(types.SpecReport)); ok {
+				node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+			} else {
+				node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+				node.HasContext = true
+			}
 		} else {
 			appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
 			trackedFunctionError = true
@@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 		}
 	} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
 		if node.ReportSuiteBody == nil {
-			node.ReportSuiteBody = arg.(func(types.Report))
+			if fn, ok := arg.(func(types.Report)); ok {
+				node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+			} else {
+				node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+				node.HasContext = true
+			}
 		} else {
 			appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
 			trackedFunctionError = true
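With ReportEachBody now taking a SpecContext, reporting nodes such as ReportAfterEach can optionally accept a context, letting slow report hooks notice interrupts. A hedged sketch of the user-facing form; the hook body is illustrative.

package mypkg_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = ReportAfterEach(func(ctx SpecContext, report SpecReport) {
	// e.g. ship the result somewhere, but stop early if the suite is
	// being interrupted.
	select {
	case <-ctx.Done():
	default:
		GinkgoWriter.Printf("%s: %s\n", report.FullText(), report.State)
	}
})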
vendor/github.com/onsi/ginkgo/v2/internal/suite.go (6 lines changed, generated, vendored)

@@ -594,8 +594,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
 		suite.writer.Truncate()
 		suite.outputInterceptor.StartInterceptingOutput()
 		report := suite.currentSpecReport
-		nodes[i].Body = func(SpecContext) {
-			nodes[i].ReportEachBody(report)
+		nodes[i].Body = func(ctx SpecContext) {
+			nodes[i].ReportEachBody(ctx, report)
 		}
 		state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
 
@@ -762,7 +762,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
 		report = report.Add(aggregatedReport)
 	}
 
-	node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+	node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
 	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
 
 	suite.currentSpecReport.EndTime = time.Now()
35  vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go  (generated, vendored)
@@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
 	r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
 }
 
+func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
+	r.emitBlock("\n")
+	if r.conf.GithubOutput {
+		r.emitBlock(r.fi(1, "::group::%s", sectionName))
+	} else {
+		r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
+	}
+	fn()
+	if r.conf.GithubOutput {
+		r.emitBlock(r.fi(1, "::endgroup::"))
+	} else {
+		r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
+	}
+}
+
 func (r *DefaultReporter) DidRun(report types.SpecReport) {
 	v := r.conf.Verbosity()
 	inParallel := report.RunningInParallel
@@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
 
 	//Emit Stdout/Stderr Output
 	if showSeparateStdSection {
-		r.emitBlock("\n")
-		r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
+		r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
 			r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
-		r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
+		})
 	}
 
 	if showSeparateVisibilityAlwaysReportsSection {
-		r.emitBlock("\n")
-		r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
+		r.wrapTextBlock("Report Entries", func() {
 			for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
 				r.emitReportEntry(1, entry)
 			}
-		r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
+		})
 	}
 
 	if showTimeline {
-		r.emitBlock("\n")
-		r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
+		r.wrapTextBlock("Timeline", func() {
 			r.emitTimeline(1, report, timeline)
-		r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
+		})
 	}
 
 	// Emit Failure Message
@@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
 func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
 	highlightColor := r.highlightColorForState(state)
 	r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
+	if r.conf.GithubOutput {
+		r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+	} else {
 		r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+	}
 	if failure.ForwardedPanic != "" {
 		r.emitBlock("\n")
 		r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
59  vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go  (generated, vendored)
@@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) {
 
 /*
 ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
-receives a SpecReport. They are called before the spec starts.
+receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+	ReportBeforeEach(func(report SpecReport) { // process report })
+	ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+		// process report
+	}), NodeTimeout(1 * time.Minute))
 
 You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
 You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+func ReportBeforeEach(body any, args ...any) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
 
@@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
 }
 
 /*
-ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
-receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+	ReportAfterEach(func(report SpecReport) { // process report })
+	ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+		// process report
+	}), NodeTimeout(1 * time.Minute))
 
 You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
 You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+func ReportAfterEach(body any, args ...any) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
 
@@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
 }
 
 /*
-ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+	ReportBeforeSuite(func(r Report) { // process report })
+	ReportBeforeSuite(func(ctx SpecContext, r Report) {
+		// process report
+	}, NodeTimeout(1 * time.Minute))
 
 They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
 ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
@@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
 You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
 
 You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
 
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+func ReportBeforeSuite(body any, args ...any) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
 	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
 }
 
 /*
-ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion,
+and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+	ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+	ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+		// process report
+	}, NodeTimeout(1 * time.Minute))
 
 They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
-ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
 
 When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
 all parallel nodes
@@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
 You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
 
 You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
 	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
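For context on the reporting DSL changes above: with Ginkgo v2.17 the report nodes also accept a (SpecContext, ...) body, which makes them interruptible via NodeTimeout. A minimal sketch of how a suite might use the new ReportAfterSuite form; the processReport helper and suite name are hypothetical:

package mysuite_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

// processReport is a hypothetical helper standing in for whatever report
// handling (uploading, summarizing, ...) a suite actually needs.
func processReport(r Report) {
	GinkgoWriter.Printf("suite %q finished with %d specs\n", r.SuiteDescription, len(r.SpecReports))
}

var _ = ReportAfterSuite("process final report", func(ctx SpecContext, r Report) {
	// Because this body takes a SpecContext, the node is interruptible: if it
	// runs longer than the NodeTimeout below, ctx is cancelled and the node fails.
	select {
	case <-ctx.Done():
		return
	default:
		processReport(r)
	}
}, NodeTimeout(1*time.Minute))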
5  vendor/github.com/onsi/ginkgo/v2/types/config.go  (generated, vendored)
@@ -89,6 +89,7 @@ type ReporterConfig struct {
 	VeryVerbose    bool
 	FullTrace      bool
 	ShowNodeEvents bool
+	GithubOutput   bool
 
 	JSONReport  string
 	JUnitReport string
@@ -264,7 +265,7 @@ var FlagSections = GinkgoFlagSections{
 // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
 var SuiteConfigFlags = GinkgoFlags{
 	{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
-		Usage: "The seed used to randomize the spec suite."},
+		Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
 	{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
 		Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
 
@@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{
 		Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
 	{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
 		Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
+	{KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
+		Usage: "If set, default reporter prints easier to manage output in Github Actions."},
 
 	{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
 		Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
vendor/github.com/onsi/ginkgo/v2/types/flags.go
generated
vendored
11
vendor/github.com/onsi/ginkgo/v2/types/flags.go
generated
vendored
@ -25,6 +25,7 @@ type GinkgoFlag struct {
|
|||||||
DeprecatedVersion string
|
DeprecatedVersion string
|
||||||
|
|
||||||
ExportAs string
|
ExportAs string
|
||||||
|
AlwaysExport bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type GinkgoFlags []GinkgoFlag
|
type GinkgoFlags []GinkgoFlag
|
||||||
@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
|
|||||||
iface := value.Interface()
|
iface := value.Interface()
|
||||||
switch value.Type() {
|
switch value.Type() {
|
||||||
case reflect.TypeOf(string("")):
|
case reflect.TypeOf(string("")):
|
||||||
if iface.(string) != "" {
|
if iface.(string) != "" || flag.AlwaysExport {
|
||||||
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
||||||
}
|
}
|
||||||
case reflect.TypeOf(int64(0)):
|
case reflect.TypeOf(int64(0)):
|
||||||
if iface.(int64) != 0 {
|
if iface.(int64) != 0 || flag.AlwaysExport {
|
||||||
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
||||||
}
|
}
|
||||||
case reflect.TypeOf(float64(0)):
|
case reflect.TypeOf(float64(0)):
|
||||||
if iface.(float64) != 0 {
|
if iface.(float64) != 0 || flag.AlwaysExport {
|
||||||
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
|
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
|
||||||
}
|
}
|
||||||
case reflect.TypeOf(int(0)):
|
case reflect.TypeOf(int(0)):
|
||||||
if iface.(int) != 0 {
|
if iface.(int) != 0 || flag.AlwaysExport {
|
||||||
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
||||||
}
|
}
|
||||||
case reflect.TypeOf(bool(true)):
|
case reflect.TypeOf(bool(true)):
|
||||||
@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
|
|||||||
result = append(result, fmt.Sprintf("--%s", name))
|
result = append(result, fmt.Sprintf("--%s", name))
|
||||||
}
|
}
|
||||||
case reflect.TypeOf(time.Duration(0)):
|
case reflect.TypeOf(time.Duration(0)):
|
||||||
if iface.(time.Duration) != time.Duration(0) {
|
if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
|
||||||
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
2  vendor/github.com/onsi/ginkgo/v2/types/version.go  (generated, vendored)
@@ -1,3 +1,3 @@
 package types
 
-const VERSION = "2.15.0"
+const VERSION = "2.17.1"
13  vendor/github.com/onsi/gomega/CHANGELOG.md  (generated, vendored)
@@ -1,3 +1,16 @@
+## 1.32.0
+
+### Maintenance
+- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197]
+
+  This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one.
+
+- chore: test with Go 1.22 (#733) [32ef35e]
+- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387]
+- Bump github-pages and jekyll-feed in /docs (#732) [b71e477]
+- docs: fix typo and broken anchor link to gstruct [f460154]
+- docs: fix HaveEach matcher signature [a2862e4]
+
 ## 1.31.1
 
 ### Fixes
2  vendor/github.com/onsi/gomega/gomega_dsl.go  (generated, vendored)
@@ -22,7 +22,7 @@ import (
 	"github.com/onsi/gomega/types"
 )
 
-const GOMEGA_VERSION = "1.31.1"
+const GOMEGA_VERSION = "1.32.0"
 
 const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
 If you're using Ginkgo then you probably forgot to put your assertion in an It().
266  vendor/golang.org/x/tools/cover/profile.go  (generated, vendored, Normal file)
@@ -0,0 +1,266 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cover provides support for parsing coverage profiles
+// generated by "go test -coverprofile=cover.out".
+package cover // import "golang.org/x/tools/cover"
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Profile represents the profiling data for a specific file.
+type Profile struct {
+	FileName string
+	Mode     string
+	Blocks   []ProfileBlock
+}
+
+// ProfileBlock represents a single block of profiling data.
+type ProfileBlock struct {
+	StartLine, StartCol int
+	EndLine, EndCol     int
+	NumStmt, Count      int
+}
+
+type byFileName []*Profile
+
+func (p byFileName) Len() int           { return len(p) }
+func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
+func (p byFileName) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// ParseProfiles parses profile data in the specified file and returns a
+// Profile for each source file described therein.
+func ParseProfiles(fileName string) ([]*Profile, error) {
+	pf, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer pf.Close()
+	return ParseProfilesFromReader(pf)
+}
+
+// ParseProfilesFromReader parses profile data from the Reader and
+// returns a Profile for each source file described therein.
+func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
+	// First line is "mode: foo", where foo is "set", "count", or "atomic".
+	// Rest of file is in the format
+	//	encoding/base64/base64.go:34.44,37.40 3 1
+	// where the fields are: name.go:line.column,line.column numberOfStatements count
+	files := make(map[string]*Profile)
+	s := bufio.NewScanner(rd)
+	mode := ""
+	for s.Scan() {
+		line := s.Text()
+		if mode == "" {
+			const p = "mode: "
+			if !strings.HasPrefix(line, p) || line == p {
+				return nil, fmt.Errorf("bad mode line: %v", line)
+			}
+			mode = line[len(p):]
+			continue
+		}
+		fn, b, err := parseLine(line)
+		if err != nil {
+			return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err)
+		}
+		p := files[fn]
+		if p == nil {
+			p = &Profile{
+				FileName: fn,
+				Mode:     mode,
+			}
+			files[fn] = p
+		}
+		p.Blocks = append(p.Blocks, b)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	for _, p := range files {
+		sort.Sort(blocksByStart(p.Blocks))
+		// Merge samples from the same location.
+		j := 1
+		for i := 1; i < len(p.Blocks); i++ {
+			b := p.Blocks[i]
+			last := p.Blocks[j-1]
+			if b.StartLine == last.StartLine &&
+				b.StartCol == last.StartCol &&
+				b.EndLine == last.EndLine &&
+				b.EndCol == last.EndCol {
+				if b.NumStmt != last.NumStmt {
+					return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
+				}
+				if mode == "set" {
+					p.Blocks[j-1].Count |= b.Count
+				} else {
+					p.Blocks[j-1].Count += b.Count
+				}
+				continue
+			}
+			p.Blocks[j] = b
+			j++
+		}
+		p.Blocks = p.Blocks[:j]
+	}
+	// Generate a sorted slice.
+	profiles := make([]*Profile, 0, len(files))
+	for _, profile := range files {
+		profiles = append(profiles, profile)
+	}
+	sort.Sort(byFileName(profiles))
+	return profiles, nil
+}
+
+// parseLine parses a line from a coverage file.
+// It is equivalent to the regex
+//	^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
+//
+// However, it is much faster: https://golang.org/cl/179377
+func parseLine(l string) (fileName string, block ProfileBlock, err error) {
+	end := len(l)
+
+	b := ProfileBlock{}
+	b.Count, end, err = seekBack(l, ' ', end, "Count")
+	if err != nil {
+		return "", b, err
+	}
+	b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
+	if err != nil {
+		return "", b, err
+	}
+	b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
+	if err != nil {
+		return "", b, err
+	}
+	b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
+	if err != nil {
+		return "", b, err
+	}
+	b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
+	if err != nil {
+		return "", b, err
+	}
+	b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
+	if err != nil {
+		return "", b, err
+	}
+	fn := l[0:end]
+	if fn == "" {
+		return "", b, errors.New("a FileName cannot be blank")
+	}
+	return fn, b, nil
+}
+
+// seekBack searches backwards from end to find sep in l, then returns the
+// value between sep and end as an integer.
+// If seekBack fails, the returned error will reference what.
+func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
+	// Since we're seeking backwards and we know only ASCII is legal for these values,
+	// we can ignore the possibility of non-ASCII characters.
+	for start := end - 1; start >= 0; start-- {
+		if l[start] == sep {
+			i, err := strconv.Atoi(l[start+1 : end])
+			if err != nil {
+				return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
+			}
+			if i < 0 {
+				return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
+			}
+			return i, start, nil
+		}
+	}
+	return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
+}
+
+type blocksByStart []ProfileBlock
+
+func (b blocksByStart) Len() int      { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+	bi, bj := b[i], b[j]
+	return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+// Boundary represents the position in a source file of the beginning or end of a
+// block as reported by the coverage profile. In HTML mode, it will correspond to
+// the opening or closing of a <span> tag and will be used to colorize the source
+type Boundary struct {
+	Offset int     // Location as a byte offset in the source file.
+	Start  bool    // Is this the start of a block?
+	Count  int     // Event count from the cover profile.
+	Norm   float64 // Count normalized to [0..1].
+	Index  int     // Order in input file.
+}
+
+// Boundaries returns a Profile as a set of Boundary objects within the provided src.
+func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
+	// Find maximum count.
+	max := 0
+	for _, b := range p.Blocks {
+		if b.Count > max {
+			max = b.Count
+		}
+	}
+	// Divisor for normalization.
+	divisor := math.Log(float64(max))
+
+	// boundary returns a Boundary, populating the Norm field with a normalized Count.
+	index := 0
+	boundary := func(offset int, start bool, count int) Boundary {
+		b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
+		index++
+		if !start || count == 0 {
+			return b
+		}
+		if max <= 1 {
+			b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
+		} else if count > 0 {
+			b.Norm = math.Log(float64(count)) / divisor
+		}
+		return b
+	}
+
+	line, col := 1, 2 // TODO: Why is this 2?
+	for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
+		b := p.Blocks[bi]
+		if b.StartLine == line && b.StartCol == col {
+			boundaries = append(boundaries, boundary(si, true, b.Count))
+		}
+		if b.EndLine == line && b.EndCol == col || line > b.EndLine {
+			boundaries = append(boundaries, boundary(si, false, 0))
+			bi++
+			continue // Don't advance through src; maybe the next block starts here.
+		}
+		if src[si] == '\n' {
+			line++
+			col = 0
+		}
+		col++
+		si++
+	}
+	sort.Sort(boundariesByPos(boundaries))
+	return
+}
+
+type boundariesByPos []Boundary
+
+func (b boundariesByPos) Len() int      { return len(b) }
+func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b boundariesByPos) Less(i, j int) bool {
+	if b[i].Offset == b[j].Offset {
+		// Boundaries at the same offset should be ordered according to
+		// their original position.
+		return b[i].Index < b[j].Index
+	}
+	return b[i].Offset < b[j].Offset
+}
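The newly vendored cover package parses the profiles written by `go test -coverprofile`. A minimal usage sketch (the cover.out path is illustrative) that prints per-file statement coverage:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/cover"
)

func main() {
	// Parse a coverage profile produced by `go test -coverprofile=cover.out ./...`.
	profiles, err := cover.ParseProfiles("cover.out")
	if err != nil {
		log.Fatal(err)
	}

	for _, p := range profiles {
		var covered, total int
		for _, b := range p.Blocks {
			total += b.NumStmt
			if b.Count > 0 {
				covered += b.NumStmt
			}
		}
		if total > 0 {
			fmt.Printf("%s: %.1f%% of statements\n", p.FileName, 100*float64(covered)/float64(total))
		}
	}
}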
3  vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go  (generated, vendored)
@@ -326,6 +326,9 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com
 	if err != nil {
 		return nil, err
 	}
+	if config.PostProcessSpec != nil {
+		return config.PostProcessSpec(a.spec)
+	}
 	return a.spec, nil
 }
 
3  vendor/k8s.io/kube-openapi/pkg/common/common.go  (generated, vendored)
@@ -164,6 +164,9 @@ type OpenAPIV3Config struct {
 	// It is an optional function to customize model names.
 	GetDefinitionName func(name string) (string, spec.Extensions)
 
+	// PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving.
+	PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error)
+
 	// SecuritySchemes is list of all security schemes for OpenAPI service.
 	SecuritySchemes spec3.SecuritySchemes
 
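A minimal sketch of how the new PostProcessSpec hook might be set on an OpenAPIV3Config; the title tweak is purely illustrative and assumes the spec's Info section is populated:

package openapicfg

import (
	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/spec3"
)

// newConfig returns an OpenAPIV3Config whose spec gets one last, global
// adjustment right before it is served.
func newConfig() *common.OpenAPIV3Config {
	cfg := &common.OpenAPIV3Config{}
	cfg.PostProcessSpec = func(s *spec3.OpenAPI) (*spec3.OpenAPI, error) {
		if s.Info != nil {
			s.Info.Title = s.Info.Title + " (post-processed)"
		}
		return s, nil
	}
	return cfg
}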
3  vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go  (generated, vendored)
@@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) {
 		}
 	}
 
-	if union.Discriminator != nil && len(union.Fields) == 0 {
-		return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator)
-	}
 	return union, nil
 }
 
33  vendor/modules.txt  (vendored)
@@ -253,7 +253,7 @@ github.com/emicklei/go-restful/v3/log
 # github.com/evanphx/json-patch v5.6.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
-# github.com/evanphx/json-patch/v5 v5.8.0
+# github.com/evanphx/json-patch/v5 v5.9.0
 ## explicit; go 1.18
 github.com/evanphx/json-patch/v5
 github.com/evanphx/json-patch/v5/internal/json
@@ -323,7 +323,7 @@ github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/ptypes/wrappers
-# github.com/google/cel-go v0.17.7
+# github.com/google/cel-go v0.17.8
 ## explicit; go 1.18
 github.com/google/cel-go/cel
 github.com/google/cel-go/checker
@@ -518,7 +518,7 @@ github.com/munnerz/goautoneg
 # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
 ## explicit
 github.com/mxk/go-flowrate/flowrate
-# github.com/onsi/ginkgo/v2 v2.15.0
+# github.com/onsi/ginkgo/v2 v2.17.1
 ## explicit; go 1.20
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
@@ -540,7 +540,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
 github.com/onsi/ginkgo/v2/internal/testingtproxy
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.31.1
+# github.com/onsi/gomega v1.32.0
 ## explicit; go 1.20
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -804,8 +804,9 @@ golang.org/x/text/width
 # golang.org/x/time v0.3.0
 ## explicit
 golang.org/x/time/rate
-# golang.org/x/tools v0.16.1
+# golang.org/x/tools v0.18.0
 ## explicit; go 1.18
+golang.org/x/tools/cover
 golang.org/x/tools/go/ast/inspector
 # gomodules.xyz/jsonpatch/v2 v2.4.0 => github.com/gomodules/jsonpatch/v2 v2.2.0
 ## explicit; go 1.12
@@ -942,7 +943,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.29.3 => k8s.io/api v0.29.3
+# k8s.io/api v0.30.0 => k8s.io/api v0.29.3
 ## explicit; go 1.21
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -998,12 +999,12 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.29.2 => k8s.io/apiextensions-apiserver v0.29.3
+# k8s.io/apiextensions-apiserver v0.30.0 => k8s.io/apiextensions-apiserver v0.29.3
 ## explicit; go 1.21
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/features
-# k8s.io/apimachinery v0.29.3 => k8s.io/apimachinery v0.29.3
+# k8s.io/apimachinery v0.30.0 => k8s.io/apimachinery v0.29.3
 ## explicit; go 1.21
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1066,7 +1067,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.29.3 => k8s.io/apiserver v0.29.3
+# k8s.io/apiserver v0.30.0 => k8s.io/apiserver v0.29.3
 ## explicit; go 1.21
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/cel
@@ -1500,7 +1501,7 @@ k8s.io/cloud-provider/names
 k8s.io/cloud-provider/options
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/component-base v0.29.3 => k8s.io/component-base v0.29.3
+# k8s.io/component-base v0.30.0 => k8s.io/component-base v0.29.3
 ## explicit; go 1.21
 k8s.io/component-base/cli/flag
 k8s.io/component-base/config
@@ -1555,8 +1556,8 @@ k8s.io/kms/apis/v1beta1
 k8s.io/kms/apis/v2
 k8s.io/kms/pkg/service
 k8s.io/kms/pkg/util
-# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
-## explicit; go 1.19
+# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
+## explicit; go 1.20
 k8s.io/kube-openapi/pkg/builder
 k8s.io/kube-openapi/pkg/builder3
 k8s.io/kube-openapi/pkg/builder3/util
@@ -1676,14 +1677,14 @@ k8s.io/utils/ptr
 k8s.io/utils/strings
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
-# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0
+# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0
 ## explicit; go 1.20
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/controller-runtime v0.17.3
-## explicit; go 1.21
+# sigs.k8s.io/controller-runtime v0.18.2
+## explicit; go 1.22.0
 sigs.k8s.io/controller-runtime/pkg/cache
 sigs.k8s.io/controller-runtime/pkg/cache/internal
 sigs.k8s.io/controller-runtime/pkg/certwatcher
@@ -1693,7 +1694,6 @@ sigs.k8s.io/controller-runtime/pkg/client/apiutil
 sigs.k8s.io/controller-runtime/pkg/client/config
 sigs.k8s.io/controller-runtime/pkg/cluster
 sigs.k8s.io/controller-runtime/pkg/config
-sigs.k8s.io/controller-runtime/pkg/config/v1alpha1
 sigs.k8s.io/controller-runtime/pkg/controller
 sigs.k8s.io/controller-runtime/pkg/event
 sigs.k8s.io/controller-runtime/pkg/handler
@@ -1716,7 +1716,6 @@ sigs.k8s.io/controller-runtime/pkg/predicate
 sigs.k8s.io/controller-runtime/pkg/ratelimiter
 sigs.k8s.io/controller-runtime/pkg/reconcile
 sigs.k8s.io/controller-runtime/pkg/recorder
-sigs.k8s.io/controller-runtime/pkg/scheme
 sigs.k8s.io/controller-runtime/pkg/source
 sigs.k8s.io/controller-runtime/pkg/webhook
 sigs.k8s.io/controller-runtime/pkg/webhook/admission
50  vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go  (generated, vendored)
@@ -39,11 +39,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/cache/internal"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
-	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
 )
 
 var (
-	log               = logf.RuntimeLog.WithName("object-cache")
 	defaultSyncPeriod = 10 * time.Hour
 )
 
@@ -203,6 +201,9 @@ type Options struct {
 
 	// DefaultTransform will be used as transform for all object types
 	// unless there is already one set in ByObject or DefaultNamespaces.
+	//
+	// A typical usecase for this is to use TransformStripManagedFields
+	// to reduce the caches memory usage.
 	DefaultTransform toolscache.TransformFunc
 
 	// DefaultWatchErrorHandler will be used to the WatchErrorHandler which is called
@@ -222,7 +223,7 @@ type Options struct {
 	DefaultUnsafeDisableDeepCopy *bool
 
 	// ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object.
-	// object, this will fall through to Default* settings.
+	// If unset, this will fall through to the Default* settings.
 	ByObject map[client.Object]ByObject
 
 	// newInformer allows overriding of NewSharedIndexInformer for testing.
@@ -346,6 +347,20 @@ func New(cfg *rest.Config, opts Options) (Cache, error) {
 	return delegating, nil
 }
 
+// TransformStripManagedFields strips the managed fields of an object before it is committed to the cache.
+// If you are not explicitly accessing managedFields from your code, setting this as `DefaultTransform`
+// on the cache can lead to a significant reduction in memory usage.
+func TransformStripManagedFields() toolscache.TransformFunc {
+	return func(in any) (any, error) {
+		// Nilcheck managed fields to avoid hitting https://github.com/kubernetes/kubernetes/issues/124337
+		if obj, err := meta.Accessor(in); err == nil && obj.GetManagedFields() != nil {
+			obj.SetManagedFields(nil)
+		}
+
+		return in, nil
+	}
+}
+
 func optionDefaultsToConfig(opts *Options) Config {
 	return Config{
 		LabelSelector: opts.DefaultLabelSelector,
@@ -419,19 +434,6 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
 		}
 	}
 
-	for namespace, cfg := range opts.DefaultNamespaces {
-		cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts))
-		if namespace == metav1.NamespaceAll {
-			cfg.FieldSelector = fields.AndSelectors(
-				appendIfNotNil(
-					namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)),
-					cfg.FieldSelector,
-				)...,
-			)
-		}
-		opts.DefaultNamespaces[namespace] = cfg
-	}
-
 	for obj, byObject := range opts.ByObject {
 		isNamespaced, err := apiutil.IsObjectNamespaced(obj, opts.Scheme, opts.Mapper)
 		if err != nil {
@@ -485,6 +487,22 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
 		opts.ByObject[obj] = byObject
 	}
 
+	// Default namespaces after byObject has been defaulted, otherwise a namespace without selectors
+	// will get the `Default` selectors, then get copied to byObject and then not get defaulted from
+	// byObject, as it already has selectors.
+	for namespace, cfg := range opts.DefaultNamespaces {
+		cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts))
+		if namespace == metav1.NamespaceAll {
+			cfg.FieldSelector = fields.AndSelectors(
+				appendIfNotNil(
+					namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)),
+					cfg.FieldSelector,
+				)...,
+			)
+		}
+		opts.DefaultNamespaces[namespace] = cfg
+	}
+
 	// Default the resync period to 10 hours if unset
 	if opts.SyncPeriod == nil {
 		opts.SyncPeriod = &defaultSyncPeriod
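A minimal sketch of opting a controller-runtime manager's cache into the new TransformStripManagedFields helper; the manager wiring beyond the cache option is elided and illustrative:

package main

import (
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Cache: cache.Options{
			// Strip managedFields before objects are stored, which can noticeably
			// reduce the cache's memory footprint if managedFields are never read.
			DefaultTransform: cache.TransformStripManagedFields(),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = mgr // register controllers and start the manager as usual
}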
2  vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go  (generated, vendored)
@@ -36,7 +36,7 @@ import (
 // CacheReader is a client.Reader.
 var _ client.Reader = &CacheReader{}
 
-// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type.
+// CacheReader wraps a cache.Index to implement the client.Reader interface for a single type.
 type CacheReader struct {
 	// indexer is the underlying indexer wrapped by this cache.
 	indexer cache.Indexer
13  vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go  (generated, vendored)
@@ -18,6 +18,7 @@ package internal
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/rand"
 	"net/http"
@@ -186,10 +187,14 @@ type Informers struct {
 // Start calls Run on each of the informers and sets started to true. Blocks on the context.
 // It doesn't return start because it can't return an error, and it's not a runnable directly.
 func (ip *Informers) Start(ctx context.Context) error {
-	func() {
+	if err := func() error {
 		ip.mu.Lock()
 		defer ip.mu.Unlock()
 
+		if ip.started {
+			return errors.New("Informer already started") //nolint:stylecheck
+		}
+
 		// Set the context so it can be passed to informers that are added later
 		ip.ctx = ctx
 
@@ -207,7 +212,11 @@ func (ip *Informers) Start(ctx context.Context) error {
 		// Set started to true so we immediately start any informers added later.
 		ip.started = true
 		close(ip.startWait)
-	}()
+
+		return nil
+	}(); err != nil {
+		return err
+	}
 	<-ctx.Done() // Block until the context is done
 	ip.mu.Lock()
 	ip.stopped = true // Set stopped to true so we don't start any new informers
12  vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go  (generated, vendored)
@@ -163,12 +163,13 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema
 }
 
 func (c *multiNamespaceCache) Start(ctx context.Context) error {
+	errs := make(chan error)
 	// start global cache
 	if c.clusterCache != nil {
 		go func() {
 			err := c.clusterCache.Start(ctx)
 			if err != nil {
-				log.Error(err, "cluster scoped cache failed to start")
+				errs <- fmt.Errorf("failed to start cluster-scoped cache: %w", err)
 			}
 		}()
 	}
@@ -177,13 +178,16 @@ func (c *multiNamespaceCache) Start(ctx context.Context) error {
 	for ns, cache := range c.namespaceToCache {
 		go func(ns string, cache Cache) {
 			if err := cache.Start(ctx); err != nil {
-				log.Error(err, "multi-namespace cache failed to start namespaced informer", "namespace", ns)
+				errs <- fmt.Errorf("failed to start cache for namespace %s: %w", ns, err)
 			}
 		}(ns, cache)
 	}
-	<-ctx.Done()
-	return nil
+	select {
+	case <-ctx.Done():
+		return nil
+	case err := <-errs:
+		return err
+	}
 }
 
 func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool {
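The Start rewrite above is an instance of a common fan-in pattern: worker goroutines report failures on a channel, and the caller returns the first error, or nil once the context ends. A small standalone sketch of the same pattern (names and tasks are illustrative, not taken from controller-runtime):

package main

import (
	"context"
	"fmt"
	"time"
)

// startAll launches one goroutine per task and returns the first error any of
// them reports, or nil if the context finishes before anything fails.
func startAll(ctx context.Context, tasks []func(context.Context) error) error {
	errs := make(chan error)
	for _, task := range tasks {
		task := task
		go func() {
			if err := task(ctx); err != nil {
				errs <- err
			}
		}()
	}
	select {
	case <-ctx.Done():
		return nil
	case err := <-errs:
		return err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := startAll(ctx, []func(context.Context) error{
		func(ctx context.Context) error { <-ctx.Done(); return nil },
		func(ctx context.Context) error { return fmt.Errorf("boom") },
	})
	fmt.Println(err) // the failing task wins the select: prints "boom"
}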
4  vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go  (generated, vendored)
@@ -523,8 +523,8 @@ func (co *SubResourceCreateOptions) ApplyOptions(opts []SubResourceCreateOption)
 	return co
 }
 
-// ApplyToSubresourceCreate applies the the configuration on the given create options.
-func (co *SubResourceCreateOptions) ApplyToSubresourceCreate(o *SubResourceCreateOptions) {
+// ApplyToSubResourceCreate applies the the configuration on the given create options.
+func (co *SubResourceCreateOptions) ApplyToSubResourceCreate(o *SubResourceCreateOptions) {
 	co.CreateOptions.ApplyToCreate(&co.CreateOptions)
 }
 
106  vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go  (generated, vendored, new file)
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// WithFieldOwner wraps a Client and adds the fieldOwner as the field
+// manager to all write requests from this client. If additional [FieldOwner]
+// options are specified on methods of this client, the value specified here
+// will be overridden.
+func WithFieldOwner(c Client, fieldOwner string) Client {
+	return &clientWithFieldManager{
+		owner:  fieldOwner,
+		c:      c,
+		Reader: c,
+	}
+}
+
+type clientWithFieldManager struct {
+	owner string
+	c     Client
+	Reader
+}
+
+func (f *clientWithFieldManager) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+	return f.c.Create(ctx, obj, append([]CreateOption{FieldOwner(f.owner)}, opts...)...)
+}
+
+func (f *clientWithFieldManager) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	return f.c.Update(ctx, obj, append([]UpdateOption{FieldOwner(f.owner)}, opts...)...)
+}
+
+func (f *clientWithFieldManager) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	return f.c.Patch(ctx, obj, patch, append([]PatchOption{FieldOwner(f.owner)}, opts...)...)
+}
+
+func (f *clientWithFieldManager) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+	return f.c.Delete(ctx, obj, opts...)
+}
+
+func (f *clientWithFieldManager) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+	return f.c.DeleteAllOf(ctx, obj, opts...)
+}
+
+func (f *clientWithFieldManager) Scheme() *runtime.Scheme     { return f.c.Scheme() }
+func (f *clientWithFieldManager) RESTMapper() meta.RESTMapper { return f.c.RESTMapper() }
+func (f *clientWithFieldManager) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
+	return f.c.GroupVersionKindFor(obj)
+}
+func (f *clientWithFieldManager) IsObjectNamespaced(obj runtime.Object) (bool, error) {
+	return f.c.IsObjectNamespaced(obj)
+}
+
+func (f *clientWithFieldManager) Status() StatusWriter {
+	return &subresourceClientWithFieldOwner{
+		owner:             f.owner,
+		subresourceWriter: f.c.Status(),
+	}
+}
+
+func (f *clientWithFieldManager) SubResource(subresource string) SubResourceClient {
+	c := f.c.SubResource(subresource)
+	return &subresourceClientWithFieldOwner{
+		owner:             f.owner,
+		subresourceWriter: c,
+		SubResourceReader: c,
+	}
+}
+
+type subresourceClientWithFieldOwner struct {
+	owner             string
+	subresourceWriter SubResourceWriter
+	SubResourceReader
+}
+
+func (f *subresourceClientWithFieldOwner) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error {
+	return f.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{FieldOwner(f.owner)}, opts...)...)
+}
+
+func (f *subresourceClientWithFieldOwner) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
+	return f.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{FieldOwner(f.owner)}, opts...)...)
+}
+
+func (f *subresourceClientWithFieldOwner) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
+	return f.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{FieldOwner(f.owner)}, opts...)...)
+}
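The new client.WithFieldOwner wrapper stamps a default field manager on every write issued through the wrapped client, unless a per-call client.FieldOwner option overrides it. A rough usage sketch; the owner string and the ConfigMap are illustrative, not from this repository:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createWithOwner(ctx context.Context) error {
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		return err
	}

	// Every Create/Update/Patch through "wrapped" carries fieldManager=example-operator.
	wrapped := client.WithFieldOwner(c, "example-operator")

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	return wrapped.Create(ctx, cm)
}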
112  vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go  (generated, vendored)
@@ -1,112 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-import (
-	"fmt"
-	"os"
-	"sync"
-
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
-)
-
-// ControllerManagerConfiguration defines the functions necessary to parse a config file
-// and to configure the Options struct for the ctrl.Manager.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type ControllerManagerConfiguration interface {
-	runtime.Object
-
-	// Complete returns the versioned configuration
-	Complete() (v1alpha1.ControllerManagerConfigurationSpec, error)
-}
-
-// DeferredFileLoader is used to configure the decoder for loading controller
-// runtime component config types.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type DeferredFileLoader struct {
-	ControllerManagerConfiguration
-	path   string
-	scheme *runtime.Scheme
-	once   sync.Once
-	err    error
-}
-
-// File will set up the deferred file loader for the configuration
-// this will also configure the defaults for the loader if nothing is
-//
-// Defaults:
-// * Path: "./config.yaml"
-// * Kind: GenericControllerManagerConfiguration
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-func File() *DeferredFileLoader {
-	scheme := runtime.NewScheme()
-	utilruntime.Must(v1alpha1.AddToScheme(scheme))
-	return &DeferredFileLoader{
-		path:                           "./config.yaml",
-		ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{},
-		scheme:                         scheme,
-	}
-}
-
-// Complete will use sync.Once to set the scheme.
-func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) {
-	d.once.Do(d.loadFile)
-	if d.err != nil {
-		return v1alpha1.ControllerManagerConfigurationSpec{}, d.err
-	}
-	return d.ControllerManagerConfiguration.Complete()
-}
-
-// AtPath will set the path to load the file for the decoder.
-func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader {
-	d.path = path
-	return d
-}
-
-// OfKind will set the type to be used for decoding the file into.
-func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader {
-	d.ControllerManagerConfiguration = obj
-	return d
-}
-
-// loadFile is used from the mutex.Once to load the file.
-func (d *DeferredFileLoader) loadFile() {
-	if d.scheme == nil {
-		d.err = fmt.Errorf("scheme not supplied to controller configuration loader")
-		return
-	}
-
-	content, err := os.ReadFile(d.path)
-	if err != nil {
-		d.err = fmt.Errorf("could not read file at %s", d.path)
-		return
-	}
-
-	codecs := serializer.NewCodecFactory(d.scheme)
-
-	// Regardless of if the bytes are of any external version,
-	// it will be read successfully and converted into the internal version
-	if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil {
-		d.err = fmt.Errorf("could not decode file into runtime.Object")
-	}
-}
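Because the whole pkg/config component-config package is removed in controller-runtime 0.18, any caller that still loaded manager settings from a ControllerManagerConfiguration file has to set the equivalent manager.Options fields directly (or parse its own configuration format). A hedged sketch of the direct-field replacement; all values below are illustrative, and the field mapping should be verified against the vendored manager package:

package example

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func newManagerOptions() ctrl.Options {
	syncPeriod := 10 * time.Hour            // replaces the old syncPeriod field
	gracefulTimeout := 30 * time.Second     // replaces the old gracefulShutDown field

	return ctrl.Options{
		// Replaces the old leaderElection configuration block.
		LeaderElection:   true,
		LeaderElectionID: "example-lock",
		// Replaces metrics.bindAddress.
		Metrics: metricsserver.Options{BindAddress: ":8080"},
		// Replaces health.healthProbeBindAddress.
		HealthProbeBindAddress:  ":8081",
		GracefulShutdownTimeout: &gracefulTimeout,
		// Replaces syncPeriod and cacheNamespace, which now live on cache.Options.
		Cache: cache.Options{SyncPeriod: &syncPeriod},
	}
}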
19  vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go  (generated, vendored)
@@ -1,19 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package config contains functionality for interacting with
-// configuration for controller-runtime components.
-package config
22  vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go  (generated, vendored)
@@ -1,22 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package v1alpha1 provides the ControllerManagerConfiguration used for
-// configuring ctrl.Manager
-// +kubebuilder:object:generate=true
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-package v1alpha1
43  vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go  (generated, vendored)
@@ -1,43 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"sigs.k8s.io/controller-runtime/pkg/scheme"
-)
-
-var (
-	// GroupVersion is group version used to register these objects.
-	//
-	// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-	GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"}
-
-	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
-	//
-	// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
-
-	// AddToScheme adds the types in this group-version to the given scheme.
-	//
-	// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-	AddToScheme = SchemeBuilder.AddToScheme
-)
-
-func init() {
-	SchemeBuilder.Register(&ControllerManagerConfiguration{})
-}
179  vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go  (generated, vendored)
@@ -1,179 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
-)
-
-// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type ControllerManagerConfigurationSpec struct {
-	// SyncPeriod determines the minimum frequency at which watched resources are
-	// reconciled. A lower period will correct entropy more quickly, but reduce
-	// responsiveness to change if there are many watched resources. Change this
-	// value only if you know what you are doing. Defaults to 10 hours if unset.
-	// there will a 10 percent jitter between the SyncPeriod of all controllers
-	// so that all controllers will not send list requests simultaneously.
-	// +optional
-	SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"`
-
-	// LeaderElection is the LeaderElection config to be used when configuring
-	// the manager.Manager leader election
-	// +optional
-	LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
-
-	// CacheNamespace if specified restricts the manager's cache to watch objects in
-	// the desired namespace Defaults to all namespaces
-	//
-	// Note: If a namespace is specified, controllers can still Watch for a
-	// cluster-scoped resource (e.g Node). For namespaced resources the cache
-	// will only hold objects from the desired namespace.
-	// +optional
-	CacheNamespace string `json:"cacheNamespace,omitempty"`
-
-	// GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop.
-	// To disable graceful shutdown, set to time.Duration(0)
-	// To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1)
-	// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
-	GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"`
-
-	// Controller contains global configuration options for controllers
-	// registered within this manager.
-	// +optional
-	Controller *ControllerConfigurationSpec `json:"controller,omitempty"`
-
-	// Metrics contains the controller metrics configuration
-	// +optional
-	Metrics ControllerMetrics `json:"metrics,omitempty"`
-
-	// Health contains the controller health configuration
-	// +optional
-	Health ControllerHealth `json:"health,omitempty"`
-
-	// Webhook contains the controllers webhook configuration
-	// +optional
-	Webhook ControllerWebhook `json:"webhook,omitempty"`
-}
-
-// ControllerConfigurationSpec defines the global configuration for
-// controllers registered with the manager.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-//
-// Deprecated: Controller global configuration can now be set at the manager level,
-// using the manager.Options.Controller field.
-type ControllerConfigurationSpec struct {
-	// GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation
-	// allowed for that controller.
-	//
-	// When a controller is registered within this manager using the builder utilities,
-	// users have to specify the type the controller reconciles in the For(...) call.
-	// If the object's kind passed matches one of the keys in this map, the concurrency
-	// for that controller is set to the number specified.
-	//
-	// The key is expected to be consistent in form with GroupKind.String(),
-	// e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`.
-	//
-	// +optional
-	GroupKindConcurrency map[string]int `json:"groupKindConcurrency,omitempty"`
-
-	// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
-	// Defaults to 2 minutes if not set.
-	// +optional
-	CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"`
-
-	// RecoverPanic indicates if panics should be recovered.
-	// +optional
-	RecoverPanic *bool `json:"recoverPanic,omitempty"`
-}
-
-// ControllerMetrics defines the metrics configs.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type ControllerMetrics struct {
-	// BindAddress is the TCP address that the controller should bind to
-	// for serving prometheus metrics.
-	// It can be set to "0" to disable the metrics serving.
-	// +optional
-	BindAddress string `json:"bindAddress,omitempty"`
-}
-
-// ControllerHealth defines the health configs.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type ControllerHealth struct {
-	// HealthProbeBindAddress is the TCP address that the controller should bind to
-	// for serving health probes
-	// It can be set to "0" or "" to disable serving the health probe.
-	// +optional
-	HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"`
-
-	// ReadinessEndpointName, defaults to "readyz"
-	// +optional
-	ReadinessEndpointName string `json:"readinessEndpointName,omitempty"`
-
-	// LivenessEndpointName, defaults to "healthz"
-	// +optional
-	LivenessEndpointName string `json:"livenessEndpointName,omitempty"`
-}
-
-// ControllerWebhook defines the webhook server for the controller.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type ControllerWebhook struct {
-	// Port is the port that the webhook server serves at.
-	// It is used to set webhook.Server.Port.
-	// +optional
-	Port *int `json:"port,omitempty"`
-
-	// Host is the hostname that the webhook server binds to.
-	// It is used to set webhook.Server.Host.
-	// +optional
-	Host string `json:"host,omitempty"`
-
-	// CertDir is the directory that contains the server key and certificate.
-	// if not set, webhook server would look up the server key and certificate in
-	// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
-	// must be named tls.key and tls.crt, respectively.
-	// +optional
-	CertDir string `json:"certDir,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-type ControllerManagerConfiguration struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// ControllerManagerConfiguration returns the contfigurations for controllers
-	ControllerManagerConfigurationSpec `json:",inline"`
-}
-
-// Complete returns the configuration for controller-runtime.
-//
-// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895.
-func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) {
-	return *c, nil
-}
157  vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go  (generated, vendored)
@@ -1,157 +0,0 @@
-//go:build !ignore_autogenerated
-
-// Code generated by controller-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
-	timex "time"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfigurationSpec) {
-	*out = *in
-	if in.GroupKindConcurrency != nil {
-		in, out := &in.GroupKindConcurrency, &out.GroupKindConcurrency
-		*out = make(map[string]int, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.CacheSyncTimeout != nil {
-		in, out := &in.CacheSyncTimeout, &out.CacheSyncTimeout
-		*out = new(timex.Duration)
-		**out = **in
-	}
-	if in.RecoverPanic != nil {
-		in, out := &in.RecoverPanic, &out.RecoverPanic
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec.
-func (in *ControllerConfigurationSpec) DeepCopy() *ControllerConfigurationSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ControllerConfigurationSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth.
-func (in *ControllerHealth) DeepCopy() *ControllerHealth {
-	if in == nil {
-		return nil
-	}
-	out := new(ControllerHealth)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration.
-func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration {
-	if in == nil {
-		return nil
-	}
-	out := new(ControllerManagerConfiguration)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) {
-	*out = *in
-	if in.SyncPeriod != nil {
-		in, out := &in.SyncPeriod, &out.SyncPeriod
-		*out = new(v1.Duration)
-		**out = **in
-	}
-	if in.LeaderElection != nil {
-		in, out := &in.LeaderElection, &out.LeaderElection
-		*out = new(configv1alpha1.LeaderElectionConfiguration)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.GracefulShutdownTimeout != nil {
-		in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout
-		*out = new(v1.Duration)
-		**out = **in
-	}
-	if in.Controller != nil {
-		in, out := &in.Controller, &out.Controller
-		*out = new(ControllerConfigurationSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	out.Metrics = in.Metrics
-	out.Health = in.Health
-	in.Webhook.DeepCopyInto(&out.Webhook)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec.
-func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ControllerManagerConfigurationSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics.
-func (in *ControllerMetrics) DeepCopy() *ControllerMetrics {
-	if in == nil {
-		return nil
-	}
-	out := new(ControllerMetrics)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) {
-	*out = *in
-	if in.Port != nil {
-		in, out := &in.Port, &out.Port
-		*out = new(int)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook.
-func (in *ControllerWebhook) DeepCopy() *ControllerWebhook {
-	if in == nil {
-		return nil
-	}
-	out := new(ControllerWebhook)
-	in.DeepCopyInto(out)
-	return out
-}
38  vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go  (generated, vendored)
@@ -25,10 +25,8 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
 
-	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/internal/controller"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
@@ -59,6 +57,18 @@ type Options struct {
 	// The overall is a token bucket and the per-item is exponential.
 	RateLimiter ratelimiter.RateLimiter
 
+	// NewQueue constructs the queue for this controller once the controller is ready to start.
+	// With NewQueue a custom queue implementation can be used, e.g. a priority queue to prioritize with which
+	// priority/order objects are reconciled (e.g. to reconcile objects with changes first).
+	// This is a func because the standard Kubernetes work queues start themselves immediately, which
+	// leads to goroutine leaks if something calls controller.New repeatedly.
+	// The NewQueue func gets the controller name and the RateLimiter option (defaulted if necessary) passed in.
+	// NewQueue defaults to NewRateLimitingQueueWithConfig.
+	//
+	// NOTE: LOW LEVEL PRIMITIVE!
+	// Only use a custom NewQueue if you know what you are doing.
+	NewQueue func(controllerName string, rateLimiter ratelimiter.RateLimiter) workqueue.RateLimitingInterface
+
 	// LogConstructor is used to construct a logger used for this controller and passed
 	// to each reconciliation via the context field.
 	LogConstructor func(request *reconcile.Request) logr.Logger
@@ -72,13 +82,8 @@ type Controller interface {
 	// Reconciler is called to reconcile an object by Namespace/Name
 	reconcile.Reconciler
 
-	// Watch takes events provided by a Source and uses the EventHandler to
-	// enqueue reconcile.Requests in response to the events.
-	//
-	// Watch may be provided one or more Predicates to filter events before
-	// they are given to the EventHandler. Events will be passed to the
-	// EventHandler if all provided Predicates evaluate to true.
-	Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error
+	// Watch watches the provided Source.
+	Watch(src source.Source) error
 
 	// Start starts the controller. Start blocks until the context is closed or a
 	// controller has an error starting.
@@ -147,6 +152,14 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
 		options.RateLimiter = workqueue.DefaultControllerRateLimiter()
 	}
 
+	if options.NewQueue == nil {
+		options.NewQueue = func(controllerName string, rateLimiter ratelimiter.RateLimiter) workqueue.RateLimitingInterface {
+			return workqueue.NewRateLimitingQueueWithConfig(rateLimiter, workqueue.RateLimitingQueueConfig{
+				Name: controllerName,
+			})
+		}
+	}
+
 	if options.RecoverPanic == nil {
 		options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic
 	}
@@ -158,11 +171,8 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
 	// Create controller with dependencies set
 	return &controller.Controller{
 		Do: options.Reconciler,
-		MakeQueue: func() workqueue.RateLimitingInterface {
-			return workqueue.NewRateLimitingQueueWithConfig(options.RateLimiter, workqueue.RateLimitingQueueConfig{
-				Name: name,
-			})
-		},
+		RateLimiter:             options.RateLimiter,
+		NewQueue:                options.NewQueue,
 		MaxConcurrentReconciles: options.MaxConcurrentReconciles,
 		CacheSyncTimeout:        options.CacheSyncTimeout,
 		Name:                    name,
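Two things change for callers here: Controller.Watch now accepts only a source.Source (the event handler and any predicates are attached when the source is built), and controller.Options gains a NewQueue hook. A sketch of supplying NewQueue explicitly, mirroring the default shown in the diff above; the controller name is illustrative:

package example

import (
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func newController(mgr manager.Manager, r reconcile.Reconciler) (controller.Controller, error) {
	return controller.New("example-controller", mgr, controller.Options{
		Reconciler: r,
		// NewQueue reproduces the default; a custom implementation (for example a
		// priority queue) could be returned here instead.
		NewQueue: func(name string, rl ratelimiter.RateLimiter) workqueue.RateLimitingInterface {
			return workqueue.NewRateLimitingQueueWithConfig(rl, workqueue.RateLimitingQueueConfig{Name: name})
		},
	})
}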
55  vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go  (generated, vendored)
@@ -19,37 +19,54 @@ package event
 import "sigs.k8s.io/controller-runtime/pkg/client"
 
 // CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated
-// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler.
-type CreateEvent struct {
-	// Object is the object from the event
-	Object client.Object
-}
+// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
+type CreateEvent = TypedCreateEvent[client.Object]
 
 // UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated
 // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler.
-type UpdateEvent struct {
-	// ObjectOld is the object from the event
-	ObjectOld client.Object
-
-	// ObjectNew is the object from the event
-	ObjectNew client.Object
-}
+type UpdateEvent = TypedUpdateEvent[client.Object]
 
 // DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated
 // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler.
-type DeleteEvent struct {
+type DeleteEvent = TypedDeleteEvent[client.Object]
+
+// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster).
+// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an
+// handler.EventHandler.
+type GenericEvent = TypedGenericEvent[client.Object]
+
+// TypedCreateEvent is an event where a Kubernetes object was created. TypedCreateEvent should be generated
+// by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler.
+type TypedCreateEvent[T any] struct {
 	// Object is the object from the event
-	Object client.Object
+	Object T
+}
+
+// TypedUpdateEvent is an event where a Kubernetes object was updated. TypedUpdateEvent should be generated
+// by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler.
+type TypedUpdateEvent[T any] struct {
+	// ObjectOld is the object from the event
+	ObjectOld T
+
+	// ObjectNew is the object from the event
+	ObjectNew T
+}
+
+// TypedDeleteEvent is an event where a Kubernetes object was deleted. TypedDeleteEvent should be generated
+// by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler.
+type TypedDeleteEvent[T any] struct {
+	// Object is the object from the event
+	Object T
 
 	// DeleteStateUnknown is true if the Delete event was missed but we identified the object
 	// as having been deleted.
 	DeleteStateUnknown bool
 }
 
-// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster).
-// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an
-// handler.EventHandler.
-type GenericEvent struct {
+// TypedGenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster).
+// TypedGenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an
+// handler.TypedEventHandler.
+type TypedGenericEvent[T any] struct {
 	// Object is the object from the event
-	Object client.Object
+	Object T
 }
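Because the old event structs become type aliases of their generic counterparts instantiated with client.Object, code written against event.CreateEvent and friends keeps compiling. An illustrative predicate that relies on that compatibility (not taken from this repository):

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// generationChanged still uses the non-generic event.UpdateEvent name; after this
// bump that name is an alias for event.TypedUpdateEvent[client.Object].
var generationChanged = predicate.Funcs{
	UpdateFunc: func(e event.UpdateEvent) bool {
		return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
	},
}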
41  vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go  (generated, vendored)
@@ -18,9 +18,11 @@ package handler
 
 import (
 	"context"
+	"reflect"
 
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -35,11 +37,18 @@ var _ EventHandler = &EnqueueRequestForObject{}
 // EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event.
 // (e.g. the created / deleted / updated objects Name and Namespace). handler.EnqueueRequestForObject is used by almost all
 // Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource.
-type EnqueueRequestForObject struct{}
+type EnqueueRequestForObject = TypedEnqueueRequestForObject[client.Object]
+
+// TypedEnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event.
+// (e.g. the created / deleted / updated objects Name and Namespace). handler.TypedEnqueueRequestForObject is used by almost all
+// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource.
+//
+// TypedEnqueueRequestForObject is experimental and subject to future change.
+type TypedEnqueueRequestForObject[T client.Object] struct{}
 
 // Create implements EventHandler.
-func (e *EnqueueRequestForObject) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) {
-	if evt.Object == nil {
+func (e *TypedEnqueueRequestForObject[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) {
+	if isNil(evt.Object) {
 		enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt)
 		return
 	}
@@ -50,14 +59,14 @@ func (e *EnqueueRequestForObject) Create(ctx context.Context, evt event.CreateEv
 }
 
 // Update implements EventHandler.
-func (e *EnqueueRequestForObject) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
+func (e *TypedEnqueueRequestForObject[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) {
 	switch {
-	case evt.ObjectNew != nil:
+	case !isNil(evt.ObjectNew):
 		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
 			Name:      evt.ObjectNew.GetName(),
 			Namespace: evt.ObjectNew.GetNamespace(),
 		}})
-	case evt.ObjectOld != nil:
+	case !isNil(evt.ObjectOld):
 		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
 			Name:      evt.ObjectOld.GetName(),
 			Namespace: evt.ObjectOld.GetNamespace(),
@@ -68,8 +77,8 @@ func (e *EnqueueRequestForObject) Update(ctx context.Context, evt event.UpdateEv
 }
 
 // Delete implements EventHandler.
-func (e *EnqueueRequestForObject) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
-	if evt.Object == nil {
+func (e *TypedEnqueueRequestForObject[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) {
+	if isNil(evt.Object) {
 		enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt)
 		return
 	}
@@ -80,8 +89,8 @@ func (e *EnqueueRequestForObject) Delete(ctx context.Context, evt event.DeleteEv
 }
 
 // Generic implements EventHandler.
-func (e *EnqueueRequestForObject) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) {
-	if evt.Object == nil {
+func (e *TypedEnqueueRequestForObject[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) {
+	if isNil(evt.Object) {
 		enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt)
 		return
 	}
@@ -90,3 +99,15 @@ func (e *EnqueueRequestForObject) Generic(ctx context.Context, evt event.Generic
 		Namespace: evt.Object.GetNamespace(),
 	}})
 }
+
+func isNil(arg any) bool {
+	if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr ||
+		v.Kind() == reflect.Interface ||
+		v.Kind() == reflect.Slice ||
+		v.Kind() == reflect.Map ||
+		v.Kind() == reflect.Chan ||
+		v.Kind() == reflect.Func) && v.IsNil()) {
+		return true
+	}
+	return false
+}
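The switch from evt.Object == nil to isNil(evt.Object) matters because a generic T may hold a typed nil pointer, which never compares equal to the untyped nil literal. A tiny standalone illustration of that Go behaviour (not code from the repository):

package main

import "fmt"

type pod struct{}

func main() {
	var p *pod                // typed nil pointer
	var iface interface{} = p // interface value holding a typed nil
	fmt.Println(iface == nil) // prints false: the interface still carries type information
	// reflect.ValueOf(iface).IsNil() reports true, which is what the vendored
	// isNil helper checks for pointer, interface, slice, map, chan and func kinds.
}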
42  vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go  (generated, vendored)
@@ -27,7 +27,13 @@ import (
 
 // MapFunc is the signature required for enqueueing requests from a generic function.
 // This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler.
-type MapFunc func(context.Context, client.Object) []reconcile.Request
+type MapFunc = TypedMapFunc[client.Object]
+
+// TypedMapFunc is the signature required for enqueueing requests from a generic function.
+// This type is usually used with EnqueueRequestsFromTypedMapFunc when registering an event handler.
+//
+// TypedMapFunc is experimental and subject to future change.
+type TypedMapFunc[T any] func(context.Context, T) []reconcile.Request
 
 // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection
 // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects
@@ -40,44 +46,60 @@ type MapFunc func(context.Context, client.Object) []reconcile.Request
 // For UpdateEvents which contain both a new and old object, the transformation function is run on both
 // objects and both sets of Requests are enqueue.
 func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler {
-	return &enqueueRequestsFromMapFunc{
+	return TypedEnqueueRequestsFromMapFunc(fn)
+}
+
+// TypedEnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection
+// of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects
+// defined by some user specified transformation of the source Event. (e.g. trigger Reconciler for a set of objects
+// in response to a cluster resize event caused by adding or deleting a Node)
+//
+// TypedEnqueueRequestsFromMapFunc is frequently used to fan-out updates from one object to one or more other
+// objects of a differing type.
+//
+// For TypedUpdateEvents which contain both a new and old object, the transformation function is run on both
+// objects and both sets of Requests are enqueue.
+//
+// TypedEnqueueRequestsFromMapFunc is experimental and subject to future change.
+func TypedEnqueueRequestsFromMapFunc[T any](fn TypedMapFunc[T]) TypedEventHandler[T] {
+	return &enqueueRequestsFromMapFunc[T]{
 		toRequests: fn,
 	}
 }
 
-var _ EventHandler = &enqueueRequestsFromMapFunc{}
+var _ EventHandler = &enqueueRequestsFromMapFunc[client.Object]{}
 
-type enqueueRequestsFromMapFunc struct {
+type enqueueRequestsFromMapFunc[T any] struct {
 	// Mapper transforms the argument into a slice of keys to be reconciled
-	toRequests MapFunc
+	toRequests TypedMapFunc[T]
 }
 
 // Create implements EventHandler.
-func (e *enqueueRequestsFromMapFunc) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestsFromMapFunc[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.mapAndEnqueue(ctx, q, evt.Object, reqs)
 }
 
 // Update implements EventHandler.
-func (e *enqueueRequestsFromMapFunc) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestsFromMapFunc[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs)
 	e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs)
 }
 
 // Delete implements EventHandler.
-func (e *enqueueRequestsFromMapFunc) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestsFromMapFunc[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.mapAndEnqueue(ctx, q, evt.Object, reqs)
 }
 
 // Generic implements EventHandler.
-func (e *enqueueRequestsFromMapFunc) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestsFromMapFunc[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.mapAndEnqueue(ctx, q, evt.Object, reqs)
 }
 
-func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) {
+func (e *enqueueRequestsFromMapFunc[T]) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object T, reqs map[reconcile.Request]empty) {
 	for _, req := range e.toRequests(ctx, object) {
 		_, ok := reqs[req]
 		if !ok {
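Usage of the map-func handler is unchanged for client.Object callers; the generic TypedEnqueueRequestsFromMapFunc simply sits underneath the old name. A hedged sketch of a fan-out mapper (the namespace and naming are made up for illustration):

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// mapToRequests fans an event for one object out to a request for a related
// object; real mappers usually derive the targets from the incoming object or
// from a cache lookup.
func mapToRequests(_ context.Context, obj client.Object) []reconcile.Request {
	return []reconcile.Request{
		{NamespacedName: types.NamespacedName{Namespace: "default", Name: obj.GetName()}},
	}
}

// The resulting handler is attached to a watch exactly as before this bump.
var fanOutHandler handler.EventHandler = handler.EnqueueRequestsFromMapFunc(mapToRequests)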
48 vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go generated vendored
@@ -32,12 +32,12 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
-var _ EventHandler = &enqueueRequestForOwner{}
+var _ EventHandler = &enqueueRequestForOwner[client.Object]{}
 
 var log = logf.RuntimeLog.WithName("eventhandler").WithName("enqueueRequestForOwner")
 
 // OwnerOption modifies an EnqueueRequestForOwner EventHandler.
-type OwnerOption func(e *enqueueRequestForOwner)
+type OwnerOption func(e enqueueRequestForOwnerInterface)
 
 // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created
 // the object that was the source of the Event.
@@ -48,7 +48,21 @@ type OwnerOption func(e *enqueueRequestForOwner)
 //
 // - a handler.enqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true.
 func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) EventHandler {
-	e := &enqueueRequestForOwner{
+	return TypedEnqueueRequestForOwner[client.Object](scheme, mapper, ownerType, opts...)
+}
+
+// TypedEnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created
+// the object that was the source of the Event.
+//
+// If a ReplicaSet creates Pods, users may reconcile the ReplicaSet in response to Pod Events using:
+//
+// - a source.Kind Source with Type of Pod.
+//
+// - a handler.typedEnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true.
+//
+// TypedEnqueueRequestForOwner is experimental and subject to future change.
+func TypedEnqueueRequestForOwner[T client.Object](scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) TypedEventHandler[T] {
+	e := &enqueueRequestForOwner[T]{
 		ownerType: ownerType,
 		mapper:    mapper,
 	}
@@ -63,12 +77,16 @@ func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, owne
 
 // OnlyControllerOwner if provided will only look at the first OwnerReference with Controller: true.
 func OnlyControllerOwner() OwnerOption {
-	return func(e *enqueueRequestForOwner) {
-		e.isController = true
+	return func(e enqueueRequestForOwnerInterface) {
+		e.setIsController(true)
 	}
 }
 
-type enqueueRequestForOwner struct {
+type enqueueRequestForOwnerInterface interface {
+	setIsController(bool)
+}
+
+type enqueueRequestForOwner[T client.Object] struct {
 	// ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared.
 	ownerType runtime.Object
 
@@ -82,8 +100,12 @@ type enqueueRequestForOwner struct {
 	mapper meta.RESTMapper
 }
 
+func (e *enqueueRequestForOwner[T]) setIsController(isController bool) {
+	e.isController = isController
+}
+
 // Create implements EventHandler.
-func (e *enqueueRequestForOwner) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestForOwner[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.getOwnerReconcileRequest(evt.Object, reqs)
 	for req := range reqs {
@@ -92,7 +114,7 @@ func (e *enqueueRequestForOwner) Create(ctx context.Context, evt event.CreateEve
 }
 
 // Update implements EventHandler.
-func (e *enqueueRequestForOwner) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestForOwner[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.getOwnerReconcileRequest(evt.ObjectOld, reqs)
 	e.getOwnerReconcileRequest(evt.ObjectNew, reqs)
@@ -102,7 +124,7 @@ func (e *enqueueRequestForOwner) Update(ctx context.Context, evt event.UpdateEve
 }
 
 // Delete implements EventHandler.
-func (e *enqueueRequestForOwner) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestForOwner[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.getOwnerReconcileRequest(evt.Object, reqs)
 	for req := range reqs {
@@ -111,7 +133,7 @@ func (e *enqueueRequestForOwner) Delete(ctx context.Context, evt event.DeleteEve
 }
 
 // Generic implements EventHandler.
-func (e *enqueueRequestForOwner) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) {
+func (e *enqueueRequestForOwner[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) {
 	reqs := map[reconcile.Request]empty{}
 	e.getOwnerReconcileRequest(evt.Object, reqs)
 	for req := range reqs {
@@ -121,7 +143,7 @@ func (e *enqueueRequestForOwner) Generic(ctx context.Context, evt event.GenericE
 
 // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false
 // if the OwnerType could not be parsed using the scheme.
-func (e *enqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error {
+func (e *enqueueRequestForOwner[T]) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error {
 	// Get the kinds of the type
 	kinds, _, err := scheme.ObjectKinds(e.ownerType)
 	if err != nil {
@@ -141,7 +163,7 @@ func (e *enqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme)
 
 // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile
 // owners of object that match e.OwnerType.
-func (e *enqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) {
+func (e *enqueueRequestForOwner[T]) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) {
 	// Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested
 	// by the user
 	for _, ref := range e.getOwnersReferences(object) {
@@ -181,7 +203,7 @@ func (e *enqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object,
 // getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner
 // - if IsController is true: only take the Controller OwnerReference (if found)
 // - if IsController is false: take all OwnerReferences.
-func (e *enqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference {
+func (e *enqueueRequestForOwner[T]) getOwnersReferences(object metav1.Object) []metav1.OwnerReference {
 	if object == nil {
 		return nil
 	}
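TypedEnqueueRequestForOwner is the new generic entry point shown above; EnqueueRequestForOwner now simply delegates to it for client.Object. A short sketch of the typed form, assuming the usual ReplicaSet-owns-Pods layout:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/handler"
)

// ownerHandlerFor returns a typed handler that maps Pod events to requests for
// the owning ReplicaSet. OnlyControllerOwner() limits the lookup to the
// OwnerReference with Controller: true, per the option defined above.
func ownerHandlerFor(scheme *runtime.Scheme, mapper meta.RESTMapper) handler.TypedEventHandler[*corev1.Pod] {
	return handler.TypedEnqueueRequestForOwner[*corev1.Pod](
		scheme, mapper, &appsv1.ReplicaSet{}, handler.OnlyControllerOwner(),
	)
}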
60 vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go generated vendored
@@ -20,12 +20,13 @@ import (
 	"context"
 
 	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 )
 
 // EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). EventHandlers map an Event
 // for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an
-// Event for object with type Foo (using source.KindSource) then reconcile one or more object(s) with type Bar.
+// Event for object with type Foo (using source.Kind) then reconcile one or more object(s) with type Bar.
 //
 // Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called.
 //
@@ -41,65 +42,92 @@ import (
 //
 // Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface.
 // Most users shouldn't need to implement their own EventHandler.
-type EventHandler interface {
+type EventHandler TypedEventHandler[client.Object]
+
+// TypedEventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). TypedEventHandlers map an Event
+// for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an
+// Event for object with type Foo (using source.Kind) then reconcile one or more object(s) with type Bar.
+//
+// Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called.
+//
+// * Use TypedEnqueueRequestForObject to reconcile the object the event is for
+// - do this for events for the type the Controller Reconciles. (e.g. Deployment for a Deployment Controller)
+//
+// * Use TypedEnqueueRequestForOwner to reconcile the owner of the object the event is for
+// - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller)
+//
+// * Use TypedEnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object
+// of a different type - do this for events for types the Controller may be interested in, but doesn't create.
+// (e.g. If Foo responds to cluster size events, map Node events to Foo objects.)
+//
+// Unless you are implementing your own TypedEventHandler, you can ignore the functions on the TypedEventHandler interface.
+// Most users shouldn't need to implement their own TypedEventHandler.
+//
+// TypedEventHandler is experimental and subject to future change.
+type TypedEventHandler[T any] interface {
 	// Create is called in response to a create event - e.g. Pod Creation.
-	Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface)
+	Create(context.Context, event.TypedCreateEvent[T], workqueue.RateLimitingInterface)
 
 	// Update is called in response to an update event - e.g. Pod Updated.
-	Update(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface)
+	Update(context.Context, event.TypedUpdateEvent[T], workqueue.RateLimitingInterface)
 
 	// Delete is called in response to a delete event - e.g. Pod Deleted.
-	Delete(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface)
+	Delete(context.Context, event.TypedDeleteEvent[T], workqueue.RateLimitingInterface)
 
 	// Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or
 	// external trigger request - e.g. reconcile Autoscaling, or a Webhook.
-	Generic(context.Context, event.GenericEvent, workqueue.RateLimitingInterface)
+	Generic(context.Context, event.TypedGenericEvent[T], workqueue.RateLimitingInterface)
 }
 
 var _ EventHandler = Funcs{}
 
-// Funcs implements EventHandler.
-type Funcs struct {
+// Funcs implements eventhandler.
+type Funcs = TypedFuncs[client.Object]
+
+// TypedFuncs implements eventhandler.
+//
+// TypedFuncs is experimental and subject to future change.
+type TypedFuncs[T any] struct {
 	// Create is called in response to an add event. Defaults to no-op.
 	// RateLimitingInterface is used to enqueue reconcile.Requests.
-	CreateFunc func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface)
+	CreateFunc func(context.Context, event.TypedCreateEvent[T], workqueue.RateLimitingInterface)
 
 	// Update is called in response to an update event. Defaults to no-op.
 	// RateLimitingInterface is used to enqueue reconcile.Requests.
-	UpdateFunc func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface)
+	UpdateFunc func(context.Context, event.TypedUpdateEvent[T], workqueue.RateLimitingInterface)
 
 	// Delete is called in response to a delete event. Defaults to no-op.
 	// RateLimitingInterface is used to enqueue reconcile.Requests.
-	DeleteFunc func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface)
+	DeleteFunc func(context.Context, event.TypedDeleteEvent[T], workqueue.RateLimitingInterface)
 
 	// GenericFunc is called in response to a generic event. Defaults to no-op.
 	// RateLimitingInterface is used to enqueue reconcile.Requests.
-	GenericFunc func(context.Context, event.GenericEvent, workqueue.RateLimitingInterface)
+	GenericFunc func(context.Context, event.TypedGenericEvent[T], workqueue.RateLimitingInterface)
 }
 
 // Create implements EventHandler.
-func (h Funcs) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
+func (h TypedFuncs[T]) Create(ctx context.Context, e event.TypedCreateEvent[T], q workqueue.RateLimitingInterface) {
 	if h.CreateFunc != nil {
 		h.CreateFunc(ctx, e, q)
 	}
 }
 
 // Delete implements EventHandler.
-func (h Funcs) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
+func (h TypedFuncs[T]) Delete(ctx context.Context, e event.TypedDeleteEvent[T], q workqueue.RateLimitingInterface) {
 	if h.DeleteFunc != nil {
 		h.DeleteFunc(ctx, e, q)
 	}
 }
 
 // Update implements EventHandler.
-func (h Funcs) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
+func (h TypedFuncs[T]) Update(ctx context.Context, e event.TypedUpdateEvent[T], q workqueue.RateLimitingInterface) {
 	if h.UpdateFunc != nil {
 		h.UpdateFunc(ctx, e, q)
 	}
 }
 
 // Generic implements EventHandler.
-func (h Funcs) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) {
+func (h TypedFuncs[T]) Generic(ctx context.Context, e event.TypedGenericEvent[T], q workqueue.RateLimitingInterface) {
 	if h.GenericFunc != nil {
 		h.GenericFunc(ctx, e, q)
 	}
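With Funcs now an alias for TypedFuncs[client.Object], per-event callbacks can be written against a concrete type. A minimal sketch, assuming a handler that only reacts to Pod create events (the other funcs stay nil and default to no-ops, as documented above):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// podCreateOnly enqueues a request keyed by the Pod itself on create events;
// e.Object is already a *corev1.Pod thanks to the type parameter.
var podCreateOnly handler.TypedEventHandler[*corev1.Pod] = handler.TypedFuncs[*corev1.Pod]{
	CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*corev1.Pod], q workqueue.RateLimitingInterface) {
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
			Namespace: e.Object.GetNamespace(),
			Name:      e.Object.GetName(),
		}})
	},
}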
35 vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go generated vendored
@@ -29,10 +29,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/client-go/util/workqueue"
 
-	"sigs.k8s.io/controller-runtime/pkg/handler"
 	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
@@ -50,10 +49,13 @@ type Controller struct {
 	// Defaults to the DefaultReconcileFunc.
 	Do reconcile.Reconciler
 
-	// MakeQueue constructs the queue for this controller once the controller is ready to start.
-	// This exists because the standard Kubernetes workqueues start themselves immediately, which
+	// RateLimiter is used to limit how frequently requests may be queued into the work queue.
+	RateLimiter ratelimiter.RateLimiter
+
+	// NewQueue constructs the queue for this controller once the controller is ready to start.
+	// This is a func because the standard Kubernetes work queues start themselves immediately, which
 	// leads to goroutine leaks if something calls controller.New repeatedly.
-	MakeQueue func() workqueue.RateLimitingInterface
+	NewQueue func(controllerName string, rateLimiter ratelimiter.RateLimiter) workqueue.RateLimitingInterface
 
 	// Queue is an listeningQueue that listens for events from Informers and adds object keys to
 	// the Queue for processing
@@ -77,7 +79,7 @@ type Controller struct {
 	CacheSyncTimeout time.Duration
 
 	// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
-	startWatches []watchDescription
+	startWatches []source.Source
 
 	// LogConstructor is used to construct a logger to then log messages to users during reconciliation,
 	// or for example when a watch is started.
@@ -92,13 +94,6 @@ type Controller struct {
 	LeaderElected *bool
 }
 
-// watchDescription contains all the information necessary to start a watch.
-type watchDescription struct {
-	src        source.Source
-	handler    handler.EventHandler
-	predicates []predicate.Predicate
-}
-
 // Reconcile implements reconcile.Reconciler.
 func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
 	defer func() {
@@ -120,7 +115,7 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ re
 }
 
 // Watch implements controller.Controller.
-func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error {
+func (c *Controller) Watch(src source.Source) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
@@ -128,12 +123,12 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
 	//
 	// These watches are going to be held on the controller struct until the manager or user calls Start(...).
 	if !c.Started {
-		c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct})
+		c.startWatches = append(c.startWatches, src)
 		return nil
 	}
 
 	c.LogConstructor(nil).Info("Starting EventSource", "source", src)
-	return src.Start(c.ctx, evthdler, c.Queue, prct...)
+	return src.Start(c.ctx, c.Queue)
 }
 
 // NeedLeaderElection implements the manager.LeaderElectionRunnable interface.
@@ -158,7 +153,7 @@ func (c *Controller) Start(ctx context.Context) error {
 	// Set the internal context.
 	c.ctx = ctx
 
-	c.Queue = c.MakeQueue()
+	c.Queue = c.NewQueue(c.Name, c.RateLimiter)
 	go func() {
 		<-ctx.Done()
 		c.Queue.ShutDown()
@@ -175,9 +170,9 @@ func (c *Controller) Start(ctx context.Context) error {
 		// caches to sync so that they have a chance to register their intendeded
 		// caches.
 		for _, watch := range c.startWatches {
-			c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
+			c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch))
 
-			if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
+			if err := watch.Start(ctx, c.Queue); err != nil {
 				return err
 			}
 		}
@@ -186,7 +181,7 @@ func (c *Controller) Start(ctx context.Context) error {
 		c.LogConstructor(nil).Info("Starting Controller")
 
 		for _, watch := range c.startWatches {
-			syncingSource, ok := watch.src.(source.SyncingSource)
+			syncingSource, ok := watch.(source.SyncingSource)
 			if !ok {
 				continue
 			}
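The Watch signature above drops the separate handler and predicate arguments; they are now carried by the source itself (see the Handler and Predicates fields added to the internal Kind further below). A caller-side sketch follows; it assumes the v0.18 public source.Kind constructor that bundles the handler, and the typed handler.TypedEnqueueRequestForObject struct, neither of which appears in this hunk.

package example

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// addPodWatch registers a Pod watch on an existing controller. The event
// handler (and any predicates) travel inside the source, so Watch only
// receives the source, matching Watch(src source.Source) above.
func addPodWatch(mgr ctrl.Manager, c controller.Controller) error {
	return c.Watch(
		source.Kind(mgr.GetCache(), &corev1.Pod{},
			&handler.TypedEnqueueRequestForObject[*corev1.Pod]{}),
	)
}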
32 vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go generated vendored
@@ -33,8 +33,8 @@ import (
 var log = logf.RuntimeLog.WithName("source").WithName("EventHandler")
 
 // NewEventHandler creates a new EventHandler.
-func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler {
-	return &EventHandler{
+func NewEventHandler[T client.Object](ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.TypedEventHandler[T], predicates []predicate.TypedPredicate[T]) *EventHandler[T] {
+	return &EventHandler[T]{
 		ctx:        ctx,
 		handler:    handler,
 		queue:      queue,
@@ -43,19 +43,19 @@ func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface,
 }
 
 // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface.
-type EventHandler struct {
+type EventHandler[T client.Object] struct {
 	// ctx stores the context that created the event handler
 	// that is used to propagate cancellation signals to each handler function.
 	ctx context.Context
 
-	handler    handler.EventHandler
+	handler    handler.TypedEventHandler[T]
 	queue      workqueue.RateLimitingInterface
-	predicates []predicate.Predicate
+	predicates []predicate.TypedPredicate[T]
 }
 
 // HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs
 // TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27
-func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs {
+func (e *EventHandler[T]) HandlerFuncs() cache.ResourceEventHandlerFuncs {
 	return cache.ResourceEventHandlerFuncs{
 		AddFunc:    e.OnAdd,
 		UpdateFunc: e.OnUpdate,
@@ -64,11 +64,11 @@ func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs {
 }
 
 // OnAdd creates CreateEvent and calls Create on EventHandler.
-func (e *EventHandler) OnAdd(obj interface{}) {
-	c := event.CreateEvent{}
+func (e *EventHandler[T]) OnAdd(obj interface{}) {
+	c := event.TypedCreateEvent[T]{}
 
 	// Pull Object out of the object
-	if o, ok := obj.(client.Object); ok {
+	if o, ok := obj.(T); ok {
 		c.Object = o
 	} else {
 		log.Error(nil, "OnAdd missing Object",
@@ -89,10 +89,10 @@ func (e *EventHandler) OnAdd(obj interface{}) {
 }
 
 // OnUpdate creates UpdateEvent and calls Update on EventHandler.
-func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {
-	u := event.UpdateEvent{}
+func (e *EventHandler[T]) OnUpdate(oldObj, newObj interface{}) {
+	u := event.TypedUpdateEvent[T]{}
 
-	if o, ok := oldObj.(client.Object); ok {
+	if o, ok := oldObj.(T); ok {
 		u.ObjectOld = o
 	} else {
 		log.Error(nil, "OnUpdate missing ObjectOld",
@@ -101,7 +101,7 @@ func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {
 	}
 
 	// Pull Object out of the object
-	if o, ok := newObj.(client.Object); ok {
+	if o, ok := newObj.(T); ok {
 		u.ObjectNew = o
 	} else {
 		log.Error(nil, "OnUpdate missing ObjectNew",
@@ -122,8 +122,8 @@ func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {
 }
 
 // OnDelete creates DeleteEvent and calls Delete on EventHandler.
-func (e *EventHandler) OnDelete(obj interface{}) {
-	d := event.DeleteEvent{}
+func (e *EventHandler[T]) OnDelete(obj interface{}) {
+	d := event.TypedDeleteEvent[T]{}
 
 	// Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a
 	// DeleteFinalStateUnknown struct, so the object needs to be pulled out.
@@ -149,7 +149,7 @@ func (e *EventHandler) OnDelete(obj interface{}) {
 	}
 
 	// Pull Object out of the object
-	if o, ok := obj.(client.Object); ok {
+	if o, ok := obj.(T); ok {
 		d.Object = o
 	} else {
 		log.Error(nil, "OnDelete missing Object",
58 vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go generated vendored
@@ -4,12 +4,14 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/util/workqueue"
 
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -17,34 +19,40 @@ import (
 )
 
 // Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
-type Kind struct {
+type Kind[T client.Object] struct {
 	// Type is the type of object to watch. e.g. &v1.Pod{}
-	Type client.Object
+	Type T
 
 	// Cache used to watch APIs
 	Cache cache.Cache
 
-	// started may contain an error if one was encountered during startup. If its closed and does not
+	Handler handler.TypedEventHandler[T]
+
+	Predicates []predicate.TypedPredicate[T]
+
+	// startedErr may contain an error if one was encountered during startup. If its closed and does not
 	// contain an error, startup and syncing finished.
-	started     chan error
+	startedErr  chan error
 	startCancel func()
 }
 
 // Start is internal and should be called only by the Controller to register an EventHandler with the Informer
 // to enqueue reconcile.Requests.
-func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
-	prct ...predicate.Predicate) error {
-	if ks.Type == nil {
+func (ks *Kind[T]) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error {
+	if isNil(ks.Type) {
 		return fmt.Errorf("must create Kind with a non-nil object")
 	}
-	if ks.Cache == nil {
+	if isNil(ks.Cache) {
 		return fmt.Errorf("must create Kind with a non-nil cache")
 	}
+	if isNil(ks.Handler) {
+		return errors.New("must create Kind with non-nil handler")
+	}
 
 	// cache.GetInformer will block until its context is cancelled if the cache was already started and it can not
 	// sync that informer (most commonly due to RBAC issues).
 	ctx, ks.startCancel = context.WithCancel(ctx)
-	ks.started = make(chan error)
+	ks.startedErr = make(chan error)
 	go func() {
 		var (
 			i cache.Informer
@@ -72,30 +80,30 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w
 			return true, nil
 		}); err != nil {
 			if lastErr != nil {
-				ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr)
+				ks.startedErr <- fmt.Errorf("failed to get informer from cache: %w", lastErr)
 				return
 			}
-			ks.started <- err
+			ks.startedErr <- err
 			return
 		}
 
-		_, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs())
+		_, err := i.AddEventHandler(NewEventHandler(ctx, queue, ks.Handler, ks.Predicates).HandlerFuncs())
 		if err != nil {
-			ks.started <- err
+			ks.startedErr <- err
 			return
 		}
 		if !ks.Cache.WaitForCacheSync(ctx) {
 			// Would be great to return something more informative here
-			ks.started <- errors.New("cache did not sync")
+			ks.startedErr <- errors.New("cache did not sync")
 		}
-		close(ks.started)
+		close(ks.startedErr)
 	}()
 
 	return nil
 }
 
-func (ks *Kind) String() string {
-	if ks.Type != nil {
+func (ks *Kind[T]) String() string {
+	if !isNil(ks.Type) {
 		return fmt.Sprintf("kind source: %T", ks.Type)
 	}
 	return "kind source: unknown type"
@@ -103,9 +111,9 @@ func (ks *Kind) String() string {
 
 // WaitForSync implements SyncingSource to allow controllers to wait with starting
 // workers until the cache is synced.
-func (ks *Kind) WaitForSync(ctx context.Context) error {
+func (ks *Kind[T]) WaitForSync(ctx context.Context) error {
 	select {
-	case err := <-ks.started:
+	case err := <-ks.startedErr:
 		return err
 	case <-ctx.Done():
 		ks.startCancel()
@@ -115,3 +123,15 @@ func (ks *Kind) WaitForSync(ctx context.Context) error {
 		return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type)
 	}
 }
+
+func isNil(arg any) bool {
+	if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr ||
+		v.Kind() == reflect.Interface ||
+		v.Kind() == reflect.Slice ||
+		v.Kind() == reflect.Map ||
+		v.Kind() == reflect.Chan ||
+		v.Kind() == reflect.Func) && v.IsNil()) {
+		return true
+	}
+	return false
+}
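The new isNil helper exists because Kind's fields are now generic or interface-typed: a typed nil pointer stored in an interface is not == nil, so the plain nil checks used before would pass the guard and fail later. A self-contained illustration of the same check, restated with a switch for readability:

package example

import (
	"fmt"
	"reflect"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// isNil reports nil-ness through the interface, catching typed-nil pointers
// that a plain == nil comparison misses.
func isNil(arg any) bool {
	v := reflect.ValueOf(arg)
	if !v.IsValid() {
		return true
	}
	switch v.Kind() {
	case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan, reflect.Func:
		return v.IsNil()
	}
	return false
}

func demoTypedNil() {
	var pod *corev1.Pod         // typed nil pointer
	var obj client.Object = pod // stored in an interface
	fmt.Println(obj == nil)     // false: the interface carries a (*corev1.Pod, nil) pair
	fmt.Println(isNil(obj))     // true: reflect sees the nil pointer inside
}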
45 vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go generated vendored
@@ -179,6 +179,24 @@ func (cm *controllerManager) add(r Runnable) error {
 	return cm.runnables.Add(r)
 }
 
+// AddMetricsServerExtraHandler adds extra handler served on path to the http server that serves metrics.
+func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error {
+	cm.Lock()
+	defer cm.Unlock()
+	if cm.started {
+		return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created")
+	}
+	if cm.metricsServer == nil {
+		cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler %q will be ignored", path)
+		return nil
+	}
+	if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil {
+		return err
+	}
+	cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path)
+	return nil
+}
+
 // AddHealthzCheck allows you to add Healthz checker.
 func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error {
 	cm.Lock()
@@ -284,9 +302,8 @@ func (cm *controllerManager) addHealthProbeServer() error {
 		mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
 	}
 
-	return cm.add(&server{
-		Kind:     "health probe",
-		Log:      cm.logger,
+	return cm.add(&Server{
+		Name:     "health probe",
 		Server:   srv,
 		Listener: cm.healthProbeListener,
 	})
@@ -302,9 +319,8 @@ func (cm *controllerManager) addPprofServer() error {
 	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
 	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
 
-	return cm.add(&server{
-		Kind:     "pprof",
-		Log:      cm.logger,
+	return cm.add(&Server{
+		Name:     "pprof",
 		Server:   srv,
 		Listener: cm.pprofListener,
 	})
@@ -384,15 +400,14 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
 		}
 	}
 
-	// First start any internal HTTP servers, which includes health probes, metrics and profiling if enabled.
+	// First start any HTTP servers, which includes health probes, metrics and profiling if enabled.
 	//
-	// WARNING: Internal HTTP servers MUST start before any cache is populated, otherwise it would block
-	// conversion webhooks to be ready for serving which make the cache never get ready.
-	if err := cm.runnables.HTTPServers.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start HTTP servers: %w", err)
-		}
+	// WARNING: HTTPServers includes the health probes, which MUST start before any cache is populated, otherwise
+	// it would block conversion webhooks to be ready for serving which make the cache never get ready.
+	logCtx := logr.NewContext(cm.internalCtx, cm.logger)
+	if err := cm.runnables.HTTPServers.Start(logCtx); err != nil {
+		return fmt.Errorf("failed to start HTTP servers: %w", err)
 	}
 
 	// Start any webhook servers, which includes conversion, validation, and defaulting
 	// webhooks that are registered.
@@ -401,24 +416,18 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
 	// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
 	// to never start because no cache can be populated.
 	if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start webhooks: %w", err)
-		}
+		return fmt.Errorf("failed to start webhooks: %w", err)
 	}
 
 	// Start and wait for caches.
 	if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start caches: %w", err)
-		}
+		return fmt.Errorf("failed to start caches: %w", err)
 	}
 
 	// Start the non-leaderelection Runnables after the cache has synced.
 	if err := cm.runnables.Others.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start other runnables: %w", err)
-		}
+		return fmt.Errorf("failed to start other runnables: %w", err)
 	}
 
 	// Start the leader election and all required runnables.
 	{
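AddMetricsServerExtraHandler, implemented above, lets callers hang additional handlers off the manager's metrics endpoint as long as the manager has not started yet. A usage sketch with a hypothetical /buildinfo path:

package example

import (
	"net/http"

	ctrl "sigs.k8s.io/controller-runtime"
)

// registerBuildInfo exposes a tiny diagnostic endpoint on the manager's
// metrics server. It must be called before mgr.Start, otherwise the call
// above returns an error because the metrics endpoint already exists.
func registerBuildInfo(mgr ctrl.Manager) error {
	return mgr.AddMetricsServerExtraHandler("/buildinfo", http.HandlerFunc(
		func(w http.ResponseWriter, _ *http.Request) {
			_, _ = w.Write([]byte(`{"version":"example"}`)) // hypothetical payload
		}))
}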
132 vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go generated vendored
@@ -22,14 +22,12 @@ import (
 	"fmt"
 	"net"
 	"net/http"
-	"reflect"
 	"time"
 
 	"github.com/go-logr/logr"
 	coordinationv1 "k8s.io/api/coordination/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -41,7 +39,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/cluster"
 	"sigs.k8s.io/controller-runtime/pkg/config"
-	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
 	"sigs.k8s.io/controller-runtime/pkg/leaderelection"
@@ -67,6 +64,15 @@ type Manager interface {
 	// election was configured.
 	Elected() <-chan struct{}
 
+	// AddMetricsServerExtraHandler adds an extra handler served on path to the http server that serves metrics.
+	// Might be useful to register some diagnostic endpoints e.g. pprof.
+	//
+	// Note that these endpoints are meant to be sensitive and shouldn't be exposed publicly.
+	//
+	// If the simple path -> handler mapping offered here is not enough,
+	// a new http server/listener should be added as Runnable to the manager via Add method.
+	AddMetricsServerExtraHandler(path string, handler http.Handler) error
+
 	// AddHealthzCheck allows you to add Healthz checker
 	AddHealthzCheck(name string, check healthz.Checker) error
 
@@ -438,126 +444,6 @@ func New(config *rest.Config, options Options) (Manager, error) {
 	}, nil
 }
 
-// AndFrom will use a supplied type and convert to Options
-// any options already set on Options will be ignored, this is used to allow
-// cli flags to override anything specified in the config file.
-//
-// Deprecated: This function has been deprecated and will be removed in a future release,
-// The Component Configuration package has been unmaintained for over a year and is no longer
-// actively developed. Users should migrate to their own configuration format
-// and configure Manager.Options directly.
-// See https://github.com/kubernetes-sigs/controller-runtime/issues/895
-// for more information, feedback, and comments.
-func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) {
-	newObj, err := loader.Complete()
-	if err != nil {
-		return o, err
-	}
-
-	o = o.setLeaderElectionConfig(newObj)
-
-	if o.Cache.SyncPeriod == nil && newObj.SyncPeriod != nil {
-		o.Cache.SyncPeriod = &newObj.SyncPeriod.Duration
-	}
-
-	if len(o.Cache.DefaultNamespaces) == 0 && newObj.CacheNamespace != "" {
-		o.Cache.DefaultNamespaces = map[string]cache.Config{newObj.CacheNamespace: {}}
-	}
-
-	if o.Metrics.BindAddress == "" && newObj.Metrics.BindAddress != "" {
-		o.Metrics.BindAddress = newObj.Metrics.BindAddress
-	}
-
-	if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" {
-		o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress
-	}
-
-	if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" {
-		o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName
-	}
-
-	if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" {
-		o.LivenessEndpointName = newObj.Health.LivenessEndpointName
-	}
-
-	if o.WebhookServer == nil {
-		port := 0
-		if newObj.Webhook.Port != nil {
-			port = *newObj.Webhook.Port
-		}
-		o.WebhookServer = webhook.NewServer(webhook.Options{
-			Port:    port,
-			Host:    newObj.Webhook.Host,
-			CertDir: newObj.Webhook.CertDir,
-		})
-	}
-
-	if newObj.Controller != nil {
-		if o.Controller.CacheSyncTimeout == 0 && newObj.Controller.CacheSyncTimeout != nil {
-			o.Controller.CacheSyncTimeout = *newObj.Controller.CacheSyncTimeout
-		}
-
-		if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 {
-			o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency
-		}
-	}
-
-	return o, nil
-}
-
-// AndFromOrDie will use options.AndFrom() and will panic if there are errors.
-//
-// Deprecated: This function has been deprecated and will be removed in a future release,
-// The Component Configuration package has been unmaintained for over a year and is no longer
-// actively developed. Users should migrate to their own configuration format
-// and configure Manager.Options directly.
-// See https://github.com/kubernetes-sigs/controller-runtime/issues/895
-// for more information, feedback, and comments.
-func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options {
-	o, err := o.AndFrom(loader)
-	if err != nil {
-		panic(fmt.Sprintf("could not parse config file: %v", err))
-	}
-	return o
-}
-
-func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
-	if obj.LeaderElection == nil {
-		// The source does not have any configuration; noop
-		return o
-	}
-
-	if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
-		o.LeaderElection = *obj.LeaderElection.LeaderElect
-	}
-
-	if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" {
-		o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock
-	}
-
-	if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" {
-		o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace
-	}
-
-	if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" {
-		o.LeaderElectionID = obj.LeaderElection.ResourceName
-	}
-
-	if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) {
-		o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration
-	}
-
-	if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) {
-		o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration
-	}
-
-	if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) {
-		o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration
-	}
-
-	return o
-}
-
 // defaultHealthProbeListener creates the default health probes listener bound to the given address.
 func defaultHealthProbeListener(addr string) (net.Listener, error) {
 	if addr == "" || addr == "0" {
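The removed AndFrom/AndFromOrDie helpers were the component-config bridge; their deprecation notice points users at configuring Manager.Options directly. A sketch of the equivalent direct configuration, with placeholder values standing in for the fields the removed code used to copy over:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// newManager sets on Options directly what the config file used to carry:
// cache namespaces, metrics and health probe addresses, leader election,
// and the webhook server. The concrete values are placeholders.
func newManager(scheme *runtime.Scheme) (manager.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme: scheme,
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{"example-ns": {}},
		},
		Metrics:                metricsserver.Options{BindAddress: ":8080"},
		HealthProbeBindAddress: ":8081",
		LeaderElection:         true,
		LeaderElectionID:       "example-lock",
		WebhookServer:          webhook.NewServer(webhook.Options{Port: 9443}),
	})
}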
5 vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go generated vendored
@@ -54,7 +54,10 @@ func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
 // The runnables added after Start are started directly.
 func (r *runnables) Add(fn Runnable) error {
 	switch runnable := fn.(type) {
-	case *server:
+	case *Server:
+		if runnable.NeedLeaderElection() {
+			return r.LeaderElection.Add(fn, nil)
+		}
 		return r.HTTPServers.Add(fn, nil)
 	case hasCache:
 		return r.Caches.Add(fn, func(ctx context.Context) bool {
72  vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go  (generated, vendored)

@@ -21,34 +21,67 @@ import (
 	"errors"
 	"net"
 	"net/http"
+	"time"
 
-	"github.com/go-logr/logr"
+	crlog "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
-// server is a general purpose HTTP server Runnable for a manager
-// to serve some internal handlers such as health probes, metrics and profiling.
-type server struct {
-	Kind string
-	Log logr.Logger
+var (
+	_ Runnable               = (*Server)(nil)
+	_ LeaderElectionRunnable = (*Server)(nil)
+)
+
+// Server is a general purpose HTTP server Runnable for a manager.
+// It is used to serve some internal handlers for health probes and profiling,
+// but it can also be used to run custom servers.
+type Server struct {
+	// Name is an optional string that describes the purpose of the server. It is used in logs to distinguish
+	// among multiple servers.
+	Name string
+
+	// Server is the HTTP server to run. It is required.
 	Server *http.Server
+
+	// Listener is an optional listener to use. If not set, the server start a listener using the server.Addr.
+	// Using a listener is useful when the port reservation needs to happen in advance of this runnable starting.
 	Listener net.Listener
+
+	// OnlyServeWhenLeader is an optional bool that indicates that the server should only be started when the manager is the leader.
+	OnlyServeWhenLeader bool
+
+	// ShutdownTimeout is an optional duration that indicates how long to wait for the server to shutdown gracefully. If not set,
+	// the server will wait indefinitely for all connections to close.
+	ShutdownTimeout *time.Duration
 }
 
-func (s *server) Start(ctx context.Context) error {
-	log := s.Log.WithValues("kind", s.Kind, "addr", s.Listener.Addr())
+// Start starts the server. It will block until the server is stopped or an error occurs.
+func (s *Server) Start(ctx context.Context) error {
+	log := crlog.FromContext(ctx)
+	if s.Name != "" {
+		log = log.WithValues("name", s.Name)
+	}
+	log = log.WithValues("addr", s.addr())
 
 	serverShutdown := make(chan struct{})
 	go func() {
 		<-ctx.Done()
 		log.Info("shutting down server")
-		if err := s.Server.Shutdown(context.Background()); err != nil {
+
+		shutdownCtx := context.Background()
+		if s.ShutdownTimeout != nil {
+			var shutdownCancel context.CancelFunc
+			shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), *s.ShutdownTimeout)
+			defer shutdownCancel()
+		}
+
+		if err := s.Server.Shutdown(shutdownCtx); err != nil {
 			log.Error(err, "error shutting down server")
 		}
 		close(serverShutdown)
 	}()
 
 	log.Info("starting server")
-	if err := s.Server.Serve(s.Listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
+	if err := s.serve(); err != nil && !errors.Is(err, http.ErrServerClosed) {
 		return err
 	}
 
@@ -56,6 +89,21 @@ func (s *server) Start(ctx context.Context) error {
 	return nil
 }
 
-func (s *server) NeedLeaderElection() bool {
-	return false
+// NeedLeaderElection returns true if the server should only be started when the manager is the leader.
+func (s *Server) NeedLeaderElection() bool {
+	return s.OnlyServeWhenLeader
+}
+
+func (s *Server) addr() string {
+	if s.Listener != nil {
+		return s.Listener.Addr().String()
+	}
+	return s.Server.Addr
+}
+
+func (s *Server) serve() error {
+	if s.Listener != nil {
+		return s.Server.Serve(s.Listener)
+	}
+	return s.Server.ListenAndServe()
 }
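The exported manager.Server replaces the old unexported server runnable, and (per the runnable_group.go hunk above) the manager routes it to the leader-election group when it asks for leader election, otherwise to the HTTP-server group. A minimal sketch of registering a custom server with an existing manager; the name, address, handler and timeout below are illustrative assumptions, not part of this diff:

```go
package example

import (
	"net/http"
	"time"

	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// addDebugServer registers an extra HTTP server as a manager Runnable.
func addDebugServer(mgr ctrl.Manager) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/ping", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("pong"))
	})

	return mgr.Add(&manager.Server{
		Name:                "debug",                                   // appears as "name" in the server's log lines
		Server:              &http.Server{Addr: ":8090", Handler: mux}, // required per the new Server doc comment
		OnlyServeWhenLeader: true,                                      // start only on the elected leader
		ShutdownTimeout:     ptr.To(10 * time.Second),                  // bound graceful shutdown
	})
}
```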
23  vendor/sigs.k8s.io/controller-runtime/pkg/metrics/leaderelection.go  (generated, vendored)

@@ -14,6 +14,11 @@ var (
 		Name: "leader_election_master_status",
 		Help: "Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name.",
 	}, []string{"name"})
+
+	leaderSlowpathCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "leader_election_slowpath_total",
+		Help: "Total number of slow path exercised in renewing leader leases. 'name' is the string used to identify the lease. Please make sure to group by name.",
+	}, []string{"name"})
 )
 
 func init() {
@@ -23,18 +28,20 @@ func init() {
 
 type leaderelectionMetricsProvider struct{}
 
-func (leaderelectionMetricsProvider) NewLeaderMetric() leaderelection.SwitchMetric {
-	return &switchAdapter{gauge: leaderGauge}
+func (leaderelectionMetricsProvider) NewLeaderMetric() leaderelection.LeaderMetric {
+	return leaderElectionPrometheusAdapter{}
 }
 
-type switchAdapter struct {
-	gauge *prometheus.GaugeVec
+type leaderElectionPrometheusAdapter struct{}
+
+func (s leaderElectionPrometheusAdapter) On(name string) {
+	leaderGauge.WithLabelValues(name).Set(1.0)
 }
 
-func (s *switchAdapter) On(name string) {
-	s.gauge.WithLabelValues(name).Set(1.0)
+func (s leaderElectionPrometheusAdapter) Off(name string) {
+	leaderGauge.WithLabelValues(name).Set(0.0)
 }
 
-func (s *switchAdapter) Off(name string) {
-	s.gauge.WithLabelValues(name).Set(0.0)
+func (leaderElectionPrometheusAdapter) SlowpathExercised(name string) {
+	leaderSlowpathCounter.WithLabelValues(name).Inc()
 }
32  vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go  (generated, vendored)

@@ -46,6 +46,9 @@ var DefaultBindAddress = ":8080"
 
 // Server is a server that serves metrics.
 type Server interface {
+	// AddExtraHandler adds extra handler served on path to the http server that serves metrics.
+	AddExtraHandler(path string, handler http.Handler) error
+
 	// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates
 	// the metrics server doesn't need leader election.
 	NeedLeaderElection() bool
@@ -101,6 +104,9 @@ type Options struct {
 	// TLSOpts is used to allow configuring the TLS config used for the server.
 	// This also allows providing a certificate via GetCertificate.
 	TLSOpts []func(*tls.Config)
+
+	// ListenConfig contains options for listening to an address on the metric server.
+	ListenConfig net.ListenConfig
 }
 
 // Filter is a func that is added around metrics and extra handlers on the metrics server.
@@ -179,6 +185,23 @@ func (*defaultServer) NeedLeaderElection() bool {
 	return false
 }
 
+// AddExtraHandler adds extra handler served on path to the http server that serves metrics.
+func (s *defaultServer) AddExtraHandler(path string, handler http.Handler) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.options.ExtraHandlers == nil {
+		s.options.ExtraHandlers = make(map[string]http.Handler)
+	}
+	if path == defaultMetricsEndpoint {
+		return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint)
+	}
+	if _, found := s.options.ExtraHandlers[path]; found {
+		return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path)
+	}
+	s.options.ExtraHandlers[path] = handler
+	return nil
+}
+
 // Start runs the server.
 // It will install the metrics related resources depend on the server configuration.
 func (s *defaultServer) Start(ctx context.Context) error {
@@ -249,7 +272,7 @@ func (s *defaultServer) Start(ctx context.Context) error {
 
 func (s *defaultServer) createListener(ctx context.Context, log logr.Logger) (net.Listener, error) {
 	if !s.options.SecureServing {
-		return net.Listen("tcp", s.options.BindAddress)
+		return s.options.ListenConfig.Listen(ctx, "tcp", s.options.BindAddress)
 	}
 
 	cfg := &tls.Config{ //nolint:gosec
@@ -302,7 +325,12 @@ func (s *defaultServer) createListener(ctx context.Context, log logr.Logger) (ne
 		cfg.Certificates = []tls.Certificate{keyPair}
 	}
 
-	return tls.Listen("tcp", s.options.BindAddress, cfg)
+	l, err := s.options.ListenConfig.Listen(ctx, "tcp", s.options.BindAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	return tls.NewListener(l, cfg), nil
 }
 
 func (s *defaultServer) GetBindAddr() string {
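The metrics server gains an AddExtraHandler method and an Options.ListenConfig field, and the listener is now created through that ListenConfig. A minimal sketch of passing a custom net.ListenConfig through the manager's metrics options; the bind address and the empty Control callback are illustrative assumptions:

```go
package example

import (
	"net"
	"syscall"

	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// newManager wires a custom ListenConfig into the metrics listener, e.g. to
// set socket options before the metrics port is bound.
func newManager() (ctrl.Manager, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			// socket tweaks would go here; left empty in this sketch
			return nil
		},
	}

	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Metrics: metricsserver.Options{
			BindAddress:  ":8080",
			ListenConfig: lc,
		},
	})
}
```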
179  vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go  (generated, vendored)

@@ -17,6 +17,7 @@ limitations under the License.
 package predicate
 
 import (
+	"maps"
 	"reflect"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -29,45 +30,51 @@ import (
 var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters")
 
 // Predicate filters events before enqueuing the keys.
-type Predicate interface {
+type Predicate = TypedPredicate[client.Object]
+
+// TypedPredicate filters events before enqueuing the keys.
+type TypedPredicate[T any] interface {
 	// Create returns true if the Create event should be processed
-	Create(event.CreateEvent) bool
+	Create(event.TypedCreateEvent[T]) bool
 
 	// Delete returns true if the Delete event should be processed
-	Delete(event.DeleteEvent) bool
+	Delete(event.TypedDeleteEvent[T]) bool
 
 	// Update returns true if the Update event should be processed
-	Update(event.UpdateEvent) bool
+	Update(event.TypedUpdateEvent[T]) bool
 
 	// Generic returns true if the Generic event should be processed
-	Generic(event.GenericEvent) bool
+	Generic(event.TypedGenericEvent[T]) bool
 }
 
 var _ Predicate = Funcs{}
 var _ Predicate = ResourceVersionChangedPredicate{}
 var _ Predicate = GenerationChangedPredicate{}
 var _ Predicate = AnnotationChangedPredicate{}
-var _ Predicate = or{}
-var _ Predicate = and{}
-var _ Predicate = not{}
+var _ Predicate = or[client.Object]{}
+var _ Predicate = and[client.Object]{}
+var _ Predicate = not[client.Object]{}
 
 // Funcs is a function that implements Predicate.
-type Funcs struct {
+type Funcs = TypedFuncs[client.Object]
+
+// TypedFuncs is a function that implements TypedPredicate.
+type TypedFuncs[T any] struct {
 	// Create returns true if the Create event should be processed
-	CreateFunc func(event.CreateEvent) bool
+	CreateFunc func(event.TypedCreateEvent[T]) bool
 
 	// Delete returns true if the Delete event should be processed
-	DeleteFunc func(event.DeleteEvent) bool
+	DeleteFunc func(event.TypedDeleteEvent[T]) bool
 
 	// Update returns true if the Update event should be processed
-	UpdateFunc func(event.UpdateEvent) bool
+	UpdateFunc func(event.TypedUpdateEvent[T]) bool
 
 	// Generic returns true if the Generic event should be processed
-	GenericFunc func(event.GenericEvent) bool
+	GenericFunc func(event.TypedGenericEvent[T]) bool
 }
 
 // Create implements Predicate.
-func (p Funcs) Create(e event.CreateEvent) bool {
+func (p TypedFuncs[T]) Create(e event.TypedCreateEvent[T]) bool {
 	if p.CreateFunc != nil {
 		return p.CreateFunc(e)
 	}
@@ -75,7 +82,7 @@ func (p Funcs) Create(e event.CreateEvent) bool {
 }
 
 // Delete implements Predicate.
-func (p Funcs) Delete(e event.DeleteEvent) bool {
+func (p TypedFuncs[T]) Delete(e event.TypedDeleteEvent[T]) bool {
 	if p.DeleteFunc != nil {
 		return p.DeleteFunc(e)
 	}
@@ -83,7 +90,7 @@ func (p Funcs) Delete(e event.DeleteEvent) bool {
 }
 
 // Update implements Predicate.
-func (p Funcs) Update(e event.UpdateEvent) bool {
+func (p TypedFuncs[T]) Update(e event.TypedUpdateEvent[T]) bool {
 	if p.UpdateFunc != nil {
 		return p.UpdateFunc(e)
 	}
@@ -91,7 +98,7 @@ func (p Funcs) Update(e event.UpdateEvent) bool {
 }
 
 // Generic implements Predicate.
-func (p Funcs) Generic(e event.GenericEvent) bool {
+func (p TypedFuncs[T]) Generic(e event.TypedGenericEvent[T]) bool {
 	if p.GenericFunc != nil {
 		return p.GenericFunc(e)
 	}
@@ -118,6 +125,26 @@ func NewPredicateFuncs(filter func(object client.Object) bool) Funcs {
 	}
 }
 
+// NewTypedPredicateFuncs returns a predicate funcs that applies the given filter function
+// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied
+// to the new object.
+func NewTypedPredicateFuncs[T any](filter func(object T) bool) TypedFuncs[T] {
+	return TypedFuncs[T]{
+		CreateFunc: func(e event.TypedCreateEvent[T]) bool {
+			return filter(e.Object)
+		},
+		UpdateFunc: func(e event.TypedUpdateEvent[T]) bool {
+			return filter(e.ObjectNew)
+		},
+		DeleteFunc: func(e event.TypedDeleteEvent[T]) bool {
+			return filter(e.Object)
+		},
+		GenericFunc: func(e event.TypedGenericEvent[T]) bool {
+			return filter(e.Object)
+		},
+	}
+}
+
 // ResourceVersionChangedPredicate implements a default update predicate function on resource version change.
 type ResourceVersionChangedPredicate struct {
 	Funcs
@@ -153,17 +180,35 @@ func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool {
 //
 // * With this predicate, any update events with writes only to the status field will not be reconciled.
 // So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status.
-type GenerationChangedPredicate struct {
-	Funcs
+type GenerationChangedPredicate = TypedGenerationChangedPredicate[client.Object]
+
+// TypedGenerationChangedPredicate implements a default update predicate function on Generation change.
+//
+// This predicate will skip update events that have no change in the object's metadata.generation field.
+// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object.
+// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed.
+//
+// For CustomResource objects the Generation is only incremented when the status subresource is enabled.
+//
// Caveats:
+//
+// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs.
+// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field.
+// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to.
+//
+// * With this predicate, any update events with writes only to the status field will not be reconciled.
+// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status.
+type TypedGenerationChangedPredicate[T metav1.Object] struct {
+	TypedFuncs[T]
 }
 
 // Update implements default UpdateEvent filter for validating generation change.
-func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool {
-	if e.ObjectOld == nil {
+func (TypedGenerationChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool {
+	if isNil(e.ObjectOld) {
 		log.Error(nil, "Update event has no old object to update", "event", e)
 		return false
 	}
-	if e.ObjectNew == nil {
+	if isNil(e.ObjectNew) {
 		log.Error(nil, "Update event has no new object for update", "event", e)
 		return false
 	}
@@ -183,22 +228,25 @@ func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool {
 //
 // This is mostly useful for controllers that needs to trigger both when the resource's generation is incremented
 // (i.e., when the resource' .spec changes), or an annotation changes (e.g., for a staging/alpha API).
-type AnnotationChangedPredicate struct {
-	Funcs
+type AnnotationChangedPredicate = TypedAnnotationChangedPredicate[client.Object]
+
+// TypedAnnotationChangedPredicate implements a default update predicate function on annotation change.
+type TypedAnnotationChangedPredicate[T metav1.Object] struct {
+	TypedFuncs[T]
 }
 
 // Update implements default UpdateEvent filter for validating annotation change.
-func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool {
-	if e.ObjectOld == nil {
+func (TypedAnnotationChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool {
+	if isNil(e.ObjectOld) {
 		log.Error(nil, "Update event has no old object to update", "event", e)
 		return false
 	}
-	if e.ObjectNew == nil {
+	if isNil(e.ObjectNew) {
 		log.Error(nil, "Update event has no new object for update", "event", e)
 		return false
 	}
 
-	return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations())
+	return !maps.Equal(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations())
 }
 
 // LabelChangedPredicate implements a default update predicate function on label change.
@@ -214,34 +262,37 @@ func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool {
 //
 // This will be helpful when object's labels is carrying some extra specification information beyond object's spec,
 // and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens.
-type LabelChangedPredicate struct {
-	Funcs
+type LabelChangedPredicate = TypedLabelChangedPredicate[client.Object]
+
+// TypedLabelChangedPredicate implements a default update predicate function on label change.
+type TypedLabelChangedPredicate[T metav1.Object] struct {
+	TypedFuncs[T]
 }
 
 // Update implements default UpdateEvent filter for checking label change.
-func (LabelChangedPredicate) Update(e event.UpdateEvent) bool {
-	if e.ObjectOld == nil {
+func (TypedLabelChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool {
+	if isNil(e.ObjectOld) {
 		log.Error(nil, "Update event has no old object to update", "event", e)
 		return false
 	}
-	if e.ObjectNew == nil {
+	if isNil(e.ObjectNew) {
 		log.Error(nil, "Update event has no new object for update", "event", e)
 		return false
 	}
 
-	return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
+	return !maps.Equal(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
 }
 
 // And returns a composite predicate that implements a logical AND of the predicates passed to it.
-func And(predicates ...Predicate) Predicate {
-	return and{predicates}
+func And[T any](predicates ...TypedPredicate[T]) TypedPredicate[T] {
+	return and[T]{predicates}
 }
 
-type and struct {
-	predicates []Predicate
+type and[T any] struct {
+	predicates []TypedPredicate[T]
 }
 
-func (a and) Create(e event.CreateEvent) bool {
+func (a and[T]) Create(e event.TypedCreateEvent[T]) bool {
 	for _, p := range a.predicates {
 		if !p.Create(e) {
 			return false
@@ -250,7 +301,7 @@ func (a and) Create(e event.CreateEvent) bool {
 	return true
 }
 
-func (a and) Update(e event.UpdateEvent) bool {
+func (a and[T]) Update(e event.TypedUpdateEvent[T]) bool {
 	for _, p := range a.predicates {
 		if !p.Update(e) {
 			return false
@@ -259,7 +310,7 @@ func (a and) Update(e event.UpdateEvent) bool {
 	return true
 }
 
-func (a and) Delete(e event.DeleteEvent) bool {
+func (a and[T]) Delete(e event.TypedDeleteEvent[T]) bool {
 	for _, p := range a.predicates {
 		if !p.Delete(e) {
 			return false
@@ -268,7 +319,7 @@ func (a and) Delete(e event.DeleteEvent) bool {
 	return true
 }
 
-func (a and) Generic(e event.GenericEvent) bool {
+func (a and[T]) Generic(e event.TypedGenericEvent[T]) bool {
 	for _, p := range a.predicates {
 		if !p.Generic(e) {
 			return false
@@ -278,15 +329,15 @@ func (a and) Generic(e event.GenericEvent) bool {
 }
 
 // Or returns a composite predicate that implements a logical OR of the predicates passed to it.
-func Or(predicates ...Predicate) Predicate {
-	return or{predicates}
+func Or[T any](predicates ...TypedPredicate[T]) TypedPredicate[T] {
+	return or[T]{predicates}
 }
 
-type or struct {
-	predicates []Predicate
+type or[T any] struct {
+	predicates []TypedPredicate[T]
 }
 
-func (o or) Create(e event.CreateEvent) bool {
+func (o or[T]) Create(e event.TypedCreateEvent[T]) bool {
 	for _, p := range o.predicates {
 		if p.Create(e) {
 			return true
@@ -295,7 +346,7 @@ func (o or) Create(e event.CreateEvent) bool {
 	return false
 }
 
-func (o or) Update(e event.UpdateEvent) bool {
+func (o or[T]) Update(e event.TypedUpdateEvent[T]) bool {
 	for _, p := range o.predicates {
 		if p.Update(e) {
 			return true
@@ -304,7 +355,7 @@ func (o or) Update(e event.UpdateEvent) bool {
 	return false
 }
 
-func (o or) Delete(e event.DeleteEvent) bool {
+func (o or[T]) Delete(e event.TypedDeleteEvent[T]) bool {
 	for _, p := range o.predicates {
 		if p.Delete(e) {
 			return true
@@ -313,7 +364,7 @@ func (o or) Delete(e event.DeleteEvent) bool {
 	return false
 }
 
-func (o or) Generic(e event.GenericEvent) bool {
+func (o or[T]) Generic(e event.TypedGenericEvent[T]) bool {
 	for _, p := range o.predicates {
 		if p.Generic(e) {
 			return true
@@ -323,27 +374,27 @@ func (o or) Generic(e event.GenericEvent) bool {
 }
 
 // Not returns a predicate that implements a logical NOT of the predicate passed to it.
-func Not(predicate Predicate) Predicate {
-	return not{predicate}
+func Not[T any](predicate TypedPredicate[T]) TypedPredicate[T] {
+	return not[T]{predicate}
 }
 
-type not struct {
-	predicate Predicate
+type not[T any] struct {
+	predicate TypedPredicate[T]
 }
 
-func (n not) Create(e event.CreateEvent) bool {
+func (n not[T]) Create(e event.TypedCreateEvent[T]) bool {
 	return !n.predicate.Create(e)
 }
 
-func (n not) Update(e event.UpdateEvent) bool {
+func (n not[T]) Update(e event.TypedUpdateEvent[T]) bool {
 	return !n.predicate.Update(e)
 }
 
-func (n not) Delete(e event.DeleteEvent) bool {
+func (n not[T]) Delete(e event.TypedDeleteEvent[T]) bool {
 	return !n.predicate.Delete(e)
 }
 
-func (n not) Generic(e event.GenericEvent) bool {
+func (n not[T]) Generic(e event.TypedGenericEvent[T]) bool {
 	return !n.predicate.Generic(e)
 }
 
@@ -358,3 +409,15 @@ func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) {
 		return selector.Matches(labels.Set(o.GetLabels()))
 	}), nil
 }
+
+func isNil(arg any) bool {
+	if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr ||
+		v.Kind() == reflect.Interface ||
+		v.Kind() == reflect.Slice ||
+		v.Kind() == reflect.Map ||
+		v.Kind() == reflect.Chan ||
+		v.Kind() == reflect.Func) && v.IsNil()) {
+		return true
+	}
+	return false
+}
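The predicate package is now generic: Predicate, Funcs and the *ChangedPredicate types become aliases for typed variants, and And/Or/Not take a type parameter. A minimal sketch of a typed predicate for a concrete object type, suitable for passing to a typed source for that type; the Deployment kind and the "team" label are illustrative assumptions:

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// deploymentPredicate builds a TypedPredicate without the client.Object
// round trip the old untyped API required.
func deploymentPredicate() predicate.TypedPredicate[*appsv1.Deployment] {
	// NewTypedPredicateFuncs applies the filter to all four event kinds
	// (the new object on UPDATE), per the doc comment in the diff above.
	inTeam := predicate.NewTypedPredicateFuncs[*appsv1.Deployment](func(d *appsv1.Deployment) bool {
		return d.Labels["team"] == "storage" // illustrative label
	})

	// TypedGenerationChangedPredicate works for any T that satisfies metav1.Object.
	specChanged := predicate.TypedGenerationChangedPredicate[*appsv1.Deployment]{}

	// And/Or/Not are generic as well, so typed predicates compose as before.
	return predicate.And[*appsv1.Deployment](inTeam, specChanged)
}
```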
93  vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go  (generated, vendored)

@@ -1,93 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package scheme contains utilities for gradually building Schemes,
-// which contain information associating Go types with Kubernetes
-// groups, versions, and kinds.
-//
-// Each API group should define a utility function
-// called AddToScheme for adding its types to a Scheme:
-//
-//	// in package myapigroupv1...
-//	var (
-//		SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
-//		SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
-//		AddToScheme = SchemeBuilder.AddToScheme
-//	)
-//
-//	func init() {
-//		SchemeBuilder.Register(&MyType{}, &MyTypeList)
-//	}
-//	var (
-//		scheme *runtime.Scheme = runtime.NewScheme()
-//	)
-//
-// This also true of the built-in Kubernetes types. Then, in the entrypoint for
-// your manager, assemble the scheme containing exactly the types you need,
-// panicing if scheme registration failed. For instance, if our controller needs
-// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
-//
-//	func init() {
-//		utilruntime.Must(myapigroupv1.AddToScheme(scheme))
-//		utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
-//	}
-//
-//	func main() {
-//		mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
-//			Scheme: scheme,
-//		})
-//		// ...
-//	}
-package scheme
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
-type Builder struct {
-	GroupVersion schema.GroupVersion
-	runtime.SchemeBuilder
-}
-
-// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
-func (bld *Builder) Register(object ...runtime.Object) *Builder {
-	bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
-		scheme.AddKnownTypes(bld.GroupVersion, object...)
-		metav1.AddToGroupVersion(scheme, bld.GroupVersion)
-		return nil
-	})
-	return bld
-}
-
-// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
-func (bld *Builder) RegisterAll(b *Builder) *Builder {
-	bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
-	return bld
-}
-
-// AddToScheme adds all registered types to s.
-func (bld *Builder) AddToScheme(s *runtime.Scheme) error {
-	return bld.SchemeBuilder.AddToScheme(s)
-}
-
-// Build returns a new Scheme containing the registered types.
-func (bld *Builder) Build() (*runtime.Scheme, error) {
-	s := runtime.NewScheme()
-	return s, bld.AddToScheme(s)
-}
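The pkg/scheme package (the scheme.Builder helper) is removed in this release. Assembling the scheme for a manager still works the same way with plain apimachinery and client-go calls, along the lines of the pattern the removed package's own doc comment described; a minimal sketch, using only built-in Kubernetes types so it stays self-contained:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
)

var scheme = runtime.NewScheme()

func init() {
	// The removed Builder only wrapped runtime.SchemeBuilder; group-specific
	// AddToScheme functions can still be registered here the same way.
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
}

// newManager hands the assembled scheme to the manager, as before.
func newManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
}
```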
124  vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go  (generated, vendored)

@@ -18,10 +18,12 @@ package source
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 
 	"k8s.io/client-go/util/workqueue"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -31,23 +33,18 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 )
 
-const (
-	// defaultBufferSize is the default number of event notifications that can be buffered.
-	defaultBufferSize = 1024
-)
-
-// Source is a source of events (eh.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc)
+// Source is a source of events (e.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc)
 // which should be processed by event.EventHandlers to enqueue reconcile.Requests.
 //
 // * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update).
 //
-// * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls).
+// * Use Channel for events originating outside the cluster (e.g. GitHub Webhook callback, Polling external urls).
 //
 // Users may build their own Source implementations.
 type Source interface {
 	// Start is internal and should be called only by the Controller to register an EventHandler with the Informer
 	// to enqueue reconcile.Requests.
-	Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error
+	Start(context.Context, workqueue.RateLimitingInterface) error
 }
 
 // SyncingSource is a source that needs syncing prior to being usable. The controller
@@ -58,54 +55,92 @@ type SyncingSource interface {
 }
 
 // Kind creates a KindSource with the given cache provider.
-func Kind(cache cache.Cache, object client.Object) SyncingSource {
-	return &internal.Kind{Type: object, Cache: cache}
+func Kind[T client.Object](cache cache.Cache, object T, handler handler.TypedEventHandler[T], predicates ...predicate.TypedPredicate[T]) SyncingSource {
+	return &internal.Kind[T]{
+		Type:       object,
+		Cache:      cache,
+		Handler:    handler,
+		Predicates: predicates,
+	}
 }
 
-var _ Source = &Channel{}
+var _ Source = &channel[string]{}
+
+// ChannelOpt allows to configure a source.Channel.
+type ChannelOpt[T any] func(*channel[T])
+
+// WithPredicates adds the configured predicates to a source.Channel.
+func WithPredicates[T any](p ...predicate.TypedPredicate[T]) ChannelOpt[T] {
+	return func(c *channel[T]) {
+		c.predicates = append(c.predicates, p...)
+	}
+}
+
+// WithBufferSize configures the buffer size for a source.Channel. By
+// default, the buffer size is 1024.
+func WithBufferSize[T any](bufferSize int) ChannelOpt[T] {
+	return func(c *channel[T]) {
+		c.bufferSize = &bufferSize
+	}
+}
 
 // Channel is used to provide a source of events originating outside the cluster
 // (e.g. GitHub Webhook callback). Channel requires the user to wire the external
-// source (eh.g. http handler) to write GenericEvents to the underlying channel.
-type Channel struct {
+// source (e.g. http handler) to write GenericEvents to the underlying channel.
+func Channel[T any](source <-chan event.TypedGenericEvent[T], handler handler.TypedEventHandler[T], opts ...ChannelOpt[T]) Source {
+	c := &channel[T]{
+		source:  source,
+		handler: handler,
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+
+	return c
+}
+
+type channel[T any] struct {
 	// once ensures the event distribution goroutine will be performed only once
 	once sync.Once
 
-	// Source is the source channel to fetch GenericEvents
-	Source <-chan event.GenericEvent
+	// source is the source channel to fetch GenericEvents
+	source <-chan event.TypedGenericEvent[T]
+
+	handler handler.TypedEventHandler[T]
+
+	predicates []predicate.TypedPredicate[T]
+
+	bufferSize *int
 
 	// dest is the destination channels of the added event handlers
-	dest []chan event.GenericEvent
-
-	// DestBufferSize is the specified buffer size of dest channels.
-	// Default to 1024 if not specified.
-	DestBufferSize int
+	dest []chan event.TypedGenericEvent[T]
 
 	// destLock is to ensure the destination channels are safely added/removed
 	destLock sync.Mutex
 }
 
-func (cs *Channel) String() string {
+func (cs *channel[T]) String() string {
 	return fmt.Sprintf("channel source: %p", cs)
 }
 
 // Start implements Source and should only be called by the Controller.
-func (cs *Channel) Start(
+func (cs *channel[T]) Start(
 	ctx context.Context,
-	handler handler.EventHandler,
 	queue workqueue.RateLimitingInterface,
-	prct ...predicate.Predicate) error {
+) error {
 	// Source should have been specified by the user.
-	if cs.Source == nil {
+	if cs.source == nil {
 		return fmt.Errorf("must specify Channel.Source")
 	}
-
-	// use default value if DestBufferSize not specified
-	if cs.DestBufferSize == 0 {
-		cs.DestBufferSize = defaultBufferSize
+	if cs.handler == nil {
+		return errors.New("must specify Channel.Handler")
 	}
 
-	dst := make(chan event.GenericEvent, cs.DestBufferSize)
+	if cs.bufferSize == nil {
+		cs.bufferSize = ptr.To(1024)
+	}
+
+	dst := make(chan event.TypedGenericEvent[T], *cs.bufferSize)
 
 	cs.destLock.Lock()
 	cs.dest = append(cs.dest, dst)
@@ -119,7 +154,7 @@ func (cs *Channel) Start(
 	go func() {
 		for evt := range dst {
 			shouldHandle := true
-			for _, p := range prct {
+			for _, p := range cs.predicates {
 				if !p.Generic(evt) {
 					shouldHandle = false
 					break
@@ -130,7 +165,7 @@ func (cs *Channel) Start(
 			func() {
 				ctx, cancel := context.WithCancel(ctx)
 				defer cancel()
-				handler.Generic(ctx, evt, queue)
+				cs.handler.Generic(ctx, evt, queue)
 			}()
 		}
 	}
@@ -139,7 +174,7 @@ func (cs *Channel) Start(
 	return nil
 }
 
-func (cs *Channel) doStop() {
+func (cs *channel[T]) doStop() {
 	cs.destLock.Lock()
 	defer cs.destLock.Unlock()
 
@@ -148,7 +183,7 @@ func (cs *Channel) doStop() {
 	}
 }
 
-func (cs *Channel) distribute(evt event.GenericEvent) {
+func (cs *channel[T]) distribute(evt event.TypedGenericEvent[T]) {
 	cs.destLock.Lock()
 	defer cs.destLock.Unlock()
 
@@ -162,14 +197,14 @@ func (cs *Channel) distribute(evt event.GenericEvent) {
 	}
 }
 
-func (cs *Channel) syncLoop(ctx context.Context) {
+func (cs *channel[T]) syncLoop(ctx context.Context) {
 	for {
 		select {
 		case <-ctx.Done():
 			// Close destination channels
 			cs.doStop()
 			return
-		case evt, stillOpen := <-cs.Source:
+		case evt, stillOpen := <-cs.source:
 			if !stillOpen {
 				// if the source channel is closed, we're never gonna get
 				// anything more on it, so stop & bail
@@ -185,20 +220,24 @@ func (cs *Channel) syncLoop(ctx context.Context) {
 type Informer struct {
 	// Informer is the controller-runtime Informer
 	Informer cache.Informer
+	Handler handler.EventHandler
+	Predicates []predicate.Predicate
 }
 
 var _ Source = &Informer{}
 
 // Start is internal and should be called only by the Controller to register an EventHandler with the Informer
 // to enqueue reconcile.Requests.
-func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
-	prct ...predicate.Predicate) error {
+func (is *Informer) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error {
 	// Informer should have been specified by the user.
 	if is.Informer == nil {
 		return fmt.Errorf("must specify Informer.Informer")
 	}
+	if is.Handler == nil {
+		return errors.New("must specify Informer.Handler")
+	}
 
-	_, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, handler, prct).HandlerFuncs())
+	_, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, is.Handler, is.Predicates).HandlerFuncs())
 	if err != nil {
 		return err
 	}
@@ -212,12 +251,11 @@ func (is *Informer) String() string {
 var _ Source = Func(nil)
 
 // Func is a function that implements Source.
-type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error
+type Func func(context.Context, workqueue.RateLimitingInterface) error
 
 // Start implements Source.
-func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface,
-	pr ...predicate.Predicate) error {
-	return f(ctx, evt, queue, pr...)
+func (f Func) Start(ctx context.Context, queue workqueue.RateLimitingInterface) error {
+	return f(ctx, queue)
 }
 
 func (f Func) String() string {
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
generated
vendored
25
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
generated
vendored
@ -26,22 +26,35 @@ import (
|
|||||||
|
|
||||||
// Decoder knows how to decode the contents of an admission
|
// Decoder knows how to decode the contents of an admission
|
||||||
// request into a concrete object.
|
// request into a concrete object.
|
||||||
type Decoder struct {
|
type Decoder interface {
|
||||||
|
// Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object.
|
||||||
|
// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw.
|
||||||
|
// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes.
|
||||||
|
Decode(req Request, into runtime.Object) error
|
||||||
|
|
||||||
|
// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object.
|
||||||
|
// It errors out if rawObj is empty i.e. containing 0 raw bytes.
|
||||||
|
DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// decoder knows how to decode the contents of an admission
|
||||||
|
// request into a concrete object.
|
||||||
|
type decoder struct {
|
||||||
codecs serializer.CodecFactory
|
codecs serializer.CodecFactory
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDecoder creates a Decoder given the runtime.Scheme.
|
// NewDecoder creates a decoder given the runtime.Scheme.
|
||||||
func NewDecoder(scheme *runtime.Scheme) *Decoder {
|
func NewDecoder(scheme *runtime.Scheme) Decoder {
|
||||||
if scheme == nil {
|
if scheme == nil {
|
||||||
panic("scheme should never be nil")
|
panic("scheme should never be nil")
|
||||||
}
|
}
|
||||||
return &Decoder{codecs: serializer.NewCodecFactory(scheme)}
|
return &decoder{codecs: serializer.NewCodecFactory(scheme)}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object.
|
// Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object.
|
||||||
// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw.
|
// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw.
|
||||||
// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes.
|
// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes.
|
||||||
func (d *Decoder) Decode(req Request, into runtime.Object) error {
|
func (d *decoder) Decode(req Request, into runtime.Object) error {
|
||||||
// we error out if rawObj is an empty object.
|
// we error out if rawObj is an empty object.
|
||||||
if len(req.Object.Raw) == 0 {
|
if len(req.Object.Raw) == 0 {
|
||||||
return fmt.Errorf("there is no content to decode")
|
return fmt.Errorf("there is no content to decode")
|
||||||
@ -51,7 +64,7 @@ func (d *Decoder) Decode(req Request, into runtime.Object) error {
|
|||||||
|
|
||||||
// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object.
|
// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object.
|
||||||
// It errors out if rawObj is empty i.e. containing 0 raw bytes.
|
// It errors out if rawObj is empty i.e. containing 0 raw bytes.
|
||||||
func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error {
|
func (d *decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error {
|
||||||
// NB(directxman12): there's a bug/weird interaction between decoders and
|
// NB(directxman12): there's a bug/weird interaction between decoders and
|
||||||
// the API server where the API server doesn't send a GVK on the embedded
|
// the API server where the API server doesn't send a GVK on the embedded
|
||||||
// objects, which means the unstructured decoder refuses to decode. It
|
// objects, which means the unstructured decoder refuses to decode. It
|
||||||
|
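admission.Decoder is now an interface (the webhook handler structs below switch their fields from *Decoder to Decoder accordingly). A minimal sketch of a custom handler written against the new shape; the podAnnotator name and the Pod check are illustrative assumptions:

```go
package example

import (
	"context"
	"net/http"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podAnnotator holds the decoder as the new interface type; previously this
// field would have been *admission.Decoder.
type podAnnotator struct {
	decoder admission.Decoder
}

func (a *podAnnotator) Handle(ctx context.Context, req admission.Request) admission.Response {
	pod := &corev1.Pod{}
	if err := a.decoder.Decode(req, pod); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	return admission.Allowed("")
}
```

A decoder value for the field can still come from admission.NewDecoder(scheme), which now returns the interface per the hunk above.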
2  vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go  (generated, vendored)

@@ -43,7 +43,7 @@ func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook
 
 type mutatingHandler struct {
 	defaulter Defaulter
-	decoder   *Decoder
+	decoder   Decoder
 }
 
 // Handle handles admission requests.
2  vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go  (generated, vendored)

@@ -43,7 +43,7 @@ func WithCustomDefaulter(scheme *runtime.Scheme, obj runtime.Object, defaulter C
 type defaulterForType struct {
 	defaulter CustomDefaulter
 	object    runtime.Object
-	decoder   *Decoder
+	decoder   Decoder
 }
 
 // Handle handles admission requests.
2  vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go  (generated, vendored)

@@ -63,7 +63,7 @@ func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook
 
 type validatingHandler struct {
 	validator Validator
-	decoder   *Decoder
+	decoder   Decoder
 }
 
 // Handle handles admission requests.
2  vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go  (generated, vendored)

@@ -56,7 +56,7 @@ func WithCustomValidator(scheme *runtime.Scheme, obj runtime.Object, validator C
 type validatorForType struct {
 	validator CustomValidator
 	object    runtime.Object
-	decoder   *Decoder
+	decoder   Decoder
 }
 
 // Handle handles admission requests.