mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
rebase: bump the k8s-dependencies group with 2 updates
Bumps the k8s-dependencies group with 2 updates: [k8s.io/klog/v2](https://github.com/kubernetes/klog) and [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime).

Updates `k8s.io/klog/v2` from 2.120.0 to 2.120.1
- [Release notes](https://github.com/kubernetes/klog/releases)
- [Changelog](https://github.com/kubernetes/klog/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes/klog/compare/v2.120.0...v2.120.1)

Updates `sigs.k8s.io/controller-runtime` from 0.16.3 to 0.17.0
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.16.3...v0.17.0)

---
updated-dependencies:
- dependency-name: k8s.io/klog/v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s-dependencies
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
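With this bump applied, the module's go.mod carries the two updated pins (illustrative fragment; the surrounding require entries are omitted):

require (
	k8s.io/klog/v2 v2.120.1
	sigs.k8s.io/controller-runtime v0.17.0
)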
committed by mergify[bot]
parent 2217d106c4
commit ea86bf7d83
vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go (18 changes; generated, vendored)
@@ -33,7 +33,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	toolscache "k8s.io/client-go/tools/cache"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 
 	"sigs.k8s.io/controller-runtime/pkg/cache/internal"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -83,6 +83,9 @@ type Informers interface {
 	// of the underlying object.
 	GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error)
 
+	// RemoveInformer removes an informer entry and stops it if it was running.
+	RemoveInformer(ctx context.Context, obj client.Object) error
+
 	// Start runs all the informers known to this cache until the context is closed.
 	// It blocks.
 	Start(ctx context.Context) error
@@ -121,6 +124,8 @@ type Informer interface {
 
 	// HasSynced return true if the informers underlying store has synced.
 	HasSynced() bool
+	// IsStopped returns true if the informer has been stopped.
+	IsStopped() bool
 }
 
 // AllNamespaces should be used as the map key to deliminate namespace settings
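Together, Informers.RemoveInformer and Informer.IsStopped let a consumer tear down a single watch at runtime. A minimal sketch of how that could look against a started cache (the ConfigMap type and the function name are illustrative, not from this diff):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// dropConfigMapWatch stops and removes the ConfigMap informer from a started cache.
func dropConfigMapWatch(ctx context.Context, c cache.Cache) error {
	inf, err := c.GetInformer(ctx, &corev1.ConfigMap{})
	if err != nil {
		return err
	}
	if err := c.RemoveInformer(ctx, &corev1.ConfigMap{}); err != nil {
		return err
	}
	_ = inf.IsStopped() // reports true once the removed informer has shut down
	return nil
}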
@@ -199,6 +204,12 @@ type Options struct {
 	// unless there is already one set in ByObject or DefaultNamespaces.
 	DefaultTransform toolscache.TransformFunc
 
+	// DefaultWatchErrorHandler will be used to the WatchErrorHandler which is called
+	// whenever ListAndWatch drops the connection with an error.
+	//
+	// After calling this handler, the informer will backoff and retry.
+	DefaultWatchErrorHandler toolscache.WatchErrorHandler
+
 	// DefaultUnsafeDisableDeepCopy is the default for UnsafeDisableDeepCopy
 	// for everything that doesn't specify this.
 	//
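The handler type is client-go's toolscache.WatchErrorHandler, and further down in this diff it is forwarded to SharedIndexInformer.SetWatchErrorHandler for every informer the cache creates. A configuration sketch, assuming only that a logging handler is wanted:

import (
	"log"

	"k8s.io/client-go/rest"
	toolscache "k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func newCacheWithWatchErrorHandler(cfg *rest.Config) (cache.Cache, error) {
	return cache.New(cfg, cache.Options{
		// Called whenever ListAndWatch drops the connection; the informer
		// then backs off and retries, per the comment above.
		DefaultWatchErrorHandler: func(r *toolscache.Reflector, err error) {
			log.Printf("watch error (will retry): %v", err)
		},
	})
}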
@@ -369,7 +380,8 @@ func newCache(restConfig *rest.Config, opts Options) newCacheFunc {
 					Field: config.FieldSelector,
 				},
 				Transform:             config.Transform,
-				UnsafeDisableDeepCopy: pointer.BoolDeref(config.UnsafeDisableDeepCopy, false),
+				WatchErrorHandler:     opts.DefaultWatchErrorHandler,
+				UnsafeDisableDeepCopy: ptr.Deref(config.UnsafeDisableDeepCopy, false),
 				NewInformer:           opts.newInformer,
 			}),
 			readerFailOnMissingInformer: opts.ReaderFailOnMissingInformer,
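The call-site change follows from the import swap above: k8s.io/utils/pointer exposes per-type helpers, while k8s.io/utils/ptr replaces them with a single generic pair. Side by side:

import "k8s.io/utils/ptr"

func effectiveDisableDeepCopy(disable *bool) bool {
	// before: pointer.BoolDeref(disable, false)
	return ptr.Deref(disable, false) // generic Deref works for any element type
}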
@@ -400,7 +412,7 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
 	// Construct a new Mapper if unset
 	if opts.Mapper == nil {
 		var err error
-		opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient)
+		opts.Mapper, err = apiutil.NewDynamicRESTMapper(config, opts.HTTPClient)
 		if err != nil {
 			return Options{}, fmt.Errorf("could not create RESTMapper from config: %w", err)
 		}
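The default mapper is swapped here for the dynamic variant, which resolves REST mappings on demand and re-runs discovery when it meets an unknown GroupVersionKind. A sketch of equivalent direct use (assuming a valid rest.Config):

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func deploymentMapping(cfg *rest.Config) error {
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return err
	}
	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		return err
	}
	// Mappings are discovered lazily and refreshed when a kind is unknown.
	_, err = mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	return err
}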
vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go (8 changes; generated, vendored)
@@ -52,6 +52,14 @@ func (dbt *delegatingByGVKCache) List(ctx context.Context, list client.ObjectLis
 	return cache.List(ctx, list, opts...)
 }
 
+func (dbt *delegatingByGVKCache) RemoveInformer(ctx context.Context, obj client.Object) error {
+	cache, err := dbt.cacheForObject(obj)
+	if err != nil {
+		return err
+	}
+	return cache.RemoveInformer(ctx, obj)
+}
+
 func (dbt *delegatingByGVKCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) {
 	cache, err := dbt.cacheForObject(obj)
 	if err != nil {
vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go (11 changes; generated, vendored)
@@ -190,6 +190,17 @@ func (ic *informerCache) getInformerForKind(ctx context.Context, gvk schema.Grou
 	return ic.Informers.Get(ctx, gvk, obj, &internal.GetOptions{})
 }
 
+// RemoveInformer deactivates and removes the informer from the cache.
+func (ic *informerCache) RemoveInformer(_ context.Context, obj client.Object) error {
+	gvk, err := apiutil.GVKForObject(obj, ic.scheme)
+	if err != nil {
+		return err
+	}
+
+	ic.Informers.Remove(gvk, obj)
+	return nil
+}
+
 // NeedLeaderElection implements the LeaderElectionRunnable interface
 // to indicate that this can be started without requiring the leader lock.
 func (ic *informerCache) NeedLeaderElection() bool {
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go (55 changes; generated, vendored)
@@ -23,6 +23,7 @@ import (
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -117,16 +118,14 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli
 
 	switch {
 	case listOpts.FieldSelector != nil:
-		// TODO(directxman12): support more complicated field selectors by
-		// combining multiple indices, GetIndexers, etc
-		field, val, requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector)
+		requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector)
 		if !requiresExact {
 			return fmt.Errorf("non-exact field matches are not supported by the cache")
 		}
 		// list all objects by the field selector. If this is namespaced and we have one, ask for the
 		// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
 		// namespace.
-		objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
+		objs, err = byIndexes(c.indexer, listOpts.FieldSelector.Requirements(), listOpts.Namespace)
 	case listOpts.Namespace != "":
 		objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
 	default:
@@ -178,6 +177,54 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli
 	return apimeta.SetList(out, runtimeObjs)
 }
 
+func byIndexes(indexer cache.Indexer, requires fields.Requirements, namespace string) ([]interface{}, error) {
+	var (
+		err  error
+		objs []interface{}
+		vals []string
+	)
+	indexers := indexer.GetIndexers()
+	for idx, req := range requires {
+		indexName := FieldIndexName(req.Field)
+		indexedValue := KeyToNamespacedKey(namespace, req.Value)
+		if idx == 0 {
+			// we use first require to get snapshot data
+			// TODO(halfcrazy): use complicated index when client-go provides byIndexes
+			// https://github.com/kubernetes/kubernetes/issues/109329
+			objs, err = indexer.ByIndex(indexName, indexedValue)
+			if err != nil {
+				return nil, err
+			}
+			if len(objs) == 0 {
+				return nil, nil
+			}
+			continue
+		}
+		fn, exist := indexers[indexName]
+		if !exist {
+			return nil, fmt.Errorf("index with name %s does not exist", indexName)
+		}
+		filteredObjects := make([]interface{}, 0, len(objs))
+		for _, obj := range objs {
+			vals, err = fn(obj)
+			if err != nil {
+				return nil, err
+			}
+			for _, val := range vals {
+				if val == indexedValue {
+					filteredObjects = append(filteredObjects, obj)
+					break
+				}
+			}
+		}
+		if len(filteredObjects) == 0 {
+			return nil, nil
+		}
+		objs = filteredObjects
+	}
+	return objs, nil
+}
+
 // objectKeyToStorageKey converts an object key to store key.
 // It's akin to MetaNamespaceKeyFunc. It's separate from
 // String to allow keeping the key format easily in sync with
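byIndexes seeds the candidate set from the index of the first requirement, then checks each further requirement by re-running that field's index function over the snapshot, so a cached List can now AND several exact-match field selectors. A usage sketch (field names and extractors are illustrative; every selected field must first be registered via IndexField):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func runningPodsOnNode(ctx context.Context, c cache.Cache) (*corev1.PodList, error) {
	for field, extract := range map[string]client.IndexerFunc{
		"spec.nodeName": func(o client.Object) []string { return []string{o.(*corev1.Pod).Spec.NodeName} },
		"status.phase":  func(o client.Object) []string { return []string{string(o.(*corev1.Pod).Status.Phase)} },
	} {
		if err := c.IndexField(ctx, &corev1.Pod{}, field, extract); err != nil {
			return nil, err
		}
	}
	pods := &corev1.PodList{}
	// Both exact-match requirements are answered from the cache's indexes.
	err := c.List(ctx, pods, client.MatchingFields{"spec.nodeName": "node-1", "status.phase": "Running"})
	return pods, err
}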
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go (57 changes; generated, vendored)
@@ -36,6 +36,7 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/internal/syncs"
 )
 
 // InformersOpts configures an InformerMap.
@@ -49,6 +50,7 @@ type InformersOpts struct {
 	Selector              Selector
 	Transform             cache.TransformFunc
 	UnsafeDisableDeepCopy bool
+	WatchErrorHandler     cache.WatchErrorHandler
 }
 
 // NewInformers creates a new InformersMap that can create informers under the hood.
@@ -76,6 +78,7 @@ func NewInformers(config *rest.Config, options *InformersOpts) *Informers {
 		transform:             options.Transform,
 		unsafeDisableDeepCopy: options.UnsafeDisableDeepCopy,
 		newInformer:           newInformer,
+		watchErrorHandler:     options.WatchErrorHandler,
 	}
 }
 
@@ -86,6 +89,20 @@ type Cache struct {
 
 	// CacheReader wraps Informer and implements the CacheReader interface for a single type
 	Reader CacheReader
+
+	// Stop can be used to stop this individual informer.
+	stop chan struct{}
+}
+
+// Start starts the informer managed by a MapEntry.
+// Blocks until the informer stops. The informer can be stopped
+// either individually (via the entry's stop channel) or globally
+// via the provided stop argument.
+func (c *Cache) Start(stop <-chan struct{}) {
+	// Stop on either the whole map stopping or just this informer being removed.
+	internalStop, cancel := syncs.MergeChans(stop, c.stop)
+	defer cancel()
+	c.Informer.Run(internalStop)
 }
 
 type tracker struct {
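syncs is an internal controller-runtime package, so its source isn't part of this diff; read from the call site, MergeChans evidently yields a channel that closes when either input closes, plus a cancel func to release the merging goroutine. A rough equivalent, offered as an assumption rather than the actual implementation:

import "sync"

// mergeChans returns a channel that closes when either input closes, plus a
// cancel func to release the goroutine; sync.Once keeps cancel idempotent.
func mergeChans(a, b <-chan struct{}) (<-chan struct{}, func()) {
	out := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(out)
		select {
		case <-a:
		case <-b:
		case <-done:
		}
	}()
	var once sync.Once
	return out, func() { once.Do(func() { close(done) }) }
}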
@@ -159,6 +176,11 @@ type Informers struct {
 
 	// NewInformer allows overriding of the shared index informer constructor for testing.
 	newInformer func(cache.ListerWatcher, runtime.Object, time.Duration, cache.Indexers) cache.SharedIndexInformer
+
+	// WatchErrorHandler allows the shared index informer's
+	// watchErrorHandler to be set by overriding the options
+	// or to use the default watchErrorHandler
+	watchErrorHandler cache.WatchErrorHandler
 }
 
 // Start calls Run on each of the informers and sets started to true. Blocks on the context.
@@ -173,13 +195,13 @@ func (ip *Informers) Start(ctx context.Context) error {
 
 		// Start each informer
 		for _, i := range ip.tracker.Structured {
-			ip.startInformerLocked(i.Informer)
+			ip.startInformerLocked(i)
 		}
 		for _, i := range ip.tracker.Unstructured {
-			ip.startInformerLocked(i.Informer)
+			ip.startInformerLocked(i)
 		}
 		for _, i := range ip.tracker.Metadata {
-			ip.startInformerLocked(i.Informer)
+			ip.startInformerLocked(i)
 		}
 
 		// Set started to true so we immediately start any informers added later.
@@ -194,7 +216,7 @@ func (ip *Informers) Start(ctx context.Context) error {
 	return nil
 }
 
-func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) {
+func (ip *Informers) startInformerLocked(cacheEntry *Cache) {
 	// Don't start the informer in case we are already waiting for the items in
 	// the waitGroup to finish, since waitGroups don't support waiting and adding
 	// at the same time.
@@ -205,7 +227,7 @@ func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) {
 	ip.waitGroup.Add(1)
 	go func() {
 		defer ip.waitGroup.Done()
-		informer.Run(ip.ctx.Done())
+		cacheEntry.Start(ip.ctx.Done())
 	}()
 }
 
@@ -281,6 +303,21 @@ func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj r
 	return started, i, nil
 }
 
+// Remove removes an informer entry and stops it if it was running.
+func (ip *Informers) Remove(gvk schema.GroupVersionKind, obj runtime.Object) {
+	ip.mu.Lock()
+	defer ip.mu.Unlock()
+
+	informerMap := ip.informersByType(obj)
+
+	entry, ok := informerMap[gvk]
+	if !ok {
+		return
+	}
+	close(entry.stop)
+	delete(informerMap, gvk)
+}
+
 func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache {
 	switch obj.(type) {
 	case runtime.Unstructured:
@@ -323,6 +360,13 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O
 		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
 	})
 
+	// Set WatchErrorHandler on SharedIndexInformer if set
+	if ip.watchErrorHandler != nil {
+		if err := sharedIndexInformer.SetWatchErrorHandler(ip.watchErrorHandler); err != nil {
+			return nil, false, err
+		}
+	}
+
 	// Check to see if there is a transformer for this gvk
 	if err := sharedIndexInformer.SetTransform(ip.transform); err != nil {
 		return nil, false, err
@@ -342,13 +386,14 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O
 			scopeName:       mapping.Scope.Name(),
 			disableDeepCopy: ip.unsafeDisableDeepCopy,
 		},
+		stop: make(chan struct{}),
 	}
 	ip.informersByType(obj)[gvk] = i
 
 	// Start the informer in case the InformersMap has started, otherwise it will be
 	// started when the InformersMap starts.
 	if ip.started {
-		ip.startInformerLocked(i.Informer)
+		ip.startInformerLocked(i)
 	}
 	return i, ip.started, nil
 }
vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go (31 changes; generated, vendored)
@@ -109,6 +109,27 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object
 	return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil
 }
 
+func (c *multiNamespaceCache) RemoveInformer(ctx context.Context, obj client.Object) error {
+	// If the object is clusterscoped, get the informer from clusterCache,
+	// if not use the namespaced caches.
+	isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return err
+	}
+	if !isNamespaced {
+		return c.clusterCache.RemoveInformer(ctx, obj)
+	}
+
+	for _, cache := range c.namespaceToCache {
+		err := cache.RemoveInformer(ctx, obj)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) {
 	// If the object is cluster scoped, get the informer from clusterCache,
 	// if not use the namespaced caches.
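For a namespace-restricted cache, removal therefore fans out: cluster-scoped objects hit the cluster cache, namespaced ones every per-namespace cache. A configuration sketch (the namespace names are illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func removeAcrossNamespaces(ctx context.Context, cfg *rest.Config) error {
	c, err := cache.New(cfg, cache.Options{
		DefaultNamespaces: map[string]cache.Config{"ns-a": {}, "ns-b": {}},
	})
	if err != nil {
		return err
	}
	// Removes the ConfigMap informer from both the ns-a and ns-b caches.
	return c.RemoveInformer(ctx, &corev1.ConfigMap{})
}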
@@ -391,3 +412,13 @@ func (i *multiNamespaceInformer) HasSynced() bool {
 	}
 	return true
 }
+
+// IsStopped checks if each namespaced informer has stopped, returns false if any are still running.
+func (i *multiNamespaceInformer) IsStopped() bool {
+	for _, informer := range i.namespaceToInformer {
+		if stopped := informer.IsStopped(); !stopped {
+			return false
+		}
+	}
+	return true
+}