rebase: add controller-runtime dependency

This commit adds controller-runtime and its
dependencies to the vendor directory.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2020-10-21 11:19:41 +05:30
Committed by: mergify[bot]
Parent: 14700b89d1
Commit: 5af3fe5deb

101 changed files with 11946 additions and 230 deletions


@@ -0,0 +1,141 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
toolscache "k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/cache/internal"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
var log = logf.RuntimeLog.WithName("object-cache")
// Cache knows how to load Kubernetes objects, fetch informers to request
// to receive events for Kubernetes objects (at a low-level),
// and add indices to fields on the objects stored in the cache.
type Cache interface {
// Cache acts as a client to objects stored in the cache.
client.Reader
// Cache loads informers and adds field indices.
Informers
}
// Informers knows how to create or fetch informers for different
// group-version-kinds, and add indices to those informers. It's safe to call
// GetInformer from multiple threads.
type Informers interface {
// GetInformer fetches or constructs an informer for the given object that corresponds to a single
// API kind and resource.
GetInformer(ctx context.Context, obj runtime.Object) (Informer, error)
// GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead
// of the underlying object.
GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error)
// Start runs all the informers known to this cache until the given channel is closed.
// It blocks.
Start(stopCh <-chan struct{}) error
// WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache.
WaitForCacheSync(stop <-chan struct{}) bool
// Informers knows how to add indices to the caches (informers) that it manages.
client.FieldIndexer
}
// Informer allows you to interact with the underlying informer.
type Informer interface {
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
// period. Events to a single handler are delivered sequentially, but there is no coordination
// between different handlers.
AddEventHandler(handler toolscache.ResourceEventHandler)
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
// specified resync period. Events to a single handler are delivered sequentially, but there is
// no coordination between different handlers.
AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration)
// AddIndexers adds more indexers to this store. If you call this after you already have data
// in the store, the results are undefined.
AddIndexers(indexers toolscache.Indexers) error
// HasSynced returns true if the informer's underlying store has synced.
HasSynced() bool
}
// Options are the optional arguments for creating a new InformersMap object
type Options struct {
// Scheme is the scheme to use for mapping objects to GroupVersionKinds
Scheme *runtime.Scheme
// Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources
Mapper meta.RESTMapper
// Resync is the base frequency at which the informers are resynced.
// Defaults to defaultResyncTime.
// A 10 percent jitter is added to the resync period of each informer
// so that the informers do not all send list requests simultaneously.
Resync *time.Duration
// Namespace restricts the cache's ListWatch to the desired namespace
// Default watches all namespaces
Namespace string
}
var defaultResyncTime = 10 * time.Hour
// New initializes and returns a new Cache.
func New(config *rest.Config, opts Options) (Cache, error) {
opts, err := defaultOpts(config, opts)
if err != nil {
return nil, err
}
im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace)
return &informerCache{InformersMap: im}, nil
}
func defaultOpts(config *rest.Config, opts Options) (Options, error) {
// Use the default Kubernetes Scheme if unset
if opts.Scheme == nil {
opts.Scheme = scheme.Scheme
}
// Construct a new Mapper if unset
if opts.Mapper == nil {
var err error
opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config)
if err != nil {
log.WithName("setup").Error(err, "Failed to get API Group-Resources")
return opts, fmt.Errorf("could not create RESTMapper from config")
}
}
// Default the resync period to 10 hours if unset
if opts.Resync == nil {
opts.Resync = &defaultResyncTime
}
return opts, nil
}
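For orientation, a minimal usage sketch of the API vendored above (not part of this commit; the kubeconfig path, namespace, and resync value are hypothetical): it builds a rest.Config, creates a namespace-scoped cache via New, starts it, and waits for the initial sync.

package main

import (
	"time"

	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	// Hypothetical kubeconfig path; any *rest.Config works here.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	resync := 30 * time.Minute
	// Scheme and Mapper are left nil so defaultOpts fills them in.
	c, err := cache.New(cfg, cache.Options{
		Namespace: "rook-ceph", // hypothetical namespace
		Resync:    &resync,
	})
	if err != nil {
		panic(err)
	}
	stop := make(chan struct{})
	go func() { _ = c.Start(stop) }() // Start blocks until the stop channel is closed
	if !c.WaitForCacheSync(stop) {    // wait for the initial List of every started informer
		panic("cache did not sync")
	}
}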

vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go (generated, vendored)

@@ -0,0 +1,19 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cache provides object caches that act as caching client.Reader
// instances and help drive Kubernetes-object-based event handlers.
package cache


@@ -0,0 +1,218 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"reflect"
"strings"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/cache/internal"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
var (
_ Informers = &informerCache{}
_ client.Reader = &informerCache{}
_ Cache = &informerCache{}
)
// ErrCacheNotStarted is returned when trying to read from a cache that wasn't started.
type ErrCacheNotStarted struct{}
func (*ErrCacheNotStarted) Error() string {
return "the cache is not started, can not read objects"
}
// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap.
type informerCache struct {
*internal.InformersMap
}
// Get implements Reader
func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runtime.Object) error {
gvk, err := apiutil.GVKForObject(out, ip.Scheme)
if err != nil {
return err
}
started, cache, err := ip.InformersMap.Get(ctx, gvk, out)
if err != nil {
return err
}
if !started {
return &ErrCacheNotStarted{}
}
return cache.Reader.Get(ctx, key, out)
}
// List implements Reader
func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOption) error {
gvk, cacheTypeObj, err := ip.objectTypeForListObject(out)
if err != nil {
return err
}
started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj)
if err != nil {
return err
}
if !started {
return &ErrCacheNotStarted{}
}
return cache.Reader.List(ctx, out, opts...)
}
// objectTypeForListObject tries to find the runtime.Object and associated GVK
// for a single object corresponding to the passed-in list type. We need them
// because they are used as the cache map key.
func (ip *informerCache) objectTypeForListObject(list runtime.Object) (*schema.GroupVersionKind, runtime.Object, error) {
gvk, err := apiutil.GVKForObject(list, ip.Scheme)
if err != nil {
return nil, nil, err
}
if !strings.HasSuffix(gvk.Kind, "List") {
return nil, nil, fmt.Errorf("non-list type %T (kind %q) passed as output", list, gvk)
}
// we need the non-list GVK, so chop off the "List" from the end of the kind
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
_, isUnstructured := list.(*unstructured.UnstructuredList)
var cacheTypeObj runtime.Object
if isUnstructured {
u := &unstructured.Unstructured{}
u.SetGroupVersionKind(gvk)
cacheTypeObj = u
} else {
itemsPtr, err := apimeta.GetItemsPtr(list)
if err != nil {
return nil, nil, err
}
// http://knowyourmeme.com/memes/this-is-fine
elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem()
if elemType.Kind() != reflect.Ptr {
elemType = reflect.PtrTo(elemType)
}
cacheTypeValue := reflect.Zero(elemType)
var ok bool
cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object)
if !ok {
return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface())
}
}
return &gvk, cacheTypeObj, nil
}
// GetInformerForKind returns the informer for the GroupVersionKind
func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
// Map the gvk to an object
obj, err := ip.Scheme.New(gvk)
if err != nil {
return nil, err
}
_, i, err := ip.InformersMap.Get(ctx, gvk, obj)
if err != nil {
return nil, err
}
return i.Informer, err
}
// GetInformer returns the informer for the obj
func (ip *informerCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) {
gvk, err := apiutil.GVKForObject(obj, ip.Scheme)
if err != nil {
return nil, err
}
_, i, err := ip.InformersMap.Get(ctx, gvk, obj)
if err != nil {
return nil, err
}
return i.Informer, err
}
// NeedLeaderElection implements the LeaderElectionRunnable interface
// to indicate that this can be started without requiring the leader lock
func (ip *informerCache) NeedLeaderElection() bool {
return false
}
// IndexField adds an indexer to the underlying cache, using extraction function to get
// value(s) from the given field. This index can then be used by passing a field selector
// to List. For one-to-one compatibility with "normal" field selectors, only return one value.
// The values may be anything. They will automatically be prefixed with the namespace of the
// given object, if present. The objects passed are guaranteed to be objects of the correct type.
func (ip *informerCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error {
informer, err := ip.GetInformer(ctx, obj)
if err != nil {
return err
}
return indexByField(informer, field, extractValue)
}
func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error {
indexFunc := func(objRaw interface{}) ([]string, error) {
// TODO(directxman12): check if this is the correct type?
obj, isObj := objRaw.(runtime.Object)
if !isObj {
return nil, fmt.Errorf("object of type %T is not an Object", objRaw)
}
meta, err := apimeta.Accessor(obj)
if err != nil {
return nil, err
}
ns := meta.GetNamespace()
rawVals := extractor(obj)
var vals []string
if ns == "" {
// if we're not doubling the keys for the namespaced case, just re-use what was returned to us
vals = rawVals
} else {
// if we need to add non-namespaced versions too, double the length
vals = make([]string, len(rawVals)*2)
}
for i, rawVal := range rawVals {
// save a namespaced variant, so that we can ask
// "what are all the object matching a given index *in a given namespace*"
vals[i] = internal.KeyToNamespacedKey(ns, rawVal)
if ns != "" {
// if we have a namespace, also inject a special index key for listing
// regardless of the object namespace
vals[i+len(rawVals)] = internal.KeyToNamespacedKey("", rawVal)
}
}
return vals, nil
}
return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc})
}
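A hypothetical sketch of how the indexing code above is typically consumed (not part of this commit; the field name, node name, and helper function are illustrative): register a field index with IndexField before the cache starts, then List with an exact-match field selector.

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPodsOnNode registers a "spec.nodeName" index on a not-yet-started cache,
// starts it, and lists the pods scheduled to one node.
func listPodsOnNode(ctx context.Context, c cache.Cache, stop chan struct{}) (*corev1.PodList, error) {
	// Indexes must be added before the informer runs; AddIndexers on a
	// started informer returns an error.
	err := c.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o runtime.Object) []string {
		return []string{o.(*corev1.Pod).Spec.NodeName}
	})
	if err != nil {
		return nil, err
	}
	go func() { _ = c.Start(stop) }()
	if !c.WaitForCacheSync(stop) {
		return nil, fmt.Errorf("cache did not sync")
	}
	pods := &corev1.PodList{}
	// Only exact matches are supported by the cache (see requiresExactMatch in internal).
	err = c.List(ctx, pods, client.MatchingFields{"spec.nodeName": "node-1"})
	return pods, err
}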


@@ -0,0 +1,185 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// CacheReader is a client.Reader
var _ client.Reader = &CacheReader{}
// CacheReader wraps a cache.Indexer to implement the client.Reader interface for a single type
type CacheReader struct {
// indexer is the underlying indexer wrapped by this cache.
indexer cache.Indexer
// groupVersionKind is the group-version-kind of the resource.
groupVersionKind schema.GroupVersionKind
}
// Get checks the indexer for the object and writes a copy of it if found
func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.Object) error {
storeKey := objectKeyToStoreKey(key)
// Lookup the object from the indexer cache
obj, exists, err := c.indexer.GetByKey(storeKey)
if err != nil {
return err
}
// Not found, return an error
if !exists {
// Resource gets transformed into Kind in the error anyway, so this is fine
return errors.NewNotFound(schema.GroupResource{
Group: c.groupVersionKind.Group,
Resource: c.groupVersionKind.Kind,
}, key.Name)
}
// Verify the result is a runtime.Object
if _, isObj := obj.(runtime.Object); !isObj {
// This should never happen
return fmt.Errorf("cache contained %T, which is not an Object", obj)
}
// deep copy to avoid mutating cache
// TODO(directxman12): revisit the decision to always deepcopy
obj = obj.(runtime.Object).DeepCopyObject()
// Copy the value of the item in the cache to the returned value
// TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto)
outVal := reflect.ValueOf(out)
objVal := reflect.ValueOf(obj)
if !objVal.Type().AssignableTo(outVal.Type()) {
return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type())
}
reflect.Indirect(outVal).Set(reflect.Indirect(objVal))
out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
return nil
}
// List lists items out of the indexer and writes them to out
func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error {
var objs []interface{}
var err error
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
if listOpts.FieldSelector != nil {
// TODO(directxman12): support more complicated field selectors by
// combining multiple indices, GetIndexers, etc
field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
if !requiresExact {
return fmt.Errorf("non-exact field matches are not supported by the cache")
}
// list all objects by the field selector. If this is namespaced and we have one, ask for the
// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
// namespace.
objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
} else if listOpts.Namespace != "" {
objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
} else {
objs = c.indexer.List()
}
if err != nil {
return err
}
var labelSel labels.Selector
if listOpts.LabelSelector != nil {
labelSel = listOpts.LabelSelector
}
runtimeObjs := make([]runtime.Object, 0, len(objs))
for _, item := range objs {
obj, isObj := item.(runtime.Object)
if !isObj {
return fmt.Errorf("cache contained %T, which is not an Object", obj)
}
meta, err := apimeta.Accessor(obj)
if err != nil {
return err
}
if labelSel != nil {
lbls := labels.Set(meta.GetLabels())
if !labelSel.Matches(lbls) {
continue
}
}
outObj := obj.DeepCopyObject()
outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
runtimeObjs = append(runtimeObjs, outObj)
}
return apimeta.SetList(out, runtimeObjs)
}
// objectKeyToStoreKey converts an object key to store key.
// It's akin to MetaNamespaceKeyFunc. It's separate from
// String to allow keeping the key format easily in sync with
// MetaNamespaceKeyFunc.
func objectKeyToStoreKey(k client.ObjectKey) string {
if k.Namespace == "" {
return k.Name
}
return k.Namespace + "/" + k.Name
}
// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`.
func requiresExactMatch(sel fields.Selector) (field, val string, required bool) {
reqs := sel.Requirements()
if len(reqs) != 1 {
return "", "", false
}
req := reqs[0]
if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals {
return "", "", false
}
return req.Field, req.Value, true
}
// FieldIndexName constructs the name of the index over the given field,
// for use with an indexer.
func FieldIndexName(field string) string {
return "field:" + field
}
// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces
const allNamespacesNamespace = "__all_namespaces"
// KeyToNamespacedKey prefixes the given index key with a namespace
// for use in field selector indexes.
func KeyToNamespacedKey(ns string, baseKey string) string {
if ns != "" {
return ns + "/" + baseKey
}
return allNamespacesNamespace + "/" + baseKey
}
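To make the key scheme above concrete, a hypothetical test-style snippet in this package would produce the following index name and keys (the namespace and field value are made up):

package internal

import "fmt"

// ExampleKeyToNamespacedKey prints the index name and the namespaced and
// all-namespaces keys produced for a single field value.
func ExampleKeyToNamespacedKey() {
	fmt.Println(FieldIndexName("spec.nodeName"))           // field:spec.nodeName
	fmt.Println(KeyToNamespacedKey("rook-ceph", "node-1")) // rook-ceph/node-1
	fmt.Println(KeyToNamespacedKey("", "node-1"))          // __all_namespaces/node-1
}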


@@ -0,0 +1,103 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
// InformersMap creates and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
// It uses a standard parameter codec constructed based on the given generated Scheme.
type InformersMap struct {
// we abstract over the details of structured vs unstructured with the specificInformerMaps
structured *specificInformersMap
unstructured *specificInformersMap
// Scheme maps runtime.Objects to GroupVersionKinds
Scheme *runtime.Scheme
}
// NewInformersMap creates a new InformersMap that can create informers for
// both structured and unstructured objects.
func NewInformersMap(config *rest.Config,
scheme *runtime.Scheme,
mapper meta.RESTMapper,
resync time.Duration,
namespace string) *InformersMap {
return &InformersMap{
structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace),
unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace),
Scheme: scheme,
}
}
// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel.
func (m *InformersMap) Start(stop <-chan struct{}) error {
go m.structured.Start(stop)
go m.unstructured.Start(stop)
<-stop
return nil
}
// WaitForCacheSync waits until all the caches have been started and synced.
func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool {
syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...)
syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...)
if !m.structured.waitForStarted(stop) {
return false
}
if !m.unstructured.waitForStarted(stop) {
return false
}
return cache.WaitForCacheSync(stop, syncedFuncs...)
}
// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns
// the Informer from the map.
func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
isUnstructured = isUnstructured || isUnstructuredList
if isUnstructured {
return m.unstructured.Get(ctx, gvk, obj)
}
return m.structured.Get(ctx, gvk, obj)
}
// newStructuredInformersMap creates a new InformersMap for structured objects.
func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createStructuredListWatch)
}
// newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch)
}
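A hypothetical sketch of the structured/unstructured routing done by Get above (not part of this commit): the same cache serves typed and unstructured objects, picking the matching specificInformersMap based on the object's Go type.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// getInformers fetches an informer for a typed Pod and for an unstructured Pod;
// c is assumed to be a Cache created with cache.New.
func getInformers(ctx context.Context, c cache.Cache) error {
	// Typed object: routed to the structured map.
	if _, err := c.GetInformer(ctx, &corev1.Pod{}); err != nil {
		return err
	}
	// Unstructured object: routed to the unstructured map.
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
	_, err := c.GetInformer(ctx, u)
	return err
}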


@@ -0,0 +1,313 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"fmt"
"math/rand"
"sync"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// createListWatcherFunc knows how to create a ListWatch for a given GroupVersionKind
type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error)
// newSpecificInformersMap returns a new specificInformersMap (like
// the generic InformersMap, except that it doesn't implement WaitForCacheSync).
func newSpecificInformersMap(config *rest.Config,
scheme *runtime.Scheme,
mapper meta.RESTMapper,
resync time.Duration,
namespace string,
createListWatcher createListWatcherFunc) *specificInformersMap {
ip := &specificInformersMap{
config: config,
Scheme: scheme,
mapper: mapper,
informersByGVK: make(map[schema.GroupVersionKind]*MapEntry),
codecs: serializer.NewCodecFactory(scheme),
paramCodec: runtime.NewParameterCodec(scheme),
resync: resync,
startWait: make(chan struct{}),
createListWatcher: createListWatcher,
namespace: namespace,
}
return ip
}
// MapEntry contains the cached data for an Informer
type MapEntry struct {
// Informer is the cached informer
Informer cache.SharedIndexInformer
// CacheReader wraps Informer and implements the CacheReader interface for a single type
Reader CacheReader
}
// specificInformersMap creates and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
// It uses a standard parameter codec constructed based on the given generated Scheme.
type specificInformersMap struct {
// Scheme maps runtime.Objects to GroupVersionKinds
Scheme *runtime.Scheme
// config is used to talk to the apiserver
config *rest.Config
// mapper maps GroupVersionKinds to Resources
mapper meta.RESTMapper
// informersByGVK is the cache of informers keyed by groupVersionKind
informersByGVK map[schema.GroupVersionKind]*MapEntry
// codecs is used to create a new REST client
codecs serializer.CodecFactory
// paramCodec is used by list and watch
paramCodec runtime.ParameterCodec
// stop is the stop channel to stop informers
stop <-chan struct{}
// resync is the base frequency the informers are resynced
// a 10 percent jitter will be added to the resync period between informers
// so that all informers will not send list requests simultaneously.
resync time.Duration
// mu guards access to the map
mu sync.RWMutex
// started is true if the informers have been started
started bool
// startWait is a channel that is closed after the
// informer has been started.
startWait chan struct{}
// createClient knows how to create a client and a list object,
// and allows for abstracting over the particulars of structured vs
// unstructured objects.
createListWatcher createListWatcherFunc
// namespace is the namespace that all ListWatches are restricted to
// default or empty string means all namespaces
namespace string
}
// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel.
// It doesn't return start because it can't return an error, and it's not a runnable directly.
func (ip *specificInformersMap) Start(stop <-chan struct{}) {
func() {
ip.mu.Lock()
defer ip.mu.Unlock()
// Set the stop channel so it can be passed to informers that are added later
ip.stop = stop
// Start each informer
for _, informer := range ip.informersByGVK {
go informer.Informer.Run(stop)
}
// Set started to true so we immediately start any informers added later.
ip.started = true
close(ip.startWait)
}()
<-stop
}
func (ip *specificInformersMap) waitForStarted(stop <-chan struct{}) bool {
select {
case <-ip.startWait:
return true
case <-stop:
return false
}
}
// HasSyncedFuncs returns all the HasSynced functions for the informers in this map.
func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced {
ip.mu.RLock()
defer ip.mu.RUnlock()
syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK))
for _, informer := range ip.informersByGVK {
syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced)
}
return syncedFuncs
}
// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns
// the Informer from the map.
func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
// Return the informer if it is found
i, started, ok := func() (*MapEntry, bool, bool) {
ip.mu.RLock()
defer ip.mu.RUnlock()
i, ok := ip.informersByGVK[gvk]
return i, ip.started, ok
}()
if !ok {
var err error
if i, started, err = ip.addInformerToMap(gvk, obj); err != nil {
return started, nil, err
}
}
if started && !i.Informer.HasSynced() {
// Wait for it to sync before returning the Informer so that folks don't read from a stale cache.
if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) {
return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0)
}
}
return started, i, nil
}
func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) {
ip.mu.Lock()
defer ip.mu.Unlock()
// Check the cache to see if we already have an Informer. If we do, return the Informer.
// This is for the case where 2 routines tried to get the informer when it wasn't in the map
// so neither returned early, but the first one created it.
if i, ok := ip.informersByGVK[gvk]; ok {
return i, ip.started, nil
}
// Create a NewSharedIndexInformer and add it to the map.
var lw *cache.ListWatch
lw, err := ip.createListWatcher(gvk, ip)
if err != nil {
return nil, false, err
}
ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
})
i := &MapEntry{
Informer: ni,
Reader: CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk},
}
ip.informersByGVK[gvk] = i
// Start the informer if needed
// TODO(seans): write thorough tests and document what happens here - can you add indexers?
// can you add eventhandlers?
if ip.started {
go i.Informer.Run(ip.stop)
}
return i, ip.started, nil
}
// createStructuredListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer.
func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
// groupVersionKind to the Resource API we will use.
mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
client, err := apiutil.RESTClientForGVK(gvk, ip.config, ip.codecs)
if err != nil {
return nil, err
}
listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List")
listObj, err := ip.Scheme.New(listGVK)
if err != nil {
return nil, err
}
// TODO: the functions that make use of this ListWatch should be adapted to
// pass in their own contexts instead of relying on this fixed one here.
ctx := context.TODO()
// Create a new ListWatch for the obj
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
res := listObj.DeepCopyObject()
isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res)
return res, err
},
// Setup the watch function
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
// Watch needs to be set to true separately
opts.Watch = true
isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
return client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx)
},
}, nil
}
func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
// groupVersionKind to the Resource API we will use.
mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(ip.config)
if err != nil {
return nil, err
}
// TODO: the functions that make use of this ListWatch should be adapted to
// pass in their own contexts instead of relying on this fixed one here.
ctx := context.TODO()
// Create a new ListWatch for the obj
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts)
}
return dynamicClient.Resource(mapping.Resource).List(ctx, opts)
},
// Setup the watch function
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
// Watch needs to be set to true separately
opts.Watch = true
if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts)
}
return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts)
},
}, nil
}
// resyncPeriod returns a function which generates a duration each time it is
// invoked; this is so that multiple controllers don't get into lock-step and all
// hammer the apiserver with list requests simultaneously.
func resyncPeriod(resync time.Duration) func() time.Duration {
return func() time.Duration {
// the factor will fall into [0.9, 1.1)
factor := rand.Float64()/5.0 + 0.9
return time.Duration(float64(resync.Nanoseconds()) * factor)
}
}
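A standalone sketch (mirroring, not importing, the unexported resyncPeriod above) showing the jitter: each informer ends up with a resync period in [0.9*resync, 1.1*resync).

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	resync := 10 * time.Hour // the default used by the cache package
	for i := 0; i < 3; i++ {
		factor := rand.Float64()/5.0 + 0.9 // uniform in [0.9, 1.1)
		fmt.Println(time.Duration(float64(resync.Nanoseconds()) * factor))
	}
}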


@@ -0,0 +1,222 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
toolscache "k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// NewCacheFunc is a function for creating a new Cache from a rest.Config and Options
type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error)
// MultiNamespacedCacheBuilder returns a builder function for creating a new multi-namespaced cache.
// This will scope the cache to a list of namespaces. Listing for all namespaces
// will list for all the namespaces that this cache knows about. Note that this is not intended
// to be used for excluding namespaces; that is better done via a Predicate. Also note that
// you may face performance issues when using this with a high number of namespaces.
func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
return func(config *rest.Config, opts Options) (Cache, error) {
opts, err := defaultOpts(config, opts)
if err != nil {
return nil, err
}
caches := map[string]Cache{}
for _, ns := range namespaces {
opts.Namespace = ns
c, err := New(config, opts)
if err != nil {
return nil, err
}
caches[ns] = c
}
return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme}, nil
}
}
// multiNamespaceCache knows how to handle multiple namespaced caches
// Use this feature when scoping permissions for your
// operator to a list of namespaces instead of watching every namespace
// in the cluster.
type multiNamespaceCache struct {
namespaceToCache map[string]Cache
Scheme *runtime.Scheme
}
var _ Cache = &multiNamespaceCache{}
// Methods for multiNamespaceCache to conform to the Informers interface
func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) {
informers := map[string]Informer{}
for ns, cache := range c.namespaceToCache {
informer, err := cache.GetInformer(ctx, obj)
if err != nil {
return nil, err
}
informers[ns] = informer
}
return &multiNamespaceInformer{namespaceToInformer: informers}, nil
}
func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
informers := map[string]Informer{}
for ns, cache := range c.namespaceToCache {
informer, err := cache.GetInformerForKind(ctx, gvk)
if err != nil {
return nil, err
}
informers[ns] = informer
}
return &multiNamespaceInformer{namespaceToInformer: informers}, nil
}
func (c *multiNamespaceCache) Start(stopCh <-chan struct{}) error {
for ns, cache := range c.namespaceToCache {
go func(ns string, cache Cache) {
err := cache.Start(stopCh)
if err != nil {
log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns)
}
}(ns, cache)
}
<-stopCh
return nil
}
func (c *multiNamespaceCache) WaitForCacheSync(stop <-chan struct{}) bool {
synced := true
for _, cache := range c.namespaceToCache {
if s := cache.WaitForCacheSync(stop); !s {
synced = s
}
}
return synced
}
func (c *multiNamespaceCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error {
for _, cache := range c.namespaceToCache {
if err := cache.IndexField(ctx, obj, field, extractValue); err != nil {
return err
}
}
return nil
}
func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
cache, ok := c.namespaceToCache[key.Namespace]
if !ok {
return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key)
}
return cache.Get(ctx, key, obj)
}
// List returns all the objects in the namespaces that the cache is watching when asked to list across all namespaces.
func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error {
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
if listOpts.Namespace != corev1.NamespaceAll {
cache, ok := c.namespaceToCache[listOpts.Namespace]
if !ok {
return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace)
}
return cache.List(ctx, list, opts...)
}
listAccessor, err := meta.ListAccessor(list)
if err != nil {
return err
}
allItems, err := apimeta.ExtractList(list)
if err != nil {
return err
}
var resourceVersion string
for _, cache := range c.namespaceToCache {
listObj := list.DeepCopyObject()
err = cache.List(ctx, listObj, opts...)
if err != nil {
return err
}
items, err := apimeta.ExtractList(listObj)
if err != nil {
return err
}
accessor, err := meta.ListAccessor(listObj)
if err != nil {
return fmt.Errorf("object: %T must be a list type", list)
}
allItems = append(allItems, items...)
// The last list call should have the most correct resource version.
resourceVersion = accessor.GetResourceVersion()
}
listAccessor.SetResourceVersion(resourceVersion)
return apimeta.SetList(list, allItems)
}
// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces
type multiNamespaceInformer struct {
namespaceToInformer map[string]Informer
}
var _ Informer = &multiNamespaceInformer{}
// AddEventHandler adds the handler to each namespaced informer
func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) {
for _, informer := range i.namespaceToInformer {
informer.AddEventHandler(handler)
}
}
// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer
func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) {
for _, informer := range i.namespaceToInformer {
informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod)
}
}
// AddIndexers adds the indexer for each namespaced informer
func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error {
for _, informer := range i.namespaceToInformer {
err := informer.AddIndexers(indexers)
if err != nil {
return err
}
}
return nil
}
// HasSynced checks if each namespaced informer has synced
func (i *multiNamespaceInformer) HasSynced() bool {
for _, informer := range i.namespaceToInformer {
if ok := informer.HasSynced(); !ok {
return ok
}
}
return true
}
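A hypothetical sketch of how MultiNamespacedCacheBuilder is commonly wired into a controller-runtime manager via its NewCache option (not part of this commit; the namespaces are placeholders).

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// All cache-backed reads and watches are restricted to these namespaces.
		NewCache: cache.MultiNamespacedCacheBuilder([]string{"rook-ceph", "default"}),
	})
	if err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}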