Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
rebase: update controller-runtime package to v0.9.2
This commit updates controller-runtime to v0.9.2 and updates persistentvolume.go to pass a real context through the affected functions and function calls instead of context.TODO().

Signed-off-by: Rakshith R <rar@redhat.com>
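The heart of this rebase is controller-runtime's v0.9 switch from stop channels to contexts. As a hedged illustration only (the type below is hypothetical, not from ceph-csi), a Runnable migrates like this:

```go
package example

import "context"

// watcher is a stand-in for any manager Runnable.
type watcher struct{}

// Before v0.9 this was Start(stopCh <-chan struct{}) error; the manager now
// hands each Runnable a context, so helpers no longer need context.TODO().
func (w *watcher) Start(ctx context.Context) error {
	<-ctx.Done() // block until shutdown is requested
	return nil
}
```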
vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go (66 changes, generated, vendored)
@@ -52,24 +52,24 @@ type Cache interface {
 type Informers interface {
 	// GetInformer fetches or constructs an informer for the given object that corresponds to a single
 	// API kind and resource.
-	GetInformer(ctx context.Context, obj runtime.Object) (Informer, error)
+	GetInformer(ctx context.Context, obj client.Object) (Informer, error)
 
 	// GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead
 	// of the underlying object.
 	GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error)
 
-	// Start runs all the informers known to this cache until the given channel is closed.
+	// Start runs all the informers known to this cache until the context is closed.
 	// It blocks.
-	Start(stopCh <-chan struct{}) error
+	Start(ctx context.Context) error
 
 	// WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache.
-	WaitForCacheSync(stop <-chan struct{}) bool
+	WaitForCacheSync(ctx context.Context) bool
 
 	// Informers knows how to add indices to the caches (informers) that it manages.
 	client.FieldIndexer
 }
 
-// Informer - informer allows you interact with the underlying informer
+// Informer - informer allows you interact with the underlying informer.
 type Informer interface {
 	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
 	// period. Events to a single handler are delivered sequentially, but there is no coordination
@@ -82,11 +82,14 @@ type Informer interface {
 	// AddIndexers adds more indexers to this store. If you call this after you already have data
 	// in the store, the results are undefined.
 	AddIndexers(indexers toolscache.Indexers) error
-	//HasSynced return true if the informers underlying store has synced
+	// HasSynced return true if the informers underlying store has synced.
 	HasSynced() bool
 }
 
-// Options are the optional arguments for creating a new InformersMap object
+// SelectorsByObject associate a client.Object's GVK to a field/label selector.
+type SelectorsByObject map[client.Object]internal.Selector
+
+// Options are the optional arguments for creating a new InformersMap object.
 type Options struct {
 	// Scheme is the scheme to use for mapping objects to GroupVersionKinds
 	Scheme *runtime.Scheme
@@ -103,6 +106,13 @@ type Options struct {
 	// Namespace restricts the cache's ListWatch to the desired namespace
 	// Default watches all namespaces
 	Namespace string
+
+	// SelectorsByObject restricts the cache's ListWatch to the desired
+	// fields per GVK at the specified object, the map's value must implement
+	// Selector [1] using for example a Set [2]
+	// [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector
+	// [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set
+	SelectorsByObject SelectorsByObject
 }
 
 var defaultResyncTime = 10 * time.Hour
@@ -113,10 +123,38 @@ func New(config *rest.Config, opts Options) (Cache, error) {
 	if err != nil {
 		return nil, err
 	}
-	im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace)
+	selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.Scheme)
+	if err != nil {
+		return nil, err
+	}
+	im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK)
 	return &informerCache{InformersMap: im}, nil
 }
 
+// BuilderWithOptions returns a Cache constructor that will build the a cache
+// honoring the options argument, this is useful to specify options like
+// SelectorsByObject
+// WARNING: if SelectorsByObject is specified. filtered out resources are not
+// returned.
+func BuilderWithOptions(options Options) NewCacheFunc {
+	return func(config *rest.Config, opts Options) (Cache, error) {
+		if opts.Scheme == nil {
+			opts.Scheme = options.Scheme
+		}
+		if opts.Mapper == nil {
+			opts.Mapper = options.Mapper
+		}
+		if opts.Resync == nil {
+			opts.Resync = options.Resync
+		}
+		if opts.Namespace == "" {
+			opts.Namespace = options.Namespace
+		}
+		opts.SelectorsByObject = options.SelectorsByObject
+		return New(config, opts)
+	}
+}
+
 func defaultOpts(config *rest.Config, opts Options) (Options, error) {
 	// Use the default Kubernetes Scheme if unset
 	if opts.Scheme == nil {
@@ -139,3 +177,15 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
 	}
 	return opts, nil
 }
+
+func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) {
+	selectorsByGVK := internal.SelectorsByGVK{}
+	for object, selector := range selectorsByObject {
+		gvk, err := apiutil.GVKForObject(object, scheme)
+		if err != nil {
+			return nil, err
+		}
+		selectorsByGVK[gvk] = selector
+	}
+	return selectorsByGVK, nil
+}
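The SelectorsByObject option and BuilderWithOptions constructor added above let consumers filter what the cache watches. A minimal sketch of the intended usage, with a made-up app=csi label (not ceph-csi code):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func newFilteredManager() (manager.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		// Only Pods labelled app=csi are listed and watched; per the
		// WARNING above, everything else is invisible to cached reads.
		NewCache: cache.BuilderWithOptions(cache.Options{
			SelectorsByObject: cache.SelectorsByObject{
				&corev1.Pod{}: {Label: labels.SelectorFromSet(labels.Set{"app": "csi"})},
			},
		}),
	})
}
```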
vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go (23 changes, generated, vendored)
@@ -50,8 +50,8 @@ type informerCache struct {
 	*internal.InformersMap
 }
 
-// Get implements Reader
-func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runtime.Object) error {
+// Get implements Reader.
+func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error {
 	gvk, err := apiutil.GVKForObject(out, ip.Scheme)
 	if err != nil {
 		return err
@@ -68,9 +68,8 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runtime.Object) error {
 	return cache.Reader.Get(ctx, key, out)
 }
 
-// List implements Reader
-func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOption) error {
-
+// List implements Reader.
+func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error {
 	gvk, cacheTypeObj, err := ip.objectTypeForListObject(out)
 	if err != nil {
 		return err
@@ -91,7 +90,7 @@ func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOption) error {
 // objectTypeForListObject tries to find the runtime.Object and associated GVK
 // for a single object corresponding to the passed-in list type. We need them
 // because they are used as cache map key.
-func (ip *informerCache) objectTypeForListObject(list runtime.Object) (*schema.GroupVersionKind, runtime.Object, error) {
+func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) {
 	gvk, err := apiutil.GVKForObject(list, ip.Scheme)
 	if err != nil {
 		return nil, nil, err
@@ -130,7 +129,7 @@ func (ip *informerCache) objectTypeForListObject(list runtime.Object) (*schema.GroupVersionKind, runtime.Object, error) {
 	return &gvk, cacheTypeObj, nil
 }
 
-// GetInformerForKind returns the informer for the GroupVersionKind
+// GetInformerForKind returns the informer for the GroupVersionKind.
 func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
 	// Map the gvk to an object
 	obj, err := ip.Scheme.New(gvk)
@@ -145,8 +144,8 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
 	return i.Informer, err
 }
 
-// GetInformer returns the informer for the obj
-func (ip *informerCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) {
+// GetInformer returns the informer for the obj.
+func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) {
 	gvk, err := apiutil.GVKForObject(obj, ip.Scheme)
 	if err != nil {
 		return nil, err
@@ -160,7 +159,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) {
 }
 
 // NeedLeaderElection implements the LeaderElectionRunnable interface
-// to indicate that this can be started without requiring the leader lock
+// to indicate that this can be started without requiring the leader lock.
 func (ip *informerCache) NeedLeaderElection() bool {
 	return false
 }
@@ -170,7 +169,7 @@ func (ip *informerCache) NeedLeaderElection() bool {
 // to List. For one-to-one compatibility with "normal" field selectors, only return one value.
 // The values may be anything. They will automatically be prefixed with the namespace of the
 // given object, if present. The objects passed are guaranteed to be objects of the correct type.
-func (ip *informerCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error {
+func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
 	informer, err := ip.GetInformer(ctx, obj)
 	if err != nil {
 		return err
@@ -181,7 +180,7 @@ func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
 func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error {
 	indexFunc := func(objRaw interface{}) ([]string, error) {
 		// TODO(directxman12): check if this is the correct type?
-		obj, isObj := objRaw.(runtime.Object)
+		obj, isObj := objRaw.(client.Object)
 		if !isObj {
 			return nil, fmt.Errorf("object of type %T is not an Object", objRaw)
 		}
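With these signature changes, cached reads are typed against client.Object and client.ObjectList. A small sketch of a read through a started cache.Cache:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPods shows the v0.9 shapes: List takes a client.ObjectList, so a bare
// runtime.Object no longer satisfies the Reader interface.
func listPods(ctx context.Context, c cache.Cache) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods, client.InNamespace("default")); err != nil {
		return nil, err
	}
	return pods, nil
}
```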
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go (41 changes, generated, vendored)
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"reflect"
 
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -29,23 +29,30 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/client-go/tools/cache"
 
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-// CacheReader is a client.Reader
+// CacheReader is a client.Reader.
 var _ client.Reader = &CacheReader{}
 
-// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type
+// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type.
 type CacheReader struct {
+	// indexer is the underlying indexer wrapped by this cache.
 	indexer cache.Indexer
+
+	// groupVersionKind is the group-version-kind of the resource.
 	groupVersionKind schema.GroupVersionKind
+
+	// scopeName is the scope of the resource (namespaced or cluster-scoped).
+	scopeName apimeta.RESTScopeName
 }
 
-// Get checks the indexer for the object and writes a copy of it if found
-func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.Object) error {
+// Get checks the indexer for the object and writes a copy of it if found.
+func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error {
+	if c.scopeName == apimeta.RESTScopeNameRoot {
+		key.Namespace = ""
+	}
 	storeKey := objectKeyToStoreKey(key)
 
 	// Lookup the object from the indexer cache
@@ -57,7 +64,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.Object) error {
 	// Not found, return an error
 	if !exists {
 		// Resource gets transformed into Kind in the error anyway, so this is fine
-		return errors.NewNotFound(schema.GroupResource{
+		return apierrors.NewNotFound(schema.GroupResource{
 			Group:    c.groupVersionKind.Group,
 			Resource: c.groupVersionKind.Kind,
 		}, key.Name)
@@ -86,15 +93,16 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.Object) error {
 	return nil
 }
 
-// List lists items out of the indexer and writes them to out
-func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error {
+// List lists items out of the indexer and writes them to out.
+func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error {
 	var objs []interface{}
 	var err error
 
 	listOpts := client.ListOptions{}
 	listOpts.ApplyOptions(opts)
 
-	if listOpts.FieldSelector != nil {
+	switch {
+	case listOpts.FieldSelector != nil:
 		// TODO(directxman12): support more complicated field selectors by
 		// combining multiple indices, GetIndexers, etc
 		field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
@@ -105,9 +113,9 @@ func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error {
 		// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
 		// namespace.
 		objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
-	} else if listOpts.Namespace != "" {
+	case listOpts.Namespace != "":
 		objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
-	} else {
+	default:
 		objs = c.indexer.List()
 	}
 	if err != nil {
@@ -118,8 +126,15 @@ func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error {
 		labelSel = listOpts.LabelSelector
 	}
 
+	limitSet := listOpts.Limit > 0
+
 	runtimeObjs := make([]runtime.Object, 0, len(objs))
-	for _, item := range objs {
+	for i, item := range objs {
+		// if the Limit option is set and the number of items
+		// listed exceeds this limit, then stop reading.
+		if limitSet && int64(i) >= listOpts.Limit {
+			break
+		}
 		obj, isObj := item.(runtime.Object)
 		if !isObj {
 			return fmt.Errorf("cache contained %T, which is not an Object", obj)
@@ -172,7 +187,7 @@ func FieldIndexName(field string) string {
 	return "field:" + field
 }
 
-// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces
+// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces.
 const allNamespacesNamespace = "__all_namespaces"
 
 // KeyToNamespacedKey prefixes the given index key with a namespace
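The for-loop change above makes the cache honor the client.Limit list option. A sketch of a caller relying on it (assuming a synced cache.Cache):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listSomePods stops after at most ten items: CacheReader.List now breaks
// out of its loop once int64(i) >= listOpts.Limit.
func listSomePods(ctx context.Context, c cache.Cache) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods, client.InNamespace("default"), client.Limit(10)); err != nil {
		return nil, err
	}
	return pods, nil
}
```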
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go (71 changes, generated, vendored)
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -31,10 +32,12 @@ import (
 // InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs.
 // It uses a standard parameter codec constructed based on the given generated Scheme.
 type InformersMap struct {
-	// we abstract over the details of structured vs unstructured with the specificInformerMaps
+	// we abstract over the details of structured/unstructured/metadata with the specificInformerMaps
+	// TODO(directxman12): genericize this over different projections now that we have 3 different maps
+
 	structured   *specificInformersMap
 	unstructured *specificInformersMap
+	metadata     *specificInformersMap
 
 	// Scheme maps runtime.Objects to GroupVersionKinds
 	Scheme *runtime.Scheme
@@ -46,58 +49,76 @@ func NewInformersMap(config *rest.Config,
 	scheme *runtime.Scheme,
 	mapper meta.RESTMapper,
 	resync time.Duration,
-	namespace string) *InformersMap {
-
+	namespace string,
+	selectors SelectorsByGVK,
+) *InformersMap {
 	return &InformersMap{
-		structured:   newStructuredInformersMap(config, scheme, mapper, resync, namespace),
-		unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace),
+		structured:   newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors),
+		unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors),
+		metadata:     newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors),
 
 		Scheme: scheme,
 	}
 }
 
-// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel.
-func (m *InformersMap) Start(stop <-chan struct{}) error {
-	go m.structured.Start(stop)
-	go m.unstructured.Start(stop)
-	<-stop
+// Start calls Run on each of the informers and sets started to true. Blocks on the context.
+func (m *InformersMap) Start(ctx context.Context) error {
+	go m.structured.Start(ctx)
+	go m.unstructured.Start(ctx)
+	go m.metadata.Start(ctx)
+	<-ctx.Done()
 	return nil
 }
 
 // WaitForCacheSync waits until all the caches have been started and synced.
-func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool {
+func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool {
 	syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...)
 	syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...)
+	syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...)
 
-	if !m.structured.waitForStarted(stop) {
+	if !m.structured.waitForStarted(ctx) {
 		return false
 	}
-	if !m.unstructured.waitForStarted(stop) {
+	if !m.unstructured.waitForStarted(ctx) {
 		return false
 	}
-	return cache.WaitForCacheSync(stop, syncedFuncs...)
+	if !m.metadata.waitForStarted(ctx) {
+		return false
+	}
+	return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...)
 }
 
 // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns
 // the Informer from the map.
 func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
-	_, isUnstructured := obj.(*unstructured.Unstructured)
-	_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
-	isUnstructured = isUnstructured || isUnstructuredList
-
-	if isUnstructured {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return m.unstructured.Get(ctx, gvk, obj)
+	case *unstructured.UnstructuredList:
+		return m.unstructured.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadata:
+		return m.metadata.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadataList:
+		return m.metadata.Get(ctx, gvk, obj)
+	default:
+		return m.structured.Get(ctx, gvk, obj)
 	}
-
-	return m.structured.Get(ctx, gvk, obj)
 }
 
 // newStructuredInformersMap creates a new InformersMap for structured objects.
-func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
-	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createStructuredListWatch)
+func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
	namespace string, selectors SelectorsByGVK) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, createStructuredListWatch)
 }
 
 // newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
-func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
-	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch)
+func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
	namespace string, selectors SelectorsByGVK) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, createUnstructuredListWatch)
 }
+
+// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
+func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, createMetadataListWatch)
+}
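The new metadata map gives the cache a third projection. A sketch of how a caller reaches it: requesting a *metav1.PartialObjectMetadata (with the GVK stamped on, since the type is scheme-less) routes the Get to the metadata informers:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func getDeploymentMeta(ctx context.Context, c cache.Cache, key client.ObjectKey) (*metav1.PartialObjectMetadata, error) {
	m := &metav1.PartialObjectMetadata{}
	m.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	if err := c.Get(ctx, key, m); err != nil {
		return nil, err
	}
	return m, nil // only metadata is watched and stored for this type
}
```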
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go (103 changes, generated, vendored)
@@ -31,13 +31,17 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
 
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 )
 
-// clientListWatcherFunc knows how to create a ListWatcher
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+// clientListWatcherFunc knows how to create a ListWatcher.
 type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error)
 
 // newSpecificInformersMap returns a new specificInformersMap (like
@@ -47,6 +51,7 @@ func newSpecificInformersMap(config *rest.Config,
 	mapper meta.RESTMapper,
 	resync time.Duration,
 	namespace string,
+	selectors SelectorsByGVK,
 	createListWatcher createListWatcherFunc) *specificInformersMap {
 	ip := &specificInformersMap{
 		config:            config,
@@ -59,11 +64,12 @@ func newSpecificInformersMap(config *rest.Config,
 		startWait:         make(chan struct{}),
 		createListWatcher: createListWatcher,
 		namespace:         namespace,
+		selectors:         selectors,
 	}
 	return ip
 }
 
-// MapEntry contains the cached data for an Informer
+// MapEntry contains the cached data for an Informer.
 type MapEntry struct {
 	// Informer is the cached informer
 	Informer cache.SharedIndexInformer
@@ -119,35 +125,39 @@ type specificInformersMap struct {
 	// namespace is the namespace that all ListWatches are restricted to
 	// default or empty string means all namespaces
 	namespace string
+
+	// selectors are the label or field selectors that will be added to the
+	// ListWatch ListOptions.
+	selectors SelectorsByGVK
 }
 
-// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel.
+// Start calls Run on each of the informers and sets started to true. Blocks on the context.
 // It doesn't return start because it can't return an error, and it's not a runnable directly.
-func (ip *specificInformersMap) Start(stop <-chan struct{}) {
+func (ip *specificInformersMap) Start(ctx context.Context) {
 	func() {
 		ip.mu.Lock()
 		defer ip.mu.Unlock()
 
 		// Set the stop channel so it can be passed to informers that are added later
-		ip.stop = stop
+		ip.stop = ctx.Done()
 
 		// Start each informer
 		for _, informer := range ip.informersByGVK {
-			go informer.Informer.Run(stop)
+			go informer.Informer.Run(ctx.Done())
 		}
 
 		// Set started to true so we immediately start any informers added later.
 		ip.started = true
 		close(ip.startWait)
 	}()
-	<-stop
+	<-ctx.Done()
 }
 
-func (ip *specificInformersMap) waitForStarted(stop <-chan struct{}) bool {
+func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool {
 	select {
 	case <-ip.startWait:
 		return true
-	case <-stop:
+	case <-ctx.Done():
 		return false
 	}
 }
@@ -211,9 +221,20 @@ func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) {
 	ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{
 		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
 	})
+	rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, false, err
+	}
+
+	switch obj.(type) {
+	case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList:
+		ni = metadataSharedIndexInformerPreserveGVK(gvk, ni)
+	default:
+	}
+
 	i := &MapEntry{
 		Informer: ni,
-		Reader:   CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk},
+		Reader:   CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk, scopeName: rm.Scope.Name()},
 	}
 	ip.informersByGVK[gvk] = i
 
@@ -235,7 +256,7 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 		return nil, err
 	}
 
-	client, err := apiutil.RESTClientForGVK(gvk, ip.config, ip.codecs)
+	client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs)
 	if err != nil {
 		return nil, err
 	}
@@ -251,6 +272,7 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 	// Create a new ListWatch for the obj
 	return &cache.ListWatch{
 		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			ip.selectors[gvk].ApplyToList(&opts)
 			res := listObj.DeepCopyObject()
 			isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
 			err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res)
@@ -258,6 +280,7 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 		},
 		// Setup the watch function
 		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			ip.selectors[gvk].ApplyToList(&opts)
 			// Watch needs to be set to true separately
 			opts.Watch = true
 			isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
@@ -273,7 +296,12 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 	if err != nil {
 		return nil, err
 	}
-	dynamicClient, err := dynamic.NewForConfig(ip.config)
+
+	// If the rest configuration has a negotiated serializer passed in,
+	// we should remove it and use the one that the dynamic client sets for us.
+	cfg := rest.CopyConfig(ip.config)
+	cfg.NegotiatedSerializer = nil
+	dynamicClient, err := dynamic.NewForConfig(cfg)
 	if err != nil {
 		return nil, err
 	}
@@ -284,6 +312,7 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 	// Create a new ListWatch for the obj
 	return &cache.ListWatch{
 		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			ip.selectors[gvk].ApplyToList(&opts)
 			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
 				return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts)
 			}
@@ -291,6 +320,7 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 		},
 		// Setup the watch function
 		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			ip.selectors[gvk].ApplyToList(&opts)
 			// Watch needs to be set to true separately
 			opts.Watch = true
 			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
@@ -301,13 +331,58 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
 	}, nil
 }
 
+func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
+	// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
+	// groupVersionKind to the Resource API we will use.
+	mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+
+	// Always clear the negotiated serializer and use the one
+	// set from the metadata client.
+	cfg := rest.CopyConfig(ip.config)
+	cfg.NegotiatedSerializer = nil
+
+	// grab the metadata client
+	client, err := metadata.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: the functions that make use of this ListWatch should be adapted to
+	// pass in their own contexts instead of relying on this fixed one here.
+	ctx := context.TODO()
+
+	// create the relevant listwatch
+	return &cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			ip.selectors[gvk].ApplyToList(&opts)
+			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts)
+			}
+			return client.Resource(mapping.Resource).List(ctx, opts)
+		},
+		// Setup the watch function
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			ip.selectors[gvk].ApplyToList(&opts)
+			// Watch needs to be set to true separately
+			opts.Watch = true
+			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts)
+			}
+			return client.Resource(mapping.Resource).Watch(ctx, opts)
+		},
+	}, nil
+}
+
 // resyncPeriod returns a function which generates a duration each time it is
 // invoked; this is so that multiple controllers don't get into lock-step and all
 // hammer the apiserver with list requests simultaneously.
 func resyncPeriod(resync time.Duration) func() time.Duration {
 	return func() time.Duration {
 		// the factor will fall into [0.9, 1.1)
-		factor := rand.Float64()/5.0 + 0.9
+		factor := rand.Float64()/5.0 + 0.9 //nolint:gosec
 		return time.Duration(float64(resync.Nanoseconds()) * factor)
 	}
 }
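For reference, the jitter computed by resyncPeriod above works like this standalone sketch: each informer's resync interval is scaled by a factor in [0.9, 1.1), so with resync = 10h the effective period lands in [9h, 11h) and controllers avoid relisting in lock-step:

```go
package example

import (
	"math/rand"
	"time"
)

// resyncJitter mirrors the vendored helper: rand.Float64() is in [0, 1),
// so factor is in [0.9, 1.1).
func resyncJitter(resync time.Duration) time.Duration {
	factor := rand.Float64()/5.0 + 0.9
	return time.Duration(float64(resync.Nanoseconds()) * factor)
}
```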
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/metadata_infomer_wrapper.go (71 changes, generated, vendored, new file)
@@ -0,0 +1,71 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
+)
+
+func metadataSharedIndexInformerPreserveGVK(gvk schema.GroupVersionKind, si cache.SharedIndexInformer) cache.SharedIndexInformer {
+	return &sharedInformerWrapper{
+		gvk:                 gvk,
+		SharedIndexInformer: si,
+	}
+}
+
+type sharedInformerWrapper struct {
+	gvk schema.GroupVersionKind
+	cache.SharedIndexInformer
+}
+
+func (s *sharedInformerWrapper) AddEventHandler(handler cache.ResourceEventHandler) {
+	s.SharedIndexInformer.AddEventHandler(&handlerPreserveGVK{s.gvk, handler})
+}
+
+func (s *sharedInformerWrapper) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) {
+	s.SharedIndexInformer.AddEventHandlerWithResyncPeriod(&handlerPreserveGVK{s.gvk, handler}, resyncPeriod)
+}
+
+type handlerPreserveGVK struct {
+	gvk schema.GroupVersionKind
+	cache.ResourceEventHandler
+}
+
+func (h *handlerPreserveGVK) resetGroupVersionKind(obj interface{}) {
+	if v, ok := obj.(schema.ObjectKind); ok {
+		v.SetGroupVersionKind(h.gvk)
+	}
+}
+
+func (h *handlerPreserveGVK) OnAdd(obj interface{}) {
+	h.resetGroupVersionKind(obj)
+	h.ResourceEventHandler.OnAdd(obj)
+}
+
+func (h *handlerPreserveGVK) OnUpdate(oldObj, newObj interface{}) {
+	h.resetGroupVersionKind(oldObj)
+	h.resetGroupVersionKind(newObj)
+	h.ResourceEventHandler.OnUpdate(oldObj, newObj)
+}
+
+func (h *handlerPreserveGVK) OnDelete(obj interface{}) {
+	h.resetGroupVersionKind(obj)
+	h.ResourceEventHandler.OnDelete(obj)
+}
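This wrapper exists because the apiserver strips type information during decoding (see the kubernetes#80609 reference later in this diff), so handlers on metadata-only informers would otherwise see objects with an empty GVK. A sketch of a handler that depends on the re-stamped GVK:

```go
package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	toolscache "k8s.io/client-go/tools/cache"
)

// Without handlerPreserveGVK, m.GroupVersionKind() below would be empty.
var gvkLogger = toolscache.ResourceEventHandlerFuncs{
	AddFunc: func(obj interface{}) {
		if m, ok := obj.(*metav1.PartialObjectMetadata); ok {
			fmt.Println("added:", m.GroupVersionKind(), m.GetNamespace(), m.GetName())
		}
	},
}
```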
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go (43 changes, generated, vendored, new file)
@@ -0,0 +1,43 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SelectorsByGVK associate a GroupVersionKind to a field/label selector.
+type SelectorsByGVK map[schema.GroupVersionKind]Selector
+
+// Selector specify the label/field selector to fill in ListOptions.
+type Selector struct {
+	Label labels.Selector
+	Field fields.Selector
+}
+
+// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed.
+func (s Selector) ApplyToList(listOpts *metav1.ListOptions) {
+	if s.Label != nil {
+		listOpts.LabelSelector = s.Label.String()
+	}
+	if s.Field != nil {
+		listOpts.FieldSelector = s.Field.String()
+	}
+}
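To make ApplyToList concrete, a short sketch of the transformation it performs on the wire-level ListOptions (selector values are made up):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

// After this runs, opts.LabelSelector == "app=csi" and
// opts.FieldSelector == "metadata.name=foo", which is exactly what the
// cache's ListWatch sends to the apiserver.
func applySelectors(opts *metav1.ListOptions) {
	opts.LabelSelector = labels.SelectorFromSet(labels.Set{"app": "csi"}).String()
	opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", "foo").String()
}
```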
vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go (155 changes, generated, vendored)
@@ -22,21 +22,25 @@ import (
 	"time"
 
-	"k8s.io/apimachinery/pkg/api/meta"
+	corev1 "k8s.io/api/core/v1"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/rest"
 	toolscache "k8s.io/client-go/tools/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
 )
 
-// NewCacheFunc - Function for creating a new cache from the options and a rest config
+// NewCacheFunc - Function for creating a new cache from the options and a rest config.
 type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error)
 
+// a new global namespaced cache to handle cluster scoped resources.
+const globalCache = "_cluster-scope"
+
 // MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache.
 // This will scope the cache to a list of namespaces. Listing for all namespaces
-// will list for all the namespaces that this knows about. Note that this is not intended
+// will list for all the namespaces that this knows about. By default this will create
+// a global cache for cluster scoped resource. Note that this is not intended
 // to be used for excluding namespaces, this is better done via a Predicate. Also note that
 // you may face performance issues when using this with a high number of namespaces.
 func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
@@ -45,7 +49,15 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
 		if err != nil {
 			return nil, err
 		}
+
 		caches := map[string]Cache{}
+
+		// create a cache for cluster scoped resources
+		gCache, err := New(config, opts)
+		if err != nil {
+			return nil, fmt.Errorf("error creating global cache %v", err)
+		}
+
 		for _, ns := range namespaces {
 			opts.Namespace = ns
 			c, err := New(config, opts)
@@ -54,7 +66,7 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
 			}
 			caches[ns] = c
 		}
-		return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme}, nil
+		return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil
 	}
 }
 
@@ -65,13 +77,32 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
 type multiNamespaceCache struct {
 	namespaceToCache map[string]Cache
 	Scheme           *runtime.Scheme
+	RESTMapper       apimeta.RESTMapper
+	clusterCache     Cache
 }
 
 var _ Cache = &multiNamespaceCache{}
 
-// Methods for multiNamespaceCache to conform to the Informers interface
-func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) {
+// Methods for multiNamespaceCache to conform to the Informers interface.
+func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) {
 	informers := map[string]Informer{}
+
+	// If the object is clusterscoped, get the informer from clusterCache,
+	// if not use the namespaced caches.
+	isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return nil, err
+	}
+	if !isNamespaced {
+		clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj)
+		if err != nil {
+			return nil, err
+		}
+		informers[globalCache] = clusterCacheInf
+
+		return &multiNamespaceInformer{namespaceToInformer: informers}, nil
+	}
+
 	for ns, cache := range c.namespaceToCache {
 		informer, err := cache.GetInformer(ctx, obj)
 		if err != nil {
@@ -79,11 +110,29 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) {
 		}
 		informers[ns] = informer
 	}
 
 	return &multiNamespaceInformer{namespaceToInformer: informers}, nil
 }
 
 func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
 	informers := map[string]Informer{}
+
+	// If the object is clusterscoped, get the informer from clusterCache,
+	// if not use the namespaced caches.
+	isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return nil, err
+	}
+	if !isNamespaced {
+		clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk)
+		if err != nil {
+			return nil, err
+		}
+		informers[globalCache] = clusterCacheInf
+
+		return &multiNamespaceInformer{namespaceToInformer: informers}, nil
+	}
+
 	for ns, cache := range c.namespaceToCache {
 		informer, err := cache.GetInformerForKind(ctx, gvk)
 		if err != nil {
@@ -91,33 +140,58 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
 		}
 		informers[ns] = informer
 	}
 
 	return &multiNamespaceInformer{namespaceToInformer: informers}, nil
 }
 
-func (c *multiNamespaceCache) Start(stopCh <-chan struct{}) error {
+func (c *multiNamespaceCache) Start(ctx context.Context) error {
+	// start global cache
+	go func() {
+		err := c.clusterCache.Start(ctx)
+		if err != nil {
+			log.Error(err, "cluster scoped cache failed to start")
+		}
+	}()
+
+	// start namespaced caches
 	for ns, cache := range c.namespaceToCache {
 		go func(ns string, cache Cache) {
-			err := cache.Start(stopCh)
+			err := cache.Start(ctx)
 			if err != nil {
 				log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns)
 			}
 		}(ns, cache)
 	}
-	<-stopCh
+
+	<-ctx.Done()
 	return nil
 }
 
-func (c *multiNamespaceCache) WaitForCacheSync(stop <-chan struct{}) bool {
+func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool {
 	synced := true
 	for _, cache := range c.namespaceToCache {
-		if s := cache.WaitForCacheSync(stop); !s {
+		if s := cache.WaitForCacheSync(ctx); !s {
 			synced = s
 		}
 	}
+
+	// check if cluster scoped cache has synced
+	if !c.clusterCache.WaitForCacheSync(ctx) {
+		synced = false
+	}
 	return synced
 }
 
-func (c *multiNamespaceCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error {
+func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
+	isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return nil //nolint:nilerr
+	}
+
+	if !isNamespaced {
+		return c.clusterCache.IndexField(ctx, obj, field, extractValue)
+	}
+
 	for _, cache := range c.namespaceToCache {
 		if err := cache.IndexField(ctx, obj, field, extractValue); err != nil {
 			return err
@@ -126,7 +200,17 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
 	return nil
 }
 
-func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
+func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
+	isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return err
+	}
+
+	if !isNamespaced {
+		// Look into the global cache to fetch the object
+		return c.clusterCache.Get(ctx, key, obj)
+	}
+
 	cache, ok := c.namespaceToCache[key.Namespace]
 	if !ok {
 		return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key)
@@ -135,9 +219,20 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
 }
 
 // List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces.
-func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error {
+func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
 	listOpts := client.ListOptions{}
 	listOpts.ApplyOptions(opts)
+
+	isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return err
+	}
+
+	if !isNamespaced {
+		// Look at the global cache to get the objects with the specified GVK
+		return c.clusterCache.List(ctx, list, opts...)
+	}
+
 	if listOpts.Namespace != corev1.NamespaceAll {
 		cache, ok := c.namespaceToCache[listOpts.Namespace]
 		if !ok {
@@ -146,7 +241,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
 		return cache.List(ctx, list, opts...)
 	}
 
-	listAccessor, err := meta.ListAccessor(list)
+	listAccessor, err := apimeta.ListAccessor(list)
 	if err != nil {
 		return err
 	}
@@ -155,10 +250,13 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
 	if err != nil {
 		return err
 	}
+
+	limitSet := listOpts.Limit > 0
+
 	var resourceVersion string
 	for _, cache := range c.namespaceToCache {
-		listObj := list.DeepCopyObject()
-		err = cache.List(ctx, listObj, opts...)
+		listObj := list.DeepCopyObject().(client.ObjectList)
+		err = cache.List(ctx, listObj, &listOpts)
 		if err != nil {
 			return err
 		}
@@ -166,41 +264,52 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
 		if err != nil {
 			return err
 		}
-		accessor, err := meta.ListAccessor(listObj)
+		accessor, err := apimeta.ListAccessor(listObj)
 		if err != nil {
 			return fmt.Errorf("object: %T must be a list type", list)
 		}
 		allItems = append(allItems, items...)
 		// The last list call should have the most correct resource version.
 		resourceVersion = accessor.GetResourceVersion()
+		if limitSet {
+			// decrement Limit by the number of items
+			// fetched from the current namespace.
+			listOpts.Limit -= int64(len(items))
+			// if a Limit was set and the number of
+			// items read has reached this set limit,
+			// then stop reading.
+			if listOpts.Limit == 0 {
+				break
+			}
+		}
 	}
 	listAccessor.SetResourceVersion(resourceVersion)
 
 	return apimeta.SetList(list, allItems)
 }
 
-// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces
+// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces.
 type multiNamespaceInformer struct {
 	namespaceToInformer map[string]Informer
 }
 
 var _ Informer = &multiNamespaceInformer{}
 
-// AddEventHandler adds the handler to each namespaced informer
+// AddEventHandler adds the handler to each namespaced informer.
 func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) {
 	for _, informer := range i.namespaceToInformer {
 		informer.AddEventHandler(handler)
 	}
 }
 
-// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer
+// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer.
 func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) {
 	for _, informer := range i.namespaceToInformer {
 		informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod)
 	}
 }
 
-// AddIndexers adds the indexer for each namespaced informer
+// AddIndexers adds the indexer for each namespaced informer.
 func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error {
 	for _, informer := range i.namespaceToInformer {
 		err := informer.AddIndexers(indexers)
@@ -211,7 +320,7 @@ func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error {
 	return nil
 }
 
-// HasSynced checks if each namespaced informer has synced
+// HasSynced checks if each namespaced informer has synced.
 func (i *multiNamespaceInformer) HasSynced() bool {
 	for _, informer := range i.namespaceToInformer {
 		if ok := informer.HasSynced(); !ok {
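A sketch of wiring this builder into a manager (namespace names are placeholders): with the change above, cluster-scoped objects such as Nodes or PersistentVolumes are now served from the internal "_cluster-scope" cache instead of failing the per-namespace lookup:

```go
package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func newMultiNamespaceManager() (manager.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		NewCache: cache.MultiNamespacedCacheBuilder([]string{"ceph-csi", "default"}),
	})
}
```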
vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go (generated, vendored)
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,10 +17,11 @@ limitations under the License.
 package certwatcher
 
 import (
+	"context"
 	"crypto/tls"
 	"sync"
 
-	"gopkg.in/fsnotify.v1"
+	"github.com/fsnotify/fsnotify"
 	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
 )
 
@@ -30,7 +31,7 @@ var log = logf.RuntimeLog.WithName("certwatcher")
 // changes, it reads and parses both and calls an optional callback with the new
 // certificate.
 type CertWatcher struct {
-	sync.Mutex
+	sync.RWMutex
 
 	currentCert *tls.Certificate
 	watcher     *fsnotify.Watcher
@@ -63,13 +64,13 @@ func New(certPath, keyPath string) (*CertWatcher, error) {
 
 // GetCertificate fetches the currently loaded certificate, which may be nil.
 func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
-	cw.Lock()
-	defer cw.Unlock()
+	cw.RLock()
+	defer cw.RUnlock()
 	return cw.currentCert, nil
 }
 
 // Start starts the watch on the certificate and key files.
-func (cw *CertWatcher) Start(stopCh <-chan struct{}) error {
+func (cw *CertWatcher) Start(ctx context.Context) error {
 	files := []string{cw.certPath, cw.keyPath}
 
 	for _, f := range files {
@@ -82,8 +83,8 @@ func (cw *CertWatcher) Start(stopCh <-chan struct{}) error {
 
 	log.Info("Starting certificate watcher")
 
-	// Block until the stop channel is closed.
-	<-stopCh
+	// Block until the context is done.
+	<-ctx.Done()
 
 	return cw.watcher.Close()
 }
vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go (23 changes, generated, vendored, new file)
@@ -0,0 +1,23 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package certwatcher is a helper for reloading Certificates from disk to be used
+with tls servers. It provides a helper func `GetCertificate` which can be
+called from `tls.Config` and passed into your tls.Listener. For a detailed
+example server view pkg/webhook/server.go.
+*/
+package certwatcher
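A sketch of the usage this package doc describes, updated for the context-based Start (paths are placeholders):

```go
package example

import (
	"context"
	"crypto/tls"

	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

// serveConfig returns a tls.Config whose certificate is re-read from disk
// whenever the files change; Start blocks until ctx is cancelled.
func serveConfig(ctx context.Context) (*tls.Config, error) {
	cw, err := certwatcher.New("/tls/tls.crt", "/tls/tls.key")
	if err != nil {
		return nil, err
	}
	go func() { _ = cw.Start(ctx) }()
	return &tls.Config{GetCertificate: cw.GetCertificate}, nil
}
```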
vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go (92 changes, generated, vendored)
@ -21,16 +21,41 @@ package apiutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/client-go/discovery"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/restmapper"
|
||||
)
|
||||
|
||||
var (
|
||||
protobufScheme = runtime.NewScheme()
|
||||
protobufSchemeLock sync.RWMutex
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers.
|
||||
// For custom resources, CRDs can not support Protocol Buffers but Aggregated API can.
|
||||
// See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
|
||||
if err := clientgoscheme.AddToScheme(protobufScheme); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should
|
||||
// be additional types that do support protobuf.
|
||||
func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error {
|
||||
protobufSchemeLock.Lock()
|
||||
defer protobufSchemeLock.Unlock()
|
||||
return addToScheme(protobufScheme)
|
||||
}
|
||||
// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery
// information fetched by a new client with the given config.
func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) {
@ -48,12 +73,33 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) {

// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK.
func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
// TODO(directxman12): do we want to generalize this to arbitrary container types?
// I think we'd need a generalized form of scheme or something. It's a
// shame there's not a reliable "GetGVK" interface that works by default
// for unpopulated static types and populated "dynamic" types
// (unstructured, partial, etc)

// check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds
_, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort
_, isPartialList := obj.(*metav1.PartialObjectMetadataList)
if isPartial || isPartialList {
// we require that the GVK be populated in order to recognize the object
gvk := obj.GetObjectKind().GroupVersionKind()
if len(gvk.Kind) == 0 {
return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind")
}
if len(gvk.Version) == 0 {
return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version")
}
return gvk, nil
}

gvks, isUnversioned, err := scheme.ObjectKinds(obj)
if err != nil {
return schema.GroupVersionKind{}, err
}
if isUnversioned {
return schema.GroupVersionKind{}, fmt.Errorf("cannot create a new informer for the unversioned type %T", obj)
return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj)
}

if len(gvks) < 1 {
@ -71,16 +117,25 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi
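Again purely illustrative (not in the diff): how the new PartialObjectMetadata branch is exercised. A metadata-only object carries no Go type information, so the caller must pre-populate the GVK or GVKForObject returns a missing-kind/version error.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes/scheme"

    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
    // A metadata-only object is only recognizable once its GVK is set.
    obj := &metav1.PartialObjectMetadata{}
    obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

    gvk, err := apiutil.GVKForObject(obj, scheme.Scheme)
    if err != nil {
        panic(err) // would be a MissingKindErr if the GVK had been left empty
    }
    fmt.Println(gvk) // apps/v1, Kind=Deployment
}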
// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated
// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from
// baseConfig, if set, otherwise a default serializer will be set.
func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) {
cfg := createRestConfig(gvk, baseConfig)
if cfg.NegotiatedSerializer == nil {
cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs}
}
return rest.RESTClientFor(cfg)
func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) {
return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs))
}

//createRestConfig copies the base config and updates needed fields for a new rest config
func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *rest.Config {
// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory
// in order to avoid clearing the GVK from the decoded object.
//
// See https://github.com/kubernetes/kubernetes/issues/80609.
type serializerWithDecodedGVK struct {
serializer.WithoutConversionCodecFactory
}

// DecoderToVersion returns a decoder that does not do conversion.
func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
return serializer
}

// createRestConfig copies the base config and updates needed fields for a new rest config.
func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config {
gv := gvk.GroupVersion()

cfg := rest.CopyConfig(baseConfig)
@ -93,5 +148,24 @@ func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *res
if cfg.UserAgent == "" {
cfg.UserAgent = rest.DefaultKubernetesUserAgent()
}
// TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true.
if cfg.ContentType == "" && !isUnstructured {
protobufSchemeLock.RLock()
if protobufScheme.Recognizes(gvk) {
cfg.ContentType = runtime.ContentTypeProtobuf
}
protobufSchemeLock.RUnlock()
}

if cfg.NegotiatedSerializer == nil {
if isUnstructured {
// If the object is unstructured, we need to preserve the GVK information.
// Use our own custom serializer.
cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
} else {
cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs}
}
}

return cfg
}
60
vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
generated
vendored
@ -19,7 +19,6 @@ package apiutil
import (
"errors"
"sync"
"time"

"golang.org/x/time/rate"
"k8s.io/apimachinery/pkg/api/meta"
@ -29,34 +28,12 @@ import (
"k8s.io/client-go/restmapper"
)

// ErrRateLimited is returned by a RESTMapper method if the number of API
// calls has exceeded a limit within a certain time period.
type ErrRateLimited struct {
// Duration to wait until the next API call can be made.
Delay time.Duration
}

func (e ErrRateLimited) Error() string {
return "too many API calls to the RESTMapper within a timeframe"
}

// DelayIfRateLimited returns the delay time until the next API call is
// allowed and true if err is of type ErrRateLimited. The zero
// time.Duration value and false are returned if err is not a ErrRateLimited.
func DelayIfRateLimited(err error) (time.Duration, bool) {
var rlerr ErrRateLimited
if errors.As(err, &rlerr) {
return rlerr.Delay, true
}
return 0, false
}

// dynamicRESTMapper is a RESTMapper that dynamically discovers resource
// types at runtime.
type dynamicRESTMapper struct {
mu sync.RWMutex // protects the following fields
staticMapper meta.RESTMapper
limiter *dynamicLimiter
limiter *rate.Limiter
newMapper func() (meta.RESTMapper, error)

lazy bool
@ -64,13 +41,13 @@ type dynamicRESTMapper struct {
initOnce sync.Once
}

// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper
// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper.
type DynamicRESTMapperOption func(*dynamicRESTMapper) error

// WithLimiter sets the RESTMapper's underlying limiter to lim.
func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption {
return func(drm *dynamicRESTMapper) error {
drm.limiter = &dynamicLimiter{lim}
drm.limiter = lim
return nil
}
}
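A short sketch (not from the diff) of the new option surface: the wrapper type dynamicLimiter and ErrRateLimited are gone, so callers hand WithLimiter a plain *rate.Limiter, and a rate-limited lookup now surfaces the underlying NoKindMatchError / NoResourceMatchError instead of a dedicated error type.

package main

import (
    "golang.org/x/time/rate"

    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    cfg, err := config.GetConfig()
    if err != nil {
        panic(err)
    }

    // Allow at most 2 discovery reloads per second, with a burst of 5.
    mapper, err := apiutil.NewDynamicRESTMapper(cfg,
        apiutil.WithLimiter(rate.NewLimiter(rate.Limit(2), 5)))
    if err != nil {
        panic(err)
    }
    _ = mapper // e.g. pass via client.Options.Mapper
}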
@ -103,9 +80,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (me
return nil, err
}
drm := &dynamicRESTMapper{
limiter: &dynamicLimiter{
rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize),
},
limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize),
newMapper: func() (meta.RESTMapper, error) {
groupResources, err := restmapper.GetAPIGroupResources(client)
if err != nil {
@ -161,12 +136,13 @@ func (drm *dynamicRESTMapper) init() (err error) {
// checkAndReload attempts to call the given callback, which is assumed to be dependent
// on the data in the restmapper.
//
// If the callback returns a NoKindMatchError, it will attempt to reload
// If the callback returns an error that matches the given error, it will attempt to reload
// the RESTMapper's data and re-call the callback once that's occurred.
// If the callback returns any other error, the function will return immediately regardless.
//
// It will take care
// ensuring that reloads are rate-limitted and that extraneous calls aren't made.
// It will take care of ensuring that reloads are rate-limited and that extraneous calls
// aren't made. If a reload would exceed the limiter's rate, it returns the error returned by
// the callback.
// It's thread-safe, and worries about thread-safety for the callback (so the callback does
// not need to attempt to lock the restmapper).
func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error {
@ -199,7 +175,9 @@ func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsRel
}

// we're still stale, so grab a rate-limit token if we can...
if err := drm.limiter.checkRate(); err != nil {
if !drm.limiter.Allow() {
// return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter)
// so that clients can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError
return err
}

@ -305,19 +283,3 @@ func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, err
})
return singular, err
}

// dynamicLimiter holds a rate limiter used to throttle chatty RESTMapper users.
type dynamicLimiter struct {
*rate.Limiter
}

// checkRate returns an ErrRateLimited if too many API calls have been made
// within the set limit.
func (b *dynamicLimiter) checkRate() error {
res := b.Reserve()
if res.Delay() == 0 {
return nil
}
res.Cancel()
return ErrRateLimited{res.Delay()}
}
230
vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
generated
vendored
@ -19,24 +19,47 @@ package client
import (
"context"
"fmt"
"strings"

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"

"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/log"
)

// Options are creation options for a Client
// WarningHandlerOptions are options for configuring a
// warning handler for the client which is responsible
// for surfacing API Server warnings.
type WarningHandlerOptions struct {
// SuppressWarnings decides if the warnings from the
// API server are suppressed or surfaced in the client.
SuppressWarnings bool
// AllowDuplicateLogs does not deduplicate the to-be
// logged surfaced warnings messages. See
// log.WarningHandlerOptions for considerations
// regarding deduplication
AllowDuplicateLogs bool
}

// Options are creation options for a Client.
type Options struct {
// Scheme, if provided, will be used to map go structs to GroupVersionKinds
Scheme *runtime.Scheme

// Mapper, if provided, will be used to map GroupVersionKinds to Resources
Mapper meta.RESTMapper

// Opts is used to configure the warning handler responsible for
// surfacing and handling warnings messages sent by the API server.
Opts WarningHandlerOptions
}
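Illustrative usage (not part of the diff) of the new warning-handler knobs on client construction, built only from the types shown above:

package main

import (
    "k8s.io/client-go/kubernetes/scheme"

    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    cfg, err := config.GetConfig()
    if err != nil {
        panic(err)
    }

    // Surface API server warnings (the default), but log every occurrence
    // instead of deduplicating repeated messages.
    c, err := client.New(cfg, client.Options{
        Scheme: scheme.Scheme,
        Opts: client.WarningHandlerOptions{
            SuppressWarnings:   false,
            AllowDuplicateLogs: true,
        },
    })
    if err != nil {
        panic(err)
    }
    _ = c
}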
// New returns a new Client using the provided config and Options.
@ -50,10 +73,31 @@ type Options struct {
// case of unstructured types, the group, version, and kind will be extracted
// from the corresponding fields on the object.
func New(config *rest.Config, options Options) (Client, error) {
return newClient(config, options)
}

func newClient(config *rest.Config, options Options) (*client, error) {
if config == nil {
return nil, fmt.Errorf("must provide non-nil rest.Config to client.New")
}

if !options.Opts.SuppressWarnings {
// surface warnings
logger := log.Log.WithName("KubeAPIWarningLogger")
// Set a WarningHandler, the default WarningHandler
// is log.KubeAPIWarningLogger with deduplication enabled.
// See log.KubeAPIWarningLoggerOptions for considerations
// regarding deduplication.
rest.SetDefaultWarningHandler(
log.NewKubeAPIWarningLogger(
logger,
log.KubeAPIWarningLoggerOptions{
Deduplicate: !options.Opts.AllowDuplicateLogs,
},
),
)
}

// Init a scheme if none provided
if options.Scheme == nil {
options.Scheme = scheme.Scheme
@ -69,11 +113,18 @@ func New(config *rest.Config, options Options) (Client, error) {
}

clientcache := &clientCache{
config: config,
scheme: options.Scheme,
mapper: options.Mapper,
codecs: serializer.NewCodecFactory(options.Scheme),
resourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
config: config,
scheme: options.Scheme,
mapper: options.Mapper,
codecs: serializer.NewCodecFactory(options.Scheme),

structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
}

rawMetaClient, err := metadata.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err)
}

c := &client{
@ -85,6 +136,12 @@ func New(config *rest.Config, options Options) (Client, error) {
cache: clientcache,
paramCodec: noConversionParamCodec{},
},
metadataClient: metadataClient{
client: rawMetaClient,
restMapper: options.Mapper,
},
scheme: options.Scheme,
mapper: options.Mapper,
}

return c, nil
@ -97,10 +154,12 @@ var _ Client = &client{}
type client struct {
typedClient typedClient
unstructuredClient unstructuredClient
metadataClient metadataClient
scheme *runtime.Scheme
mapper meta.RESTMapper
}

// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object.
// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.16?
func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) {
if gvk != schema.EmptyObjectKind.GroupVersionKind() {
if v, ok := obj.(schema.ObjectKind); ok {
@ -109,100 +168,161 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi
}
}

// Create implements client.Client
func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error {
_, ok := obj.(*unstructured.Unstructured)
if ok {
// Scheme returns the scheme this client is using.
func (c *client) Scheme() *runtime.Scheme {
return c.scheme
}

// RESTMapper returns the rest mapper this client is using.
func (c *client) RESTMapper() meta.RESTMapper {
return c.mapper
}

// Create implements client.Client.
func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Create(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot create using only metadata")
default:
return c.typedClient.Create(ctx, obj, opts...)
}
return c.typedClient.Create(ctx, obj, opts...)
}

// Update implements client.Client
func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
// Update implements client.Client.
func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
_, ok := obj.(*unstructured.Unstructured)
if ok {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Update(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
default:
return c.typedClient.Update(ctx, obj, opts...)
}
return c.typedClient.Update(ctx, obj, opts...)
}

// Delete implements client.Client
func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
_, ok := obj.(*unstructured.Unstructured)
if ok {
// Delete implements client.Client.
func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Delete(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return c.metadataClient.Delete(ctx, obj, opts...)
default:
return c.typedClient.Delete(ctx, obj, opts...)
}
return c.typedClient.Delete(ctx, obj, opts...)
}

// DeleteAllOf implements client.Client
func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
_, ok := obj.(*unstructured.Unstructured)
if ok {
// DeleteAllOf implements client.Client.
func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
default:
return c.typedClient.DeleteAllOf(ctx, obj, opts...)
}
return c.typedClient.DeleteAllOf(ctx, obj, opts...)
}

// Patch implements client.Client
func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.Client.
func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
_, ok := obj.(*unstructured.Unstructured)
if ok {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
case *metav1.PartialObjectMetadata:
return c.metadataClient.Patch(ctx, obj, patch, opts...)
default:
return c.typedClient.Patch(ctx, obj, patch, opts...)
}
return c.typedClient.Patch(ctx, obj, patch, opts...)
}

// Get implements client.Client
func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
_, ok := obj.(*unstructured.Unstructured)
if ok {
// Get implements client.Client.
func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Get(ctx, key, obj)
case *metav1.PartialObjectMetadata:
// Metadata only object should always preserve the GVK coming in from the caller.
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
return c.metadataClient.Get(ctx, key, obj)
default:
return c.typedClient.Get(ctx, key, obj)
}
return c.typedClient.Get(ctx, key, obj)
}
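Not part of the diff: a short sketch of the metadata-only read path that the new metadataClient dispatch enables. The GVK must be set up front and is preserved across the call; the object and name used here are hypothetical.

package main

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"

    "sigs.k8s.io/controller-runtime/pkg/client"
)

func fetchMetadata(ctx context.Context, c client.Client) error {
    // Only ObjectMeta travels over the wire; spec and status are never fetched.
    cm := &metav1.PartialObjectMetadata{}
    cm.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"})
    return c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "my-config"}, cm)
}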
// List implements client.Client
func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
_, ok := obj.(*unstructured.UnstructuredList)
if ok {
// List implements client.Client.
func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
switch x := obj.(type) {
case *unstructured.UnstructuredList:
return c.unstructuredClient.List(ctx, obj, opts...)
case *metav1.PartialObjectMetadataList:
// Metadata only object should always preserve the GVK.
gvk := obj.GetObjectKind().GroupVersionKind()
defer c.resetGroupVersionKind(obj, gvk)

// Call the list client.
if err := c.metadataClient.List(ctx, obj, opts...); err != nil {
return err
}

// Restore the GVK for each item in the list.
itemGVK := schema.GroupVersionKind{
Group: gvk.Group,
Version: gvk.Version,
// TODO: this is producing unsafe guesses that don't actually work,
// but it matches ~99% of the cases out there.
Kind: strings.TrimSuffix(gvk.Kind, "List"),
}
for i := range x.Items {
item := &x.Items[i]
item.SetGroupVersionKind(itemGVK)
}

return nil
default:
return c.typedClient.List(ctx, obj, opts...)
}
return c.typedClient.List(ctx, obj, opts...)
}
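A companion sketch (again not from the diff) for the metadata-only list branch above; note how each returned item gets the singular "Pod" kind restored by the TrimSuffix guess:

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"

    "sigs.k8s.io/controller-runtime/pkg/client"
)

func listPodNames(ctx context.Context, c client.Client) error {
    pods := &metav1.PartialObjectMetadataList{}
    pods.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "PodList"})
    if err := c.List(ctx, pods, client.InNamespace("default")); err != nil {
        return err
    }
    for _, pod := range pods.Items {
        fmt.Println(pod.Name, pod.GroupVersionKind().Kind) // Kind is "Pod"
    }
    return nil
}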
// Status implements client.StatusClient
// Status implements client.StatusClient.
func (c *client) Status() StatusWriter {
return &statusWriter{client: c}
}

// statusWriter is client.StatusWriter that writes status subresource
// statusWriter is client.StatusWriter that writes status subresource.
type statusWriter struct {
client *client
}

// ensure statusWriter implements client.StatusWriter
// ensure statusWriter implements client.StatusWriter.
var _ StatusWriter = &statusWriter{}

// Update implements client.StatusWriter
func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
// Update implements client.StatusWriter.
func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
_, ok := obj.(*unstructured.Unstructured)
if ok {
switch obj.(type) {
case *unstructured.Unstructured:
return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
default:
return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
}
return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
}

// Patch implements client.Client
func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.Client.
func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
_, ok := obj.(*unstructured.Unstructured)
if ok {
switch obj.(type) {
case *unstructured.Unstructured:
return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...)
case *metav1.PartialObjectMetadata:
return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...)
default:
return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
}
return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
}
38
vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
generated
vendored
@ -22,6 +22,7 @@ import (

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
@ -29,7 +30,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// clientCache creates and caches rest clients and metadata for Kubernetes types
// clientCache creates and caches rest clients and metadata for Kubernetes types.
type clientCache struct {
// config is the rest.Config to talk to an apiserver
config *rest.Config
@ -43,20 +44,22 @@ type clientCache struct {
// codecs are used to create a REST client for a gvk
codecs serializer.CodecFactory

// resourceByType caches type metadata
resourceByType map[schema.GroupVersionKind]*resourceMeta
mu sync.RWMutex
// structuredResourceByType caches structured type metadata
structuredResourceByType map[schema.GroupVersionKind]*resourceMeta
// unstructuredResourceByType caches unstructured type metadata
unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta
mu sync.RWMutex
}

// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource.
// If the object is a list, the resource represents the item's type instead.
func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList bool) (*resourceMeta, error) {
func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) {
if strings.HasSuffix(gvk.Kind, "List") && isList {
// if this was a list, treat it as a request for the item's resource
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}

client, err := apiutil.RESTClientForGVK(gvk, c.config, c.codecs)
client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs)
if err != nil {
return nil, err
}
@ -75,10 +78,18 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) {
return nil, err
}

_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
isUnstructured = isUnstructured || isUnstructuredList

// It's better to do creation work twice than to not let multiple
// people make requests at once
c.mu.RLock()
r, known := c.resourceByType[gvk]
resourceByType := c.structuredResourceByType
if isUnstructured {
resourceByType = c.unstructuredResourceByType
}
r, known := resourceByType[gvk]
c.mu.RUnlock()

if known {
@ -88,15 +99,15 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) {
// Initialize a new Client
c.mu.Lock()
defer c.mu.Unlock()
r, err = c.newResource(gvk, meta.IsListType(obj))
r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured)
if err != nil {
return nil, err
}
c.resourceByType[gvk] = r
resourceByType[gvk] = r
return r, err
}

// getObjMeta returns objMeta containing both type and object metadata and state
// getObjMeta returns objMeta containing both type and object metadata and state.
func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) {
r, err := c.getResource(obj)
if err != nil {
@ -119,18 +130,17 @@ type resourceMeta struct {
mapping *meta.RESTMapping
}

// isNamespaced returns true if the type is namespaced
// isNamespaced returns true if the type is namespaced.
func (r *resourceMeta) isNamespaced() bool {
return r.mapping.Scope.Name() != meta.RESTScopeNameRoot

}

// resource returns the resource name of the type
// resource returns the resource name of the type.
func (r *resourceMeta) resource() string {
return r.mapping.Resource.Resource
}

// objMeta stores type and object information about a Kubernetes type
// objMeta stores type and object information about a Kubernetes type.
type objMeta struct {
// resourceMeta contains type information for the object
*resourceMeta
18
vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
generated
vendored
@ -1,3 +1,19 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
@ -12,7 +28,7 @@ import (
var _ runtime.ParameterCodec = noConversionParamCodec{}

// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings.
// it's useful in scenarios with the unstructured client and arbitrary resouces.
// it's useful in scenarios with the unstructured client and arbitrary resources.
type noConversionParamCodec struct{}

func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) {
20
vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go
generated
vendored
@ -30,19 +30,14 @@ import (
)

var (
kubeconfig, apiServerURL string
log = logf.RuntimeLog.WithName("client").WithName("config")
kubeconfig string
log = logf.RuntimeLog.WithName("client").WithName("config")
)

func init() {
// TODO: Fix this to allow double vendoring this library but still register flags on behalf of users
flag.StringVar(&kubeconfig, "kubeconfig", "",
"Paths to a kubeconfig. Only required if out-of-cluster.")

// This flag is deprecated, it'll be removed in a future iteration, please switch to --kubeconfig.
flag.StringVar(&apiServerURL, "master", "",
"(Deprecated: switch to `--kubeconfig`) The address of the Kubernetes API server. Overrides any value in kubeconfig. "+
"Only required if out-of-cluster.")
}

// GetConfig creates a *rest.Config for talking to a Kubernetes API server.
@ -60,7 +55,7 @@ func init() {
//
// * In-cluster config if running in cluster
//
// * $HOME/.kube/config if exists
// * $HOME/.kube/config if exists.
func GetConfig() (*rest.Config, error) {
return GetConfigWithContext("")
}
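For illustration (not in the diff): after this change a binary that vendors this package registers only --kubeconfig, since the deprecated --master flag has been removed, so flag handling reduces to:

package main

import (
    "flag"

    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    // The package's init() has already registered --kubeconfig;
    // the deprecated --master flag no longer exists.
    flag.Parse()

    cfg, err := config.GetConfig()
    if err != nil {
        panic(err)
    }
    _ = cfg // e.g. pass to client.New or manager.New
}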
@ -80,7 +75,7 @@ func GetConfig() (*rest.Config, error) {
//
// * In-cluster config if running in cluster
//
// * $HOME/.kube/config if exists
// * $HOME/.kube/config if exists.
func GetConfigWithContext(context string) (*rest.Config, error) {
cfg, err := loadConfig(context)
if err != nil {
@ -100,12 +95,11 @@ func GetConfigWithContext(context string) (*rest.Config, error) {
// test the precedence of loading the config.
var loadInClusterConfig = rest.InClusterConfig

// loadConfig loads a REST Config as per the rules specified in GetConfig
// loadConfig loads a REST Config as per the rules specified in GetConfig.
func loadConfig(context string) (*rest.Config, error) {

// If a flag is specified with the config location, use that
if len(kubeconfig) > 0 {
return loadConfigWithContext(apiServerURL, &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context)
return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context)
}

// If the recommended kubeconfig env variable is not specified,
@ -134,7 +128,7 @@ func loadConfig(context string) (*rest.Config, error) {
loadingRules.Precedence = append(loadingRules.Precedence, path.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName))
}

return loadConfigWithContext(apiServerURL, loadingRules, context)
return loadConfigWithContext("", loadingRules, context)
}

func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) {
51
vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
generated
vendored
@ -19,6 +19,7 @@ package client
import (
"context"

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
)

@ -35,47 +36,57 @@ type dryRunClient struct {
client Client
}

// Create implements client.Client
func (c *dryRunClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error {
// Scheme returns the scheme this client is using.
func (c *dryRunClient) Scheme() *runtime.Scheme {
return c.client.Scheme()
}

// RESTMapper returns the rest mapper this client is using.
func (c *dryRunClient) RESTMapper() meta.RESTMapper {
return c.client.RESTMapper()
}

// Create implements client.Client.
func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
return c.client.Create(ctx, obj, append(opts, DryRunAll)...)
}

// Update implements client.Client
func (c *dryRunClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
// Update implements client.Client.
func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
return c.client.Update(ctx, obj, append(opts, DryRunAll)...)
}

// Delete implements client.Client
func (c *dryRunClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
// Delete implements client.Client.
func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
return c.client.Delete(ctx, obj, append(opts, DryRunAll)...)
}

// DeleteAllOf implements client.Client
func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
// DeleteAllOf implements client.Client.
func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...)
}

// Patch implements client.Client
func (c *dryRunClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.Client.
func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
}

// Get implements client.Client
func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
// Get implements client.Client.
func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
return c.client.Get(ctx, key, obj)
}

// List implements client.Client
func (c *dryRunClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
// List implements client.Client.
func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
return c.client.List(ctx, obj, opts...)
}

// Status implements client.StatusClient
// Status implements client.StatusClient.
func (c *dryRunClient) Status() StatusWriter {
return &dryRunStatusWriter{client: c.client.Status()}
}

// ensure dryRunStatusWriter implements client.StatusWriter
// ensure dryRunStatusWriter implements client.StatusWriter.
var _ StatusWriter = &dryRunStatusWriter{}

// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode
@ -84,12 +95,12 @@ type dryRunStatusWriter struct {
client StatusWriter
}

// Update implements client.StatusWriter
func (sw *dryRunStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
// Update implements client.StatusWriter.
func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
return sw.client.Update(ctx, obj, append(opts, DryRunAll)...)
}

// Patch implements client.StatusWriter
func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.StatusWriter.
func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
}
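An illustrative sketch (not from the diff) of the dry-run wrapper in use. It assumes the NewDryRunClient constructor that lives in this same file above the hunk shown: every write goes to the server with DryRunAll appended, so admission and validation run but nothing is persisted.

package main

import (
    "context"

    corev1 "k8s.io/api/core/v1"

    "sigs.k8s.io/controller-runtime/pkg/client"
)

func validateCreate(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
    // Server-side validation without persisting the object.
    dryRun := client.NewDryRunClient(c)
    return dryRun.Create(ctx, cm)
}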
48
vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
generated
vendored
@ -24,18 +24,15 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)

// ObjectKey identifies a Kubernetes Object.
type ObjectKey = types.NamespacedName

// ObjectKeyFromObject returns the ObjectKey given a runtime.Object
func ObjectKeyFromObject(obj runtime.Object) (ObjectKey, error) {
accessor, err := meta.Accessor(obj)
if err != nil {
return ObjectKey{}, err
}
return ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}, nil
// ObjectKeyFromObject returns the ObjectKey given a runtime.Object.
func ObjectKeyFromObject(obj Object) ObjectKey {
return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}
}
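Not part of the diff: with the signature change above, ObjectKeyFromObject no longer returns an error, which simplifies call sites to a one-liner, for example:

package main

import (
    "context"

    appsv1 "k8s.io/api/apps/v1"

    "sigs.k8s.io/controller-runtime/pkg/client"
)

func refresh(ctx context.Context, c client.Client, deploy *appsv1.Deployment) error {
    // The helper is now infallible: no error return, just the key.
    key := client.ObjectKeyFromObject(deploy)
    return c.Get(ctx, key, deploy)
}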
// Patch is a patch that can be applied to a Kubernetes object.
@ -43,7 +40,7 @@ type Patch interface {
// Type is the PatchType of the patch.
Type() types.PatchType
// Data is the raw data representing the patch.
Data(obj runtime.Object) ([]byte, error)
Data(obj Object) ([]byte, error)
}

// TODO(directxman12): is there a sane way to deal with get/delete options?
@ -53,32 +50,32 @@ type Reader interface {
// Get retrieves an obj for the given object key from the Kubernetes Cluster.
// obj must be a struct pointer so that obj can be updated with the response
// returned by the Server.
Get(ctx context.Context, key ObjectKey, obj runtime.Object) error
Get(ctx context.Context, key ObjectKey, obj Object) error

// List retrieves list of objects for a given namespace and list options. On a
// successful call, Items field in the list will be populated with the
// result returned from the server.
List(ctx context.Context, list runtime.Object, opts ...ListOption) error
List(ctx context.Context, list ObjectList, opts ...ListOption) error
}

// Writer knows how to create, delete, and update Kubernetes objects.
type Writer interface {
// Create saves the object obj in the Kubernetes cluster.
Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error
Create(ctx context.Context, obj Object, opts ...CreateOption) error

// Delete deletes the given obj from Kubernetes cluster.
Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error
Delete(ctx context.Context, obj Object, opts ...DeleteOption) error

// Update updates the given obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error
Update(ctx context.Context, obj Object, opts ...UpdateOption) error

// Patch patches the given obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error
Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error

// DeleteAllOf deletes all objects of the given type matching the given options.
DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error
DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error
}

// StatusClient knows how to create a client which can update status subresource
@ -92,12 +89,12 @@ type StatusWriter interface {
// Update updates the fields corresponding to the status subresource for the
// given obj. obj must be a struct pointer so that obj can be updated
// with the content returned by the Server.
Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error
Update(ctx context.Context, obj Object, opts ...UpdateOption) error

// Patch patches the given object's subresource. obj must be a struct
// pointer so that obj can be updated with the content returned by the
// Server.
Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error
Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
}

// Client knows how to perform CRUD operations on Kubernetes objects.
@ -105,12 +102,25 @@ type Client interface {
Reader
Writer
StatusClient

// Scheme returns the scheme this client is using.
Scheme() *runtime.Scheme
// RESTMapper returns the rest mapper this client is using.
RESTMapper() meta.RESTMapper
}

// WithWatch supports Watch on top of the CRUD operations supported by
// the normal Client. Its intended use-case is CLI apps that need to wait for
// events.
type WithWatch interface {
Client
Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error)
}
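A hedged sketch of the new WithWatch interface in use; it assumes the NewWithWatch constructor that accompanies this interface in controller-runtime v0.9 (not shown in this hunk):

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"

    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    cfg, err := config.GetConfig()
    if err != nil {
        panic(err)
    }
    // NewWithWatch is assumed here as the v0.9 constructor for WithWatch.
    wc, err := client.NewWithWatch(cfg, client.Options{})
    if err != nil {
        panic(err)
    }

    pods := &corev1.PodList{}
    w, err := wc.Watch(context.Background(), pods, client.InNamespace("default"))
    if err != nil {
        panic(err)
    }
    defer w.Stop()
    for evt := range w.ResultChan() {
        fmt.Println(evt.Type) // e.g. ADDED, MODIFIED, DELETED
    }
}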
// IndexerFunc knows how to take an object and turn it into a series
// of non-namespaced keys. Namespaced objects are automatically given
// namespaced and non-spaced variants, so keys do not need to include namespace.
type IndexerFunc func(runtime.Object) []string
type IndexerFunc func(Object) []string

// FieldIndexer knows how to index over a particular "field" such that it
// can later be used by a field selector.
@ -122,7 +132,7 @@ type FieldIndexer interface {
// and "equality" in the field selector means that at least one key matches the value.
// The FieldIndexer will automatically take care of indexing over namespace
// and supporting efficient all-namespace queries.
IndexField(ctx context.Context, obj runtime.Object, field string, extractValue IndexerFunc) error
IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error
}

// IgnoreNotFound returns nil on NotFound errors.
195
vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go
generated
vendored
Normal file
@ -0,0 +1,195 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
"context"
"fmt"
"strings"

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/metadata"
)

// TODO(directxman12): we could rewrite this on top of the low-level REST
// client to avoid the extra shallow copy at the end, but I'm not sure it's
// worth it -- the metadata client deals with falling back to loading the whole
// object on older API servers, etc, and we'd have to reproduce that.

// metadataClient is a client that reads & writes metadata-only requests to/from the API server.
type metadataClient struct {
client metadata.Interface
restMapper meta.RESTMapper
}

func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) {
mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
if mapping.Scope.Name() == meta.RESTScopeNameRoot {
return mc.client.Resource(mapping.Resource), nil
}
return mc.client.Resource(mapping.Resource).Namespace(ns), nil
}
// Delete implements client.Client.
func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}

resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace)
if err != nil {
return err
}

deleteOpts := DeleteOptions{}
deleteOpts.ApplyOptions(opts)

return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions())
}

// DeleteAllOf implements client.Client.
func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}

deleteAllOfOpts := DeleteAllOfOptions{}
deleteAllOfOpts.ApplyOptions(opts)

resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace)
if err != nil {
return err
}

return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions())
}

// Patch implements client.Client.
func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}

gvk := metadata.GroupVersionKind()
resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
if err != nil {
return err
}

data, err := patch.Data(obj)
if err != nil {
return err
}

patchOpts := &PatchOptions{}
patchOpts.ApplyOptions(opts)

res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions())
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}

// Get implements client.Client.
func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}

gvk := metadata.GroupVersionKind()

resInt, err := mc.getResourceInterface(gvk, key.Namespace)
if err != nil {
return err
}

res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{})
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}

// List implements client.Client.
func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadataList)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}

gvk := metadata.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}

listOpts := ListOptions{}
listOpts.ApplyOptions(opts)

resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace)
if err != nil {
return err
}

res, err := resInt.List(ctx, *listOpts.AsListOptions())
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}

func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
}

gvk := metadata.GroupVersionKind()
resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
if err != nil {
return err
}

data, err := patch.Data(obj)
if err != nil {
return err
}

patchOpts := &PatchOptions{}
res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status")
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil
}
254
vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
generated
vendored
Normal file
@ -0,0 +1,254 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
"context"
"errors"
"fmt"

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// NewNamespacedClient wraps an existing client enforcing the namespace value.
// All functions using this client will have the same namespace declared here.
func NewNamespacedClient(c Client, ns string) Client {
return &namespacedClient{
client: c,
namespace: ns,
}
}
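A sketch (not in the diff) of the namespace-pinning behavior this new wrapper provides; the "tenant-a" namespace and "settings" name are hypothetical:

package main

import (
    "context"

    corev1 "k8s.io/api/core/v1"

    "sigs.k8s.io/controller-runtime/pkg/client"
)

func createInTenant(ctx context.Context, base client.Client) error {
    // Every operation through nsClient is pinned to "tenant-a";
    // objects carrying a conflicting namespace are rejected with an error.
    nsClient := client.NewNamespacedClient(base, "tenant-a")

    cm := &corev1.ConfigMap{}
    cm.Name = "settings" // namespace left empty: defaulted to "tenant-a"
    return nsClient.Create(ctx, cm)
}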
var _ Client = &namespacedClient{}

// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value.
type namespacedClient struct {
namespace string
client Client
}

// Scheme returns the scheme this client is using.
func (n *namespacedClient) Scheme() *runtime.Scheme {
return n.client.Scheme()
}

// RESTMapper returns the rest mapper this client is using.
func (n *namespacedClient) RESTMapper() meta.RESTMapper {
return n.client.RESTMapper()
}

// isNamespaced returns true if the object is namespace scoped.
// For unstructured objects the gvk is found from the object itself.
// TODO: this is repetitive code. Remove this and use objectutil.IsNamespaced.
func isNamespaced(c Client, obj runtime.Object) (bool, error) {
var gvk schema.GroupVersionKind
var err error

_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)

isUnstructured = isUnstructured || isUnstructuredList
if isUnstructured {
gvk = obj.GetObjectKind().GroupVersionKind()
} else {
gvk, err = apiutil.GVKForObject(obj, c.Scheme())
if err != nil {
return false, err
}
}

gk := schema.GroupKind{
Group: gvk.Group,
Kind: gvk.Kind,
}
restmapping, err := c.RESTMapper().RESTMapping(gk)
if err != nil {
return false, fmt.Errorf("failed to get restmapping: %w", err)
}
scope := restmapping.Scope.Name()

if scope == "" {
return false, errors.New("scope cannot be identified, empty scope returned")
}

if scope != meta.RESTScopeNameRoot {
return true, nil
}
return false, nil
}

// Create implements client.Client.
func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
isNamespaceScoped, err := isNamespaced(n.client, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}

objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}

if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Create(ctx, obj, opts...)
}

// Update implements client.Client.
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
isNamespaceScoped, err := isNamespaced(n.client, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}

objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}

if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Update(ctx, obj, opts...)
}

// Delete implements client.Client.
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
isNamespaceScoped, err := isNamespaced(n.client, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}

objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}

if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Delete(ctx, obj, opts...)
}

// DeleteAllOf implements client.Client.
func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
isNamespaceScoped, err := isNamespaced(n.client, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}

if isNamespaceScoped {
opts = append(opts, InNamespace(n.namespace))
}
return n.client.DeleteAllOf(ctx, obj, opts...)
}

// Patch implements client.Client.
func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
isNamespaceScoped, err := isNamespaced(n.client, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}

objectNamespace := obj.GetNamespace()
if objectNamespace != n.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace)
}

if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(n.namespace)
}
return n.client.Patch(ctx, obj, patch, opts...)
}

// Get implements client.Client.
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
isNamespaceScoped, err := isNamespaced(n.client, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}
if isNamespaceScoped {
if key.Namespace != "" && key.Namespace != n.namespace {
return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace)
}
key.Namespace = n.namespace
}
return n.client.Get(ctx, key, obj)
}

// List implements client.Client.
func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
if n.namespace != "" {
opts = append(opts, InNamespace(n.namespace))
}
return n.client.List(ctx, obj, opts...)
}

// Status implements client.StatusClient.
func (n *namespacedClient) Status() StatusWriter {
return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n}
}

// ensure namespacedClientStatusWriter implements client.StatusWriter.
var _ StatusWriter = &namespacedClientStatusWriter{}

type namespacedClientStatusWriter struct {
StatusClient StatusWriter
namespace string
namespacedclient Client
}

// Update implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj)
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
}

objectNamespace := obj.GetNamespace()
if objectNamespace != nsw.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
}

if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.StatusClient.Update(ctx, obj, opts...)
}

// Patch implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
|
||||
isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding the scope of the object: %v", err)
|
||||
}
|
||||
|
||||
objectNamespace := obj.GetNamespace()
|
||||
if objectNamespace != nsw.namespace && objectNamespace != "" {
|
||||
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
|
||||
}
|
||||
|
||||
if isNamespaceScoped && objectNamespace == "" {
|
||||
obj.SetNamespace(nsw.namespace)
|
||||
}
|
||||
return nsw.StatusClient.Patch(ctx, obj, patch, opts...)
|
||||
}
|
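The wrapper above is what backs the package's NewNamespacedClient helper. A minimal usage sketch; the "ceph-csi" namespace and the surrounding function are illustrative, not part of this diff:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ensureConfigMap creates a ConfigMap through a namespace-bound client.
// No Namespace is set on the object, so the wrapper fills it in because
// ConfigMap is a namespace-scoped kind.
func ensureConfigMap(ctx context.Context, c client.Client) error {
	nsClient := client.NewNamespacedClient(c, "ceph-csi")
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "csi-config"}}
	return nsClient.Create(ctx, cm)
}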
77
vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Object is a Kubernetes object, allows functions to work indistinctly with
// any resource that implements both Object interfaces.
//
// Semantically, these are objects which are both serializable (runtime.Object)
// and identifiable (metav1.Object) -- think any object which you could write
// as YAML or JSON, and then `kubectl create`.
//
// Code-wise, this means that any object which embeds both ObjectMeta (which
// provides metav1.Object) and TypeMeta (which provides half of runtime.Object)
// and has a `DeepCopyObject` implementation (the other half of runtime.Object)
// will implement this by default.
//
// For example, nearly all the built-in types are Objects, as well as all
// KubeBuilder-generated CRDs (unless you do something real funky to them).
//
// By and large, most things that implement runtime.Object also implement
// Object -- it's very rare to have *just* a runtime.Object implementation (the
// cases tend to be funky built-in types like Webhook payloads that don't have
// a `metadata` field).
//
// Notice that XYZList types are distinct: they implement ObjectList instead.
type Object interface {
	metav1.Object
	runtime.Object
}

// ObjectList is a Kubernetes object list, allows functions to work
// indistinctly with any resource that implements both runtime.Object and
// metav1.ListInterface interfaces.
//
// Semantically, this is any object which may be serialized (ObjectMeta), and
// is a kubernetes list wrapper (has items, pagination fields, etc) -- think
// the wrapper used in a response from a `kubectl list --output yaml` call.
//
// Code-wise, this means that any object which embeds both ListMeta (which
// provides metav1.ListInterface) and TypeMeta (which provides half of
// runtime.Object) and has a `DeepCopyObject` implementation (the other half of
// runtime.Object) will implement this by default.
//
// For example, nearly all the built-in XYZList types are ObjectLists, as well
// as the XYZList types for all KubeBuilder-generated CRDs (unless you do
// something real funky to them).
//
// By and large, most things that are XYZList and implement runtime.Object also
// implement ObjectList -- it's very rare to have *just* a runtime.Object
// implementation (the cases tend to be funky built-in types like Webhook
// payloads that don't have a `metadata` field).
//
// This is similar to Object, which is almost always implemented by the items
// in the list themselves.
type ObjectList interface {
	metav1.ListInterface
	runtime.Object
}
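In practice this means ordinary typed structs and their List counterparts satisfy these interfaces with no extra code; a compile-time check (using core/v1 as an illustrative import) makes the claim concrete:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Pod embeds TypeMeta and ObjectMeta and has a generated DeepCopyObject,
// so it is a client.Object; PodList embeds TypeMeta and ListMeta, so it
// is a client.ObjectList. Both assertions compile without any glue code.
var (
	_ client.Object     = &corev1.Pod{}
	_ client.ObjectList = &corev1.PodList{}
)

func main() {}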
35
vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
generated
vendored
@ -158,7 +158,7 @@ func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions {
	return o
}

// ApplyToCreate implements CreateOption
// ApplyToCreate implements CreateOption.
func (o *CreateOptions) ApplyToCreate(co *CreateOptions) {
	if o.DryRun != nil {
		co.DryRun = o.DryRun
@ -173,11 +173,6 @@ func (o *CreateOptions) ApplyToCreate(co *CreateOptions) {

var _ CreateOption = &CreateOptions{}

// CreateDryRunAll sets the "dry run" option to "all".
//
// Deprecated: Use DryRunAll
var CreateDryRunAll = DryRunAll

// }}}

// {{{ Delete Options
@ -244,7 +239,7 @@ func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions {

var _ DeleteOption = &DeleteOptions{}

// ApplyToDelete implements DeleteOption
// ApplyToDelete implements DeleteOption.
func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) {
	if o.GracePeriodSeconds != nil {
		do.GracePeriodSeconds = o.GracePeriodSeconds
@ -354,7 +349,7 @@ type ListOptions struct {

var _ ListOption = &ListOptions{}

// ApplyToList implements ListOption for ListOptions
// ApplyToList implements ListOption for ListOptions.
func (o *ListOptions) ApplyToList(lo *ListOptions) {
	if o.LabelSelector != nil {
		lo.LabelSelector = o.LabelSelector
@ -460,14 +455,6 @@ func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
	m.ApplyToList(&opts.ListOptions)
}

// MatchingField filters the list operation on the given field selector
// (or index in the case of cached lists).
//
// Deprecated: Use MatchingFields
func MatchingField(name, val string) MatchingFields {
	return MatchingFields{name: val}
}

// MatchingFields filters the list/delete operation on the given field Set
// (or index in the case of cached lists).
type MatchingFields fields.Set
@ -582,7 +569,7 @@ func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions {

var _ UpdateOption = &UpdateOptions{}

// ApplyToUpdate implements UpdateOption
// ApplyToUpdate implements UpdateOption.
func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
	if o.DryRun != nil {
		uo.DryRun = o.DryRun
@ -595,11 +582,6 @@ func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
	}
}

// UpdateDryRunAll sets the "dry run" option to "all".
//
// Deprecated: Use DryRunAll
var UpdateDryRunAll = DryRunAll

// }}}

// {{{ Patch Options
@ -654,7 +636,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions {

var _ PatchOption = &PatchOptions{}

// ApplyToPatch implements PatchOptions
// ApplyToPatch implements PatchOptions.
func (o *PatchOptions) ApplyToPatch(po *PatchOptions) {
	if o.DryRun != nil {
		po.DryRun = o.DryRun
@ -682,11 +664,6 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) {
	opts.Force = &definitelyTrue
}

// PatchDryRunAll sets the "dry run" option to "all".
//
// Deprecated: Use DryRunAll
var PatchDryRunAll = DryRunAll

// }}}

// {{{ DeleteAllOf Options
@ -711,7 +688,7 @@ func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOf

var _ DeleteAllOfOption = &DeleteAllOfOptions{}

// ApplyToDeleteAllOf implements DeleteAllOfOption
// ApplyToDeleteAllOf implements DeleteAllOfOption.
func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) {
	o.ApplyToList(&do.ListOptions)
	o.ApplyToDelete(&do.DeleteOptions)
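Because each option only mutates the fields it owns, callers can freely mix several of them in a single call; a short sketch of composing list options (the namespace and label values are illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPlugins shows several ListOptions composing into one request; each
// option applies itself to the same ListOptions struct via ApplyToList.
func listPlugins(ctx context.Context, c client.Client) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	err := c.List(ctx, pods,
		client.InNamespace("ceph-csi"),                // sets ListOptions.Namespace
		client.MatchingLabels{"app": "csi-rbdplugin"}, // sets ListOptions.LabelSelector
	)
	return pods, err
}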
126
vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
generated
vendored
@ -17,19 +17,21 @@ limitations under the License.
package client

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

var (
	// Apply uses server-side apply to patch the given object.
	Apply = applyPatch{}
	Apply Patch = applyPatch{}

	// Merge uses the raw object as a merge patch, without modifications.
	// Use MergeFrom if you wish to compute a diff instead.
	Merge = mergePatch{}
	Merge Patch = mergePatch{}
)

type patch struct {
@ -43,7 +45,7 @@ func (s *patch) Type() types.PatchType {
}

// Data implements Patch.
func (s *patch) Data(obj runtime.Object) ([]byte, error) {
func (s *patch) Data(obj Object) ([]byte, error) {
	return s.data, nil
}

@ -52,40 +54,128 @@ func RawPatch(patchType types.PatchType, data []byte) Patch {
	return &patch{patchType, data}
}

// ConstantPatch constructs a new Patch with the given PatchType and data.
// MergeFromWithOptimisticLock can be used if clients want to make sure a patch
// is being applied to the latest resource version of an object.
//
// Deprecated: use RawPatch instead
func ConstantPatch(patchType types.PatchType, data []byte) Patch {
	return RawPatch(patchType, data)
// The behavior is similar to what an Update would do, without the need to send the
// whole object. Usually this method is useful if you might have multiple clients
// acting on the same object and the same API version, but with different versions of the Go structs.
//
// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C.
// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not.
type MergeFromWithOptimisticLock struct{}

// ApplyToMergeFrom applies this configuration to the given patch options.
func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) {
	in.OptimisticLock = true
}

// MergeFromOption is some configuration that modifies options for a merge-from patch data.
type MergeFromOption interface {
	// ApplyToMergeFrom applies this configuration to the given patch options.
	ApplyToMergeFrom(*MergeFromOptions)
}

// MergeFromOptions contains options to generate a merge-from patch data.
type MergeFromOptions struct {
	// OptimisticLock, when true, includes `metadata.resourceVersion` into the final
	// patch data. If the `resourceVersion` field doesn't match what's stored,
	// the operation results in a conflict and clients will need to try again.
	OptimisticLock bool
}

type mergeFromPatch struct {
	from runtime.Object
	patchType   types.PatchType
	createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error)
	from        Object
	opts        MergeFromOptions
}

// Type implements patch.
// Type implements Patch.
func (s *mergeFromPatch) Type() types.PatchType {
	return types.MergePatchType
	return s.patchType
}

// Data implements Patch.
func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) {
	originalJSON, err := json.Marshal(s.from)
func (s *mergeFromPatch) Data(obj Object) ([]byte, error) {
	original := s.from
	modified := obj

	if s.opts.OptimisticLock {
		version := original.GetResourceVersion()
		if len(version) == 0 {
			return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original)
		}

		original = original.DeepCopyObject().(Object)
		original.SetResourceVersion("")

		modified = modified.DeepCopyObject().(Object)
		modified.SetResourceVersion(version)
	}

	originalJSON, err := json.Marshal(original)
	if err != nil {
		return nil, err
	}

	modifiedJSON, err := json.Marshal(obj)
	modifiedJSON, err := json.Marshal(modified)
	if err != nil {
		return nil, err
	}

	data, err := s.createPatch(originalJSON, modifiedJSON, obj)
	if err != nil {
		return nil, err
	}

	return data, nil
}

func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) {
	return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
}

func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) {
	return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct)
}

// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base.
func MergeFrom(obj runtime.Object) Patch {
	return &mergeFromPatch{obj}
// The difference between MergeFrom and StrategicMergeFrom lies in the handling of modified list fields.
// When using MergeFrom, existing lists will be completely replaced by new lists.
// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
// the difference between merge-patch and strategic-merge-patch.
func MergeFrom(obj Object) Patch {
	return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj}
}

// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base.
// See MergeFrom for more details.
func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch {
	options := &MergeFromOptions{}
	for _, opt := range opts {
		opt.ApplyToMergeFrom(options)
	}
	return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options}
}

// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base.
// The difference between MergeFrom and StrategicMergeFrom lies in the handling of modified list fields.
// When using MergeFrom, existing lists will be completely replaced by new lists.
// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
// the difference between merge-patch and strategic-merge-patch.
// Please note, that CRDs don't support strategic-merge-patch, see
// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility
func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch {
	options := &MergeFromOptions{}
	for _, opt := range opts {
		opt.ApplyToMergeFrom(options)
	}
	return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options}
}

// mergePatch uses a raw merge strategy to patch the object.
@ -97,7 +187,7 @@ func (p mergePatch) Type() types.PatchType {
}

// Data implements Patch.
func (p mergePatch) Data(obj runtime.Object) ([]byte, error) {
func (p mergePatch) Data(obj Object) ([]byte, error) {
	// NB(directxman12): we might technically want to be using an actual encoder
	// here (in case some more performant encoder is introduced) but this is
	// correct and sufficient for our uses (it's what the JSON serializer in
@ -114,7 +204,7 @@ func (p applyPatch) Type() types.PatchType {
}

// Data implements Patch.
func (p applyPatch) Data(obj runtime.Object) ([]byte, error) {
func (p applyPatch) Data(obj Object) ([]byte, error) {
	// NB(directxman12): we might technically want to be using an actual encoder
	// here (in case some more performant encoder is introduced) but this is
	// correct and sufficient for our uses (it's what the JSON serializer in
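The usual flow with these constructors is snapshot, mutate, patch; Data is then called with the mutated object and only the computed diff goes over the wire. A hedged sketch (the Deployment and replica count are illustrative):

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// scaleDeployment sends a merge patch computed against the pre-mutation
// snapshot; MergeFromWithOptimisticLock makes the server reject the patch
// with a conflict if the object changed since the snapshot was taken.
func scaleDeployment(ctx context.Context, c client.Client, dep *appsv1.Deployment) error {
	base := dep.DeepCopy()
	replicas := int32(3)
	dep.Spec.Replicas = &replicas
	return c.Patch(ctx, dep, client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{}))
}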
100
vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
generated
vendored
@ -18,43 +18,123 @@ package client

import (
	"context"
	"strings"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// DelegatingClient forms a Client by composing separate reader, writer and
// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client.
type NewDelegatingClientInput struct {
	CacheReader       Reader
	Client            Client
	UncachedObjects   []Object
	CacheUnstructured bool
}

// NewDelegatingClient creates a new delegating client.
//
// A delegating client forms a Client by composing separate reader, writer and
// statusclient interfaces. This way, you can have a Client that reads from a
// cache and writes to the API server.
type DelegatingClient struct {
func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) {
	uncachedGVKs := map[schema.GroupVersionKind]struct{}{}
	for _, obj := range in.UncachedObjects {
		gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme())
		if err != nil {
			return nil, err
		}
		uncachedGVKs[gvk] = struct{}{}
	}

	return &delegatingClient{
		scheme: in.Client.Scheme(),
		mapper: in.Client.RESTMapper(),
		Reader: &delegatingReader{
			CacheReader:       in.CacheReader,
			ClientReader:      in.Client,
			scheme:            in.Client.Scheme(),
			uncachedGVKs:      uncachedGVKs,
			cacheUnstructured: in.CacheUnstructured,
		},
		Writer:       in.Client,
		StatusClient: in.Client,
	}, nil
}

type delegatingClient struct {
	Reader
	Writer
	StatusClient

	scheme *runtime.Scheme
	mapper meta.RESTMapper
}

// DelegatingReader forms a Reader that will cause Get and List requests for
// Scheme returns the scheme this client is using.
func (d *delegatingClient) Scheme() *runtime.Scheme {
	return d.scheme
}

// RESTMapper returns the rest mapper this client is using.
func (d *delegatingClient) RESTMapper() meta.RESTMapper {
	return d.mapper
}

// delegatingReader forms a Reader that will cause Get and List requests for
// unstructured types to use the ClientReader while requests for any other type
// of object will use the CacheReader. This avoids accidentally caching the
// entire cluster in the common case of loading arbitrary unstructured objects
// (e.g. from OwnerReferences).
type DelegatingReader struct {
type delegatingReader struct {
	CacheReader  Reader
	ClientReader Reader

	uncachedGVKs      map[schema.GroupVersionKind]struct{}
	scheme            *runtime.Scheme
	cacheUnstructured bool
}

func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
	gvk, err := apiutil.GVKForObject(obj, d.scheme)
	if err != nil {
		return false, err
	}
	// TODO: this is producing unsafe guesses that don't actually work,
	// but it matches ~99% of the cases out there.
	if meta.IsListType(obj) {
		gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
	}
	if _, isUncached := d.uncachedGVKs[gvk]; isUncached {
		return true, nil
	}
	if !d.cacheUnstructured {
		_, isUnstructured := obj.(*unstructured.Unstructured)
		_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
		return isUnstructured || isUnstructuredList, nil
	}
	return false, nil
}

// Get retrieves an obj for a given object key from the Kubernetes Cluster.
func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
	_, isUnstructured := obj.(*unstructured.Unstructured)
	if isUnstructured {
func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error {
	if isUncached, err := d.shouldBypassCache(obj); err != nil {
		return err
	} else if isUncached {
		return d.ClientReader.Get(ctx, key, obj)
	}
	return d.CacheReader.Get(ctx, key, obj)
}

// List retrieves list of objects for a given namespace and list options.
func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOption) error {
	_, isUnstructured := list.(*unstructured.UnstructuredList)
	if isUnstructured {
func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error {
	if isUncached, err := d.shouldBypassCache(list); err != nil {
		return err
	} else if isUncached {
		return d.ClientReader.List(ctx, list, opts...)
	}
	return d.CacheReader.List(ctx, list, opts...)
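Managers wire this up automatically through DefaultNewClient (shown later in this diff, in pkg/cluster), but the input struct can also be used directly; cachedReader and apiClient below are illustrative placeholders for a cache-backed Reader and a direct client:

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newSplitClient reads from the cache, writes to the API server, and always
// reads Secrets directly so they never land in the informer cache.
func newSplitClient(cachedReader client.Reader, apiClient client.Client) (client.Client, error) {
	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader:     cachedReader,
		Client:          apiClient,
		UncachedObjects: []client.Object{&corev1.Secret{}},
	})
}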
36
vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
generated
vendored
@ -22,6 +22,10 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
)

var _ Reader = &typedClient{}
var _ Writer = &typedClient{}
var _ StatusWriter = &typedClient{}

// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
// new clients at the time they are used, and caches the client.
type typedClient struct {
@ -29,8 +33,8 @@ type typedClient struct {
	paramCodec runtime.ParameterCodec
}

// Create implements client.Client
func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error {
// Create implements client.Client.
func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
@ -47,8 +51,8 @@ func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...Cr
		Into(obj)
}

// Update implements client.Client
func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
// Update implements client.Client.
func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
@ -66,8 +70,8 @@ func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...Up
		Into(obj)
}

// Delete implements client.Client
func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
// Delete implements client.Client.
func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
@ -85,8 +89,8 @@ func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...De
		Error()
}

// DeleteAllOf implements client.Client
func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
// DeleteAllOf implements client.Client.
func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
@ -104,8 +108,8 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts
		Error()
}

// Patch implements client.Client
func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.Client.
func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
@ -127,8 +131,8 @@ func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch
		Into(obj)
}

// Get implements client.Client
func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
// Get implements client.Client.
func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
	r, err := c.cache.getResource(obj)
	if err != nil {
		return err
@ -139,8 +143,8 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object
		Name(key.Name).Do(ctx).Into(obj)
}

// List implements client.Client
func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
// List implements client.Client.
func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
	r, err := c.cache.getResource(obj)
	if err != nil {
		return err
@ -156,7 +160,7 @@ func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...List
}

// UpdateStatus used by StatusWriter to write status.
func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
@ -177,7 +181,7 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts
}

// PatchStatus used by StatusWriter to write status.
func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
	o, err := c.cache.getObjMeta(obj)
	if err != nil {
		return err
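The var _ StatusWriter assertion above is what ultimately backs c.Status() on a full client; a brief sketch of writing only the status subresource (the PVC and phase are illustrative, not from this diff):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// markBound persists only the status subresource; the spec is untouched
// even if the local copy has unrelated modifications.
func markBound(ctx context.Context, c client.Client, pvc *corev1.PersistentVolumeClaim) error {
	pvc.Status.Phase = corev1.ClaimBound
	return c.Status().Update(ctx, pvc)
}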
36
vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
generated
vendored
@ -25,6 +25,10 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
)

var _ Reader = &unstructuredClient{}
var _ Writer = &unstructuredClient{}
var _ StatusWriter = &unstructuredClient{}

// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
// new clients at the time they are used, and caches the client.
type unstructuredClient struct {
@ -32,8 +36,8 @@ type unstructuredClient struct {
	paramCodec runtime.ParameterCodec
}

// Create implements client.Client
func (uc *unstructuredClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error {
// Create implements client.Client.
func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
	u, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -60,8 +64,8 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj runtime.Object, op
	return result
}

// Update implements client.Client
func (uc *unstructuredClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
// Update implements client.Client.
func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
	u, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -89,8 +93,8 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj runtime.Object, op
	return result
}

// Delete implements client.Client
func (uc *unstructuredClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
// Delete implements client.Client.
func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
	_, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -112,8 +116,8 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj runtime.Object, op
		Error()
}

// DeleteAllOf implements client.Client
func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
// DeleteAllOf implements client.Client.
func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
	_, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -135,8 +139,8 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj runtime.Objec
		Error()
}

// Patch implements client.Client
func (uc *unstructuredClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.Client.
func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
	_, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -163,8 +167,8 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj runtime.Object, pat
		Into(obj)
}

// Get implements client.Client
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
// Get implements client.Client.
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
	u, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -189,8 +193,8 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj runtim
	return result
}

// List implements client.Client
func (uc *unstructuredClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
// List implements client.Client.
func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
	u, ok := obj.(*unstructured.UnstructuredList)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -217,7 +221,7 @@ func (uc *unstructuredClient) List(ctx context.Context, obj runtime.Object, opts
		Into(obj)
}

func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
	_, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -239,7 +243,7 @@ func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj runtime.Obje
		Into(obj)
}

func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
	u, ok := obj.(*unstructured.Unstructured)
	if !ok {
		return fmt.Errorf("unstructured client did not understand object: %T", obj)
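Since the unstructured client never consults a scheme, the caller must stamp the GVK on the object before any call; a sketch of reading an arbitrary resource without typed structs (the group, kind, and names are illustrative):

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func getCephCluster(ctx context.Context, c client.Client) (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	// Without this GVK the client cannot resolve the REST endpoint.
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group: "ceph.rook.io", Version: "v1", Kind: "CephCluster",
	})
	err := c.Get(ctx, client.ObjectKey{Namespace: "rook-ceph", Name: "my-cluster"}, u)
	return u, err
}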
118
vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// NewWithWatch returns a new WithWatch.
func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) {
	client, err := newClient(config, options)
	if err != nil {
		return nil, err
	}
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return &watchingClient{client: client, dynamic: dynamicClient}, nil
}

type watchingClient struct {
	*client
	dynamic dynamic.Interface
}

func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) {
	switch l := list.(type) {
	case *unstructured.UnstructuredList:
		return w.unstructuredWatch(ctx, l, opts...)
	case *metav1.PartialObjectMetadataList:
		return w.metadataWatch(ctx, l, opts...)
	default:
		return w.typedWatch(ctx, l, opts...)
	}
}

func (w *watchingClient) listOpts(opts ...ListOption) ListOptions {
	listOpts := ListOptions{}
	listOpts.ApplyOptions(opts)
	if listOpts.Raw == nil {
		listOpts.Raw = &metav1.ListOptions{}
	}
	listOpts.Raw.Watch = true

	return listOpts
}

func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) {
	gvk := obj.GroupVersionKind()
	if strings.HasSuffix(gvk.Kind, "List") {
		gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
	}

	listOpts := w.listOpts(opts...)

	resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace)
	if err != nil {
		return nil, err
	}

	return resInt.Watch(ctx, *listOpts.AsListOptions())
}

func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) {
	gvk := obj.GroupVersionKind()
	if strings.HasSuffix(gvk.Kind, "List") {
		gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
	}

	r, err := w.client.unstructuredClient.cache.getResource(obj)
	if err != nil {
		return nil, err
	}

	listOpts := w.listOpts(opts...)

	if listOpts.Namespace != "" && r.isNamespaced() {
		return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions())
	}
	return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions())
}

func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) {
	r, err := w.client.typedClient.cache.getResource(obj)
	if err != nil {
		return nil, err
	}

	listOpts := w.listOpts(opts...)

	return r.Get().
		NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
		Resource(r.resource()).
		VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec).
		Watch(ctx)
}
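A sketch of the watch client in use, for example in an integration test that blocks on events without running a manager (cfg and the namespace are illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func watchPods(ctx context.Context, cfg *rest.Config) error {
	wc, err := client.NewWithWatch(cfg, client.Options{})
	if err != nil {
		return err
	}
	w, err := wc.Watch(ctx, &corev1.PodList{}, client.InNamespace("default"))
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		// ev.Type is Added/Modified/Deleted; ev.Object is the changed Pod.
		_ = ev
	}
	return nil
}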
270
vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go
generated
vendored
Normal file
@ -0,0 +1,270 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
	"context"
	"errors"
	"time"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
)

// Cluster provides various methods to interact with a cluster.
type Cluster interface {
	// SetFields will set any dependencies on an object for which the object has implemented the inject
	// interface - e.g. inject.Client.
	// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10.
	SetFields(interface{}) error

	// GetConfig returns an initialized Config
	GetConfig() *rest.Config

	// GetScheme returns an initialized Scheme
	GetScheme() *runtime.Scheme

	// GetClient returns a client configured with the Config. This client may
	// not be a fully "direct" client -- it may read from a cache, for
	// instance. See Options.NewClient for more information on how the default
	// implementation works.
	GetClient() client.Client

	// GetFieldIndexer returns a client.FieldIndexer configured with the client
	GetFieldIndexer() client.FieldIndexer

	// GetCache returns a cache.Cache
	GetCache() cache.Cache

	// GetEventRecorderFor returns a new EventRecorder for the provided name
	GetEventRecorderFor(name string) record.EventRecorder

	// GetRESTMapper returns a RESTMapper
	GetRESTMapper() meta.RESTMapper

	// GetAPIReader returns a reader that will be configured to use the API server.
	// This should be used sparingly and only when the client does not fit your
	// use case.
	GetAPIReader() client.Reader

	// Start starts the cluster
	Start(ctx context.Context) error
}

// Options are the possible options that can be configured for a Cluster.
type Options struct {
	// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
	// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always a better
	// idea to pass your own scheme in. See the documentation in pkg/scheme for more information.
	Scheme *runtime.Scheme

	// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
	MapperProvider func(c *rest.Config) (meta.RESTMapper, error)

	// Logger is the logger that should be used by this Cluster.
	// If none is set, it defaults to log.Log global logger.
	Logger logr.Logger

	// SyncPeriod determines the minimum frequency at which watched resources are
	// reconciled. A lower period will correct entropy more quickly, but reduce
	// responsiveness to change if there are many watched resources. Change this
	// value only if you know what you are doing. Defaults to 10 hours if unset.
	// There will be a 10 percent jitter between the SyncPeriod of all controllers
	// so that all controllers will not send list requests simultaneously.
	SyncPeriod *time.Duration

	// Namespace, if specified, restricts the manager's cache to watch objects in
	// the desired namespace. Defaults to all namespaces.
	//
	// Note: If a namespace is specified, controllers can still Watch for a
	// cluster-scoped resource (e.g Node). For namespaced resources the cache
	// will only hold objects from the desired namespace.
	Namespace string

	// NewCache is the function that will create the cache to be used
	// by the manager. If not set this will use the default new cache function.
	NewCache cache.NewCacheFunc

	// NewClient is the func that creates the client to be used by the manager.
	// If not set this will create the default DelegatingClient that will
	// use the cache for reads and the client for writes.
	NewClient NewClientFunc

	// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
	// for the given objects.
	ClientDisableCacheFor []client.Object

	// DryRunClient specifies whether the client should be configured to enforce
	// dryRun mode.
	DryRunClient bool

	// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API
	// Use this to customize the event correlator and spam filter
	//
	// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
	// is shorter than the lifetime of your process.
	EventBroadcaster record.EventBroadcaster

	// makeBroadcaster allows deferring the creation of the broadcaster to
	// avoid leaking goroutines if we never call Start on this manager. It also
	// returns whether or not this is an "owned" broadcaster, and as such should be
	// stopped with the manager.
	makeBroadcaster intrec.EventBroadcasterProducer

	// Dependency injection for testing
	newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error)
}

// Option can be used to manipulate Options.
type Option func(*Options)

// New constructs a brand new cluster.
func New(config *rest.Config, opts ...Option) (Cluster, error) {
	if config == nil {
		return nil, errors.New("must specify Config")
	}

	options := Options{}
	for _, opt := range opts {
		opt(&options)
	}
	options = setOptionsDefaults(options)

	// Create the mapper provider
	mapper, err := options.MapperProvider(config)
	if err != nil {
		options.Logger.Error(err, "Failed to get API Group-Resources")
		return nil, err
	}

	// Create the cache for the cached read client and registering informers
	cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace})
	if err != nil {
		return nil, err
	}

	clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper}

	apiReader, err := client.New(config, clientOptions)
	if err != nil {
		return nil, err
	}

	writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...)
	if err != nil {
		return nil, err
	}

	if options.DryRunClient {
		writeObj = client.NewDryRunClient(writeObj)
	}

	// Create the recorder provider to inject event recorders for the components.
	// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
	// to the particular controller that it's being injected into, rather than a generic one like is here.
	recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster)
	if err != nil {
		return nil, err
	}

	return &cluster{
		config:           config,
		scheme:           options.Scheme,
		cache:            cache,
		fieldIndexes:     cache,
		client:           writeObj,
		apiReader:        apiReader,
		recorderProvider: recorderProvider,
		mapper:           mapper,
		logger:           options.Logger,
	}, nil
}

// setOptionsDefaults set default values for Options fields.
func setOptionsDefaults(options Options) Options {
	// Use the Kubernetes client-go scheme if none is specified
	if options.Scheme == nil {
		options.Scheme = scheme.Scheme
	}

	if options.MapperProvider == nil {
		options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) {
			return apiutil.NewDynamicRESTMapper(c)
		}
	}

	// Allow users to define how to create a new client
	if options.NewClient == nil {
		options.NewClient = DefaultNewClient
	}

	// Allow newCache to be mocked
	if options.NewCache == nil {
		options.NewCache = cache.New
	}

	// Allow newRecorderProvider to be mocked
	if options.newRecorderProvider == nil {
		options.newRecorderProvider = intrec.NewProvider
	}

	// This is duplicated with pkg/manager, we need it here to provide
	// the user with an EventBroadcaster and therefore for the Leader election
	if options.EventBroadcaster == nil {
		// defer initialization to avoid leaking by default
		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
			return record.NewBroadcaster(), true
		}
	} else {
		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
			return options.EventBroadcaster, false
		}
	}

	if options.Logger == nil {
		options.Logger = logf.RuntimeLog.WithName("cluster")
	}

	return options
}

// NewClientFunc allows a user to define how to create a client.
type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error)

// DefaultNewClient creates the default caching client.
func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
	c, err := client.New(config, options)
	if err != nil {
		return nil, err
	}

	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader:     cache,
		Client:          c,
		UncachedObjects: uncachedObjects,
	})
}
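Cluster is the connection-oriented slice of what Manager used to carry; a minimal sketch of constructing one directly (the namespace restriction and error handling are illustrative):

import (
	"context"

	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
)

func newCluster(ctx context.Context, cfg *rest.Config) (cluster.Cluster, error) {
	cl, err := cluster.New(cfg, func(o *cluster.Options) {
		o.Namespace = "ceph-csi" // restrict the cache to one namespace
	})
	if err != nil {
		return nil, err
	}
	// Start blocks until the context is cancelled, so run it alongside
	// whatever consumes the cache-backed client.
	go func() { _ = cl.Start(ctx) }()
	return cl, nil
}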
128
vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
	"context"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)

type cluster struct {
	// config is the rest.config used to talk to the apiserver. Required.
	config *rest.Config

	// scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults
	// to scheme.scheme.
	scheme *runtime.Scheme

	cache cache.Cache

	// TODO(directxman12): Provide an escape hatch to get individual indexers
	// client is the client injected into Controllers (and EventHandlers, Sources and Predicates).
	client client.Client

	// apiReader is the reader that will make requests to the api server and not the cache.
	apiReader client.Reader

	// fieldIndexes knows how to add field indexes over the Cache used by this controller,
	// which can later be consumed via field selectors from the injected client.
	fieldIndexes client.FieldIndexer

	// recorderProvider is used to generate event recorders that will be injected into Controllers
	// (and EventHandlers, Sources and Predicates).
	recorderProvider *intrec.Provider

	// mapper is used to map resources to kind, and map kind and version.
	mapper meta.RESTMapper

	// Logger is the logger that should be used by this manager.
	// If none is set, it defaults to log.Log global logger.
	logger logr.Logger
}

func (c *cluster) SetFields(i interface{}) error {
	if _, err := inject.ConfigInto(c.config, i); err != nil {
		return err
	}
	if _, err := inject.ClientInto(c.client, i); err != nil {
		return err
	}
	if _, err := inject.APIReaderInto(c.apiReader, i); err != nil {
		return err
	}
	if _, err := inject.SchemeInto(c.scheme, i); err != nil {
		return err
	}
	if _, err := inject.CacheInto(c.cache, i); err != nil {
		return err
	}
	if _, err := inject.MapperInto(c.mapper, i); err != nil {
		return err
	}
	return nil
}

func (c *cluster) GetConfig() *rest.Config {
	return c.config
}

func (c *cluster) GetClient() client.Client {
	return c.client
}

func (c *cluster) GetScheme() *runtime.Scheme {
	return c.scheme
}

func (c *cluster) GetFieldIndexer() client.FieldIndexer {
	return c.fieldIndexes
}

func (c *cluster) GetCache() cache.Cache {
	return c.cache
}

func (c *cluster) GetEventRecorderFor(name string) record.EventRecorder {
	return c.recorderProvider.GetEventRecorderFor(name)
}

func (c *cluster) GetRESTMapper() meta.RESTMapper {
	return c.mapper
}

func (c *cluster) GetAPIReader() client.Reader {
	return c.apiReader
}

func (c *cluster) GetLogger() logr.Logger {
	return c.logger
}

func (c *cluster) Start(ctx context.Context) error {
	defer c.recorderProvider.Stop(ctx)
	return c.cache.Start(ctx)
}
112
vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"fmt"
	ioutil "io/ioutil"
	"sync"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)

// ControllerManagerConfiguration defines the functions necessary to parse a config file
// and to configure the Options struct for the ctrl.Manager.
type ControllerManagerConfiguration interface {
	runtime.Object

	// Complete returns the versioned configuration
	Complete() (v1alpha1.ControllerManagerConfigurationSpec, error)
}

// DeferredFileLoader is used to configure the decoder for loading controller
// runtime component config types.
type DeferredFileLoader struct {
	ControllerManagerConfiguration
	path   string
	scheme *runtime.Scheme
	once   sync.Once
	err    error
}

// File will set up the deferred file loader for the configuration.
// This will also configure the defaults for the loader if nothing is set.
//
// Defaults:
//   Path: "./config.yaml"
//   Kind: GenericControllerManagerConfiguration
func File() *DeferredFileLoader {
	scheme := runtime.NewScheme()
	utilruntime.Must(v1alpha1.AddToScheme(scheme))
	return &DeferredFileLoader{
		path:                           "./config.yaml",
		ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{},
		scheme:                         scheme,
	}
}

// Complete will use sync.Once to set the scheme.
func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) {
	d.once.Do(d.loadFile)
	if d.err != nil {
		return v1alpha1.ControllerManagerConfigurationSpec{}, d.err
	}
	return d.ControllerManagerConfiguration.Complete()
}

// AtPath will set the path to load the file for the decoder.
func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader {
	d.path = path
	return d
}

// OfKind will set the type to be used for decoding the file into.
func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader {
	d.ControllerManagerConfiguration = obj
	return d
}

// InjectScheme will configure the scheme to be used for decoding the file.
func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error {
	d.scheme = scheme
	return nil
}

// loadFile is used from within sync.Once to load the file.
func (d *DeferredFileLoader) loadFile() {
	if d.scheme == nil {
		d.err = fmt.Errorf("scheme not supplied to controller configuration loader")
		return
	}

	content, err := ioutil.ReadFile(d.path)
	if err != nil {
		d.err = fmt.Errorf("could not read file at %s", d.path)
		return
	}

	codecs := serializer.NewCodecFactory(d.scheme)

	// Regardless of whether the bytes are of any external version,
	// they will be read successfully and converted into the internal version.
	if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil {
		d.err = fmt.Errorf("could not decode file into runtime.Object")
	}
}
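For orientation, a minimal sketch of how this deferred loader is meant to be chained; the config path below is purely illustrative, and error handling is trimmed to the essentials:

package main

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/config"
)

func main() {
	// Chain the loader; AtPath overrides the "./config.yaml" default shown
	// above (the path used here is hypothetical).
	loader := config.File().AtPath("/etc/controller/config.yaml")

	// Complete decodes the file exactly once (guarded by sync.Once) and
	// returns the versioned ControllerManagerConfigurationSpec.
	spec, err := loader.Complete()
	if err != nil {
		fmt.Println("failed to load component config:", err)
		return
	}
	if spec.SyncPeriod != nil {
		fmt.Println("sync period:", spec.SyncPeriod.Duration)
	}
}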
25
vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package config contains functionality for interacting with ComponentConfig
// files
//
// DeferredFileLoader
//
// This uses a deferred file decoding allowing you to chain your configuration
// setup. You can pass this into manager.Options#File and it will load your
// config.
package config
20
vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 provides the ControllerManagerConfiguration used for
// configuring ctrl.Manager
// +kubebuilder:object:generate=true
package v1alpha1
37
vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

func init() {
	SchemeBuilder.Register(&ControllerManagerConfiguration{})
}
157
vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
)

// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration.
type ControllerManagerConfigurationSpec struct {
	// SyncPeriod determines the minimum frequency at which watched resources are
	// reconciled. A lower period will correct entropy more quickly, but reduce
	// responsiveness to change if there are many watched resources. Change this
	// value only if you know what you are doing. Defaults to 10 hours if unset.
	// There will be a 10 percent jitter between the SyncPeriod of all controllers
	// so that all controllers will not send list requests simultaneously.
	// +optional
	SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"`

	// LeaderElection is the LeaderElection config to be used when configuring
	// the manager.Manager leader election
	// +optional
	LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

	// CacheNamespace if specified restricts the manager's cache to watch objects in
	// the desired namespace. Defaults to all namespaces.
	//
	// Note: If a namespace is specified, controllers can still Watch for a
	// cluster-scoped resource (e.g Node). For namespaced resources the cache
	// will only hold objects from the desired namespace.
	// +optional
	CacheNamespace string `json:"cacheNamespace,omitempty"`

	// GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop.
	// To disable graceful shutdown, set to time.Duration(0)
	// To use graceful shutdown without timeout, set to a negative duration, e.g. time.Duration(-1)
	// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
	GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"`

	// Controller contains global configuration options for controllers
	// registered within this manager.
	// +optional
	Controller *ControllerConfigurationSpec `json:"controller,omitempty"`

	// Metrics contains the controller metrics configuration
	// +optional
	Metrics ControllerMetrics `json:"metrics,omitempty"`

	// Health contains the controller health configuration
	// +optional
	Health ControllerHealth `json:"health,omitempty"`

	// Webhook contains the controllers webhook configuration
	// +optional
	Webhook ControllerWebhook `json:"webhook,omitempty"`
}

// ControllerConfigurationSpec defines the global configuration for
// controllers registered with the manager.
type ControllerConfigurationSpec struct {
	// GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation
	// allowed for that controller.
	//
	// When a controller is registered within this manager using the builder utilities,
	// users have to specify the type the controller reconciles in the For(...) call.
	// If the object's kind passed matches one of the keys in this map, the concurrency
	// for that controller is set to the number specified.
	//
	// The key is expected to be consistent in form with GroupKind.String(),
	// e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`.
	//
	// +optional
	GroupKindConcurrency map[string]int `json:"groupKindConcurrency,omitempty"`

	// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
	// Defaults to 2 minutes if not set.
	// +optional
	CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"`
}

// ControllerMetrics defines the metrics configs.
type ControllerMetrics struct {
	// BindAddress is the TCP address that the controller should bind to
	// for serving prometheus metrics.
	// It can be set to "0" to disable the metrics serving.
	// +optional
	BindAddress string `json:"bindAddress,omitempty"`
}

// ControllerHealth defines the health configs.
type ControllerHealth struct {
	// HealthProbeBindAddress is the TCP address that the controller should bind to
	// for serving health probes
	// +optional
	HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"`

	// ReadinessEndpointName, defaults to "readyz"
	// +optional
	ReadinessEndpointName string `json:"readinessEndpointName,omitempty"`

	// LivenessEndpointName, defaults to "healthz"
	// +optional
	LivenessEndpointName string `json:"livenessEndpointName,omitempty"`
}

// ControllerWebhook defines the webhook server for the controller.
type ControllerWebhook struct {
	// Port is the port that the webhook server serves at.
	// It is used to set webhook.Server.Port.
	// +optional
	Port *int `json:"port,omitempty"`

	// Host is the hostname that the webhook server binds to.
	// It is used to set webhook.Server.Host.
	// +optional
	Host string `json:"host,omitempty"`

	// CertDir is the directory that contains the server key and certificate.
	// If not set, the webhook server would look up the server key and certificate in
	// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
	// must be named tls.key and tls.crt, respectively.
	// +optional
	CertDir string `json:"certDir,omitempty"`
}

// +kubebuilder:object:root=true

// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API.
type ControllerManagerConfiguration struct {
	metav1.TypeMeta `json:",inline"`

	// ControllerManagerConfiguration returns the configurations for controllers
	ControllerManagerConfigurationSpec `json:",inline"`
}

// Complete returns the configuration for controller-runtime.
func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) {
	return *c, nil
}
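To make the field layout concrete, a hedged sketch that builds the configuration in code rather than loading it from a file; every value below is illustrative:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)

func main() {
	webhookPort := 9443 // illustrative port

	cfg := v1alpha1.ControllerManagerConfiguration{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "controller-runtime.sigs.k8s.io/v1alpha1",
			Kind:       "ControllerManagerConfiguration",
		},
		ControllerManagerConfigurationSpec: v1alpha1.ControllerManagerConfigurationSpec{
			SyncPeriod:     &metav1.Duration{Duration: 10 * time.Hour},
			CacheNamespace: "demo-system", // restrict the cache; illustrative namespace
			Metrics:        v1alpha1.ControllerMetrics{BindAddress: ":8080"},
			Health:         v1alpha1.ControllerHealth{HealthProbeBindAddress: ":8081"},
			Webhook:        v1alpha1.ControllerWebhook{Port: &webhookPort},
		},
	}

	// Complete is promoted from the embedded spec and simply returns it.
	spec, err := cfg.Complete()
	if err != nil {
		panic(err)
	}
	fmt.Println("cache namespace:", spec.CacheNamespace)
}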
152
vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
// +build !ignore_autogenerated

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
	timex "time"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfigurationSpec) {
	*out = *in
	if in.GroupKindConcurrency != nil {
		in, out := &in.GroupKindConcurrency, &out.GroupKindConcurrency
		*out = make(map[string]int, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.CacheSyncTimeout != nil {
		in, out := &in.CacheSyncTimeout, &out.CacheSyncTimeout
		*out = new(timex.Duration)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec.
func (in *ControllerConfigurationSpec) DeepCopy() *ControllerConfigurationSpec {
	if in == nil {
		return nil
	}
	out := new(ControllerConfigurationSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth.
func (in *ControllerHealth) DeepCopy() *ControllerHealth {
	if in == nil {
		return nil
	}
	out := new(ControllerHealth)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration.
func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration {
	if in == nil {
		return nil
	}
	out := new(ControllerManagerConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) {
	*out = *in
	if in.SyncPeriod != nil {
		in, out := &in.SyncPeriod, &out.SyncPeriod
		*out = new(v1.Duration)
		**out = **in
	}
	if in.LeaderElection != nil {
		in, out := &in.LeaderElection, &out.LeaderElection
		*out = new(configv1alpha1.LeaderElectionConfiguration)
		(*in).DeepCopyInto(*out)
	}
	if in.GracefulShutdownTimeout != nil {
		in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout
		*out = new(v1.Duration)
		**out = **in
	}
	if in.Controller != nil {
		in, out := &in.Controller, &out.Controller
		*out = new(ControllerConfigurationSpec)
		(*in).DeepCopyInto(*out)
	}
	out.Metrics = in.Metrics
	out.Health = in.Health
	in.Webhook.DeepCopyInto(&out.Webhook)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec.
func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec {
	if in == nil {
		return nil
	}
	out := new(ControllerManagerConfigurationSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics.
func (in *ControllerMetrics) DeepCopy() *ControllerMetrics {
	if in == nil {
		return nil
	}
	out := new(ControllerMetrics)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) {
	*out = *in
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(int)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook.
func (in *ControllerWebhook) DeepCopy() *ControllerWebhook {
	if in == nil {
		return nil
	}
	out := new(ControllerWebhook)
	in.DeepCopyInto(out)
	return out
}
43
vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go
generated
vendored
@ -17,8 +17,11 @@ limitations under the License.
package controller

import (
	"context"
	"fmt"
	"time"

	"github.com/go-logr/logr"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/internal/controller"
@ -29,7 +32,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// Options are the arguments for creating a new Controller
// Options are the arguments for creating a new Controller.
type Options struct {
	// MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1.
	MaxConcurrentReconciles int
@ -41,6 +44,14 @@ type Options struct {
	// Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting.
	// The overall is a token bucket and the per-item is exponential.
	RateLimiter ratelimiter.RateLimiter

	// Log is the logger used for this controller and passed to each reconciliation
	// request via the context field.
	Log logr.Logger

	// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
	// Defaults to 2 minutes if not set.
	CacheSyncTimeout time.Duration
}

// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests
@ -59,9 +70,12 @@ type Controller interface {
	// EventHandler if all provided Predicates evaluate to true.
	Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error

	// Start starts the controller. Start blocks until stop is closed or a
	// Start starts the controller. Start blocks until the context is closed or a
	// controller has an error starting.
	Start(stop <-chan struct{}) error
	Start(ctx context.Context) error

	// GetLogger returns this controller logger prefilled with basic information.
	GetLogger() logr.Logger
}

// New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have
@ -87,10 +101,18 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
		return nil, fmt.Errorf("must specify Name for Controller")
	}

	if options.Log == nil {
		options.Log = mgr.GetLogger()
	}

	if options.MaxConcurrentReconciles <= 0 {
		options.MaxConcurrentReconciles = 1
	}

	if options.CacheSyncTimeout == 0 {
		options.CacheSyncTimeout = 2 * time.Minute
	}

	if options.RateLimiter == nil {
		options.RateLimiter = workqueue.DefaultControllerRateLimiter()
	}
@ -101,20 +123,15 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
	}

	// Create controller with dependencies set
	c := &controller.Controller{
		Do:       options.Reconciler,
		Cache:    mgr.GetCache(),
		Config:   mgr.GetConfig(),
		Scheme:   mgr.GetScheme(),
		Client:   mgr.GetClient(),
		Recorder: mgr.GetEventRecorderFor(name),
	return &controller.Controller{
		Do: options.Reconciler,
		MakeQueue: func() workqueue.RateLimitingInterface {
			return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name)
		},
		MaxConcurrentReconciles: options.MaxConcurrentReconciles,
		CacheSyncTimeout:        options.CacheSyncTimeout,
		SetFields:               mgr.SetFields,
		Name:                    name,
	}

	return c, nil
		Log: options.Log.WithName("controller").WithName(name),
	}, nil
}
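A sketch of how these options are typically passed to controller.New after this change; the controller name and reconciler below are hypothetical, and mgr is assumed to be an already-constructed manager.Manager:

package demo

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// noopReconciler is a hypothetical reconciler used only for illustration.
type noopReconciler struct{}

func (noopReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// Note the context-aware Reconcile signature introduced by this update.
	return reconcile.Result{}, nil
}

func newDemoController(mgr manager.Manager) (controller.Controller, error) {
	// MaxConcurrentReconciles and CacheSyncTimeout fall back to 1 and
	// 2 minutes respectively when left at their zero values.
	return controller.New("demo-controller", mgr, controller.Options{
		Reconciler:              noopReconciler{},
		MaxConcurrentReconciles: 2,
		CacheSyncTimeout:        90 * time.Second,
	})
}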
30
vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go
generated
vendored
@ -16,45 +16,30 @@ limitations under the License.

package event

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)
import "sigs.k8s.io/controller-runtime/pkg/client"

// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated
// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler.
type CreateEvent struct {
	// Meta is the ObjectMeta of the Kubernetes Type that was created
	Meta metav1.Object

	// Object is the object from the event
	Object runtime.Object
	Object client.Object
}

// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated
// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler.
type UpdateEvent struct {
	// MetaOld is the ObjectMeta of the Kubernetes Type that was updated (before the update)
	MetaOld metav1.Object

	// ObjectOld is the object from the event
	ObjectOld runtime.Object

	// MetaNew is the ObjectMeta of the Kubernetes Type that was updated (after the update)
	MetaNew metav1.Object
	ObjectOld client.Object

	// ObjectNew is the object from the event
	ObjectNew runtime.Object
	ObjectNew client.Object
}

// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated
// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler.
type DeleteEvent struct {
	// Meta is the ObjectMeta of the Kubernetes Type that was deleted
	Meta metav1.Object

	// Object is the object from the event
	Object runtime.Object
	Object client.Object

	// DeleteStateUnknown is true if the Delete event was missed but we identified the object
	// as having been deleted.
@ -65,9 +50,6 @@ type DeleteEvent struct {
// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an
// handler.EventHandler.
type GenericEvent struct {
	// Meta is the ObjectMeta of a Kubernetes Type this event is for
	Meta metav1.Object

	// Object is the object from the event
	Object runtime.Object
	Object client.Object
}
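As a quick illustration of the new shape, a hedged sketch constructing a GenericEvent from a typed object; any typed API object satisfies client.Object, so the separate Meta field is no longer needed (names below are illustrative):

package demo

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/controller-runtime/pkg/event"
)

func newPodEvent() event.GenericEvent {
	// The pod name and namespace are illustrative.
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-pod", Namespace: "default"},
	}

	// Identity is now read straight off the object itself.
	evt := event.GenericEvent{Object: pod}
	_ = evt.Object.GetName()      // "demo-pod"
	_ = evt.Object.GetNamespace() // "default"
	return evt
}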
49
vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go
generated
vendored
@ -26,6 +26,8 @@ import (

var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject")

type empty struct{}

var _ EventHandler = &EnqueueRequestForObject{}

// EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event.
@ -33,59 +35,56 @@ var _ EventHandler = &EnqueueRequestForObject{}
// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource.
type EnqueueRequestForObject struct{}

// Create implements EventHandler
// Create implements EventHandler.
func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
	if evt.Meta == nil {
	if evt.Object == nil {
		enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt)
		return
	}
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
		Name:      evt.Meta.GetName(),
		Namespace: evt.Meta.GetNamespace(),
		Name:      evt.Object.GetName(),
		Namespace: evt.Object.GetNamespace(),
	}})
}

// Update implements EventHandler
// Update implements EventHandler.
func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
	if evt.MetaOld != nil {
	switch {
	case evt.ObjectNew != nil:
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
			Name:      evt.MetaOld.GetName(),
			Namespace: evt.MetaOld.GetNamespace(),
			Name:      evt.ObjectNew.GetName(),
			Namespace: evt.ObjectNew.GetNamespace(),
		}})
	} else {
		enqueueLog.Error(nil, "UpdateEvent received with no old metadata", "event", evt)
	}

	if evt.MetaNew != nil {
	case evt.ObjectOld != nil:
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
			Name:      evt.MetaNew.GetName(),
			Namespace: evt.MetaNew.GetNamespace(),
			Name:      evt.ObjectOld.GetName(),
			Namespace: evt.ObjectOld.GetNamespace(),
		}})
	} else {
		enqueueLog.Error(nil, "UpdateEvent received with no new metadata", "event", evt)
	default:
		enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt)
	}
}

// Delete implements EventHandler
// Delete implements EventHandler.
func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
	if evt.Meta == nil {
	if evt.Object == nil {
		enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt)
		return
	}
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
		Name:      evt.Meta.GetName(),
		Namespace: evt.Meta.GetNamespace(),
		Name:      evt.Object.GetName(),
		Namespace: evt.Object.GetNamespace(),
	}})
}

// Generic implements EventHandler
// Generic implements EventHandler.
func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
	if evt.Meta == nil {
	if evt.Object == nil {
		enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt)
		return
	}
	q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
		Name:      evt.Meta.GetName(),
		Namespace: evt.Meta.GetNamespace(),
		Name:      evt.Object.GetName(),
		Namespace: evt.Object.GetNamespace(),
	}})
}
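For context, a minimal sketch of wiring this handler into a watch; ctrl is assumed to be a controller built as in the controller.New sketch above, and source.Kind is assumed as the usual event source for a typed object in this API version:

package demo

import (
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchPods(ctrl controller.Controller) error {
	// Every Pod event is enqueued as a request keyed by the Pod's own
	// name and namespace, read from evt.Object as shown above.
	return ctrl.Watch(
		&source.Kind{Type: &corev1.Pod{}},
		&handler.EnqueueRequestForObject{},
	)
}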
88
vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go
generated
vendored
@ -17,15 +17,16 @@ limitations under the License.
package handler

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)

var _ EventHandler = &EnqueueRequestsFromMapFunc{}
// MapFunc is the signature required for enqueueing requests from a generic function.
// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler.
type MapFunc func(client.Object) []reconcile.Request

// EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection
// of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects
@ -37,69 +38,60 @@ var _ EventHandler = &EnqueueRequestsFromMapFunc{}
//
// For UpdateEvents which contain both a new and old object, the transformation function is run on both
// objects and both sets of Requests are enqueue.
type EnqueueRequestsFromMapFunc struct {
func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler {
	return &enqueueRequestsFromMapFunc{
		toRequests: fn,
	}
}

var _ EventHandler = &enqueueRequestsFromMapFunc{}

type enqueueRequestsFromMapFunc struct {
	// Mapper transforms the argument into a slice of keys to be reconciled
	ToRequests Mapper
	toRequests MapFunc
}

// Create implements EventHandler
func (e *EnqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
	e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object})
// Create implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
	reqs := map[reconcile.Request]empty{}
	e.mapAndEnqueue(q, evt.Object, reqs)
}

// Update implements EventHandler
func (e *EnqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
	e.mapAndEnqueue(q, MapObject{Meta: evt.MetaOld, Object: evt.ObjectOld})
	e.mapAndEnqueue(q, MapObject{Meta: evt.MetaNew, Object: evt.ObjectNew})
// Update implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
	reqs := map[reconcile.Request]empty{}
	e.mapAndEnqueue(q, evt.ObjectOld, reqs)
	e.mapAndEnqueue(q, evt.ObjectNew, reqs)
}

// Delete implements EventHandler
func (e *EnqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
	e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object})
// Delete implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
	reqs := map[reconcile.Request]empty{}
	e.mapAndEnqueue(q, evt.Object, reqs)
}

// Generic implements EventHandler
func (e *EnqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
	e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object})
// Generic implements EventHandler.
func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
	reqs := map[reconcile.Request]empty{}
	e.mapAndEnqueue(q, evt.Object, reqs)
}

func (e *EnqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object MapObject) {
	for _, req := range e.ToRequests.Map(object) {
		q.Add(req)
func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) {
	for _, req := range e.toRequests(object) {
		_, ok := reqs[req]
		if !ok {
			q.Add(req)
			reqs[req] = empty{}
		}
	}
}

// EnqueueRequestsFromMapFunc can inject fields into the mapper.

// InjectFunc implements inject.Injector.
func (e *EnqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error {
func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error {
	if f == nil {
		return nil
	}
	return f(e.ToRequests)
}

// Mapper maps an object to a collection of keys to be enqueued
type Mapper interface {
	// Map maps an object
	Map(MapObject) []reconcile.Request
}

// MapObject contains information from an event to be transformed into a Request.
type MapObject struct {
	// Meta is the meta data for an object from an event.
	Meta metav1.Object

	// Object is the object from an event.
	Object runtime.Object
}

var _ Mapper = ToRequestsFunc(nil)

// ToRequestsFunc implements Mapper using a function.
type ToRequestsFunc func(MapObject) []reconcile.Request

// Map implements Mapper
func (m ToRequestsFunc) Map(i MapObject) []reconcile.Request {
	return m(i)
	return f(e.toRequests)
}
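A sketch of the new function-based construction that replaces the old Mapper/MapObject types; the mapping rule below (funnel every observed object to a hypothetical fixed "parent" in the same namespace) is purely illustrative:

package demo

import (
	"k8s.io/apimachinery/pkg/types"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// mapToParent is a hypothetical mapping: every observed object produces one
// request targeting a fixed "parent" resource in the same namespace.
var mapToParent = handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
	return []reconcile.Request{{
		NamespacedName: types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      "parent", // illustrative fixed target
		},
	}}
})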
49
vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go
generated
vendored
@ -57,33 +57,39 @@ type EnqueueRequestForOwner struct {
	mapper meta.RESTMapper
}

// Create implements EventHandler
// Create implements EventHandler.
func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
	for _, req := range e.getOwnerReconcileRequest(evt.Meta) {
	reqs := map[reconcile.Request]empty{}
	e.getOwnerReconcileRequest(evt.Object, reqs)
	for req := range reqs {
		q.Add(req)
	}
}

// Update implements EventHandler
// Update implements EventHandler.
func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
	for _, req := range e.getOwnerReconcileRequest(evt.MetaOld) {
		q.Add(req)
	}
	for _, req := range e.getOwnerReconcileRequest(evt.MetaNew) {
	reqs := map[reconcile.Request]empty{}
	e.getOwnerReconcileRequest(evt.ObjectOld, reqs)
	e.getOwnerReconcileRequest(evt.ObjectNew, reqs)
	for req := range reqs {
		q.Add(req)
	}
}

// Delete implements EventHandler
// Delete implements EventHandler.
func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
	for _, req := range e.getOwnerReconcileRequest(evt.Meta) {
	reqs := map[reconcile.Request]empty{}
	e.getOwnerReconcileRequest(evt.Object, reqs)
	for req := range reqs {
		q.Add(req)
	}
}

// Generic implements EventHandler
// Generic implements EventHandler.
func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
	for _, req := range e.getOwnerReconcileRequest(evt.Meta) {
	reqs := map[reconcile.Request]empty{}
	e.getOwnerReconcileRequest(evt.Object, reqs)
	for req := range reqs {
		q.Add(req)
	}
}
@ -99,29 +105,27 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme)
	}
	// Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions.
	if len(kinds) != 1 {
		err := fmt.Errorf("Expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds)
		log.Error(nil, "Expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds)
		err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds)
		log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds)
		return err

	}
	// Cache the Group and Kind for the OwnerType
	e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind}
	return nil
}

// getOwnerReconcileRequest looks at object and returns a slice of reconcile.Request to reconcile
// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile
// owners of object that match e.OwnerType.
func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object) []reconcile.Request {
func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) {
	// Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested
	// by the user
	var result []reconcile.Request
	for _, ref := range e.getOwnersReferences(object) {
		// Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType
		refGV, err := schema.ParseGroupVersion(ref.APIVersion)
		if err != nil {
			log.Error(err, "Could not parse OwnerReference APIVersion",
				"api version", ref.APIVersion)
			return nil
			return
		}

		// Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user.
@ -138,23 +142,20 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object)
			mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version)
			if err != nil {
				log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind)
				return nil
				return
			}
			if mapping.Scope.Name() != meta.RESTScopeNameRoot {
				request.Namespace = object.GetNamespace()
			}

			result = append(result, request)
			result[request] = empty{}
		}
	}

	// Return the matches
	return result
}

// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner
// - if IsController is true: only take the Controller OwnerReference (if found)
// - if IsController is false: take all OwnerReferences
// - if IsController is false: take all OwnerReferences.
func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference {
	if object == nil {
		return nil
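For orientation, a hedged sketch of the usual construction; OwnerType and IsController are the exported fields of the struct shown above, while the Deployment-owns-Pods relationship and the ctrl parameter are illustrative assumptions:

package demo

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchOwnedPods(ctrl controller.Controller) error {
	// Pod events are mapped to requests for their controlling Deployment
	// owner; duplicate requests for a single event are collapsed via the
	// map[reconcile.Request]empty set shown above.
	return ctrl.Watch(
		&source.Kind{Type: &corev1.Pod{}},
		&handler.EnqueueRequestForOwner{
			OwnerType:    &appsv1.Deployment{},
			IsController: true,
		},
	)
}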
8
vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go
generated
vendored
@ -75,28 +75,28 @@ type Funcs struct {
	GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface)
}

// Create implements EventHandler
// Create implements EventHandler.
func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
	if h.CreateFunc != nil {
		h.CreateFunc(e, q)
	}
}

// Delete implements EventHandler
// Delete implements EventHandler.
func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
	if h.DeleteFunc != nil {
		h.DeleteFunc(e, q)
	}
}

// Update implements EventHandler
// Update implements EventHandler.
func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
	if h.UpdateFunc != nil {
		h.UpdateFunc(e, q)
	}
}

// Generic implements EventHandler
// Generic implements EventHandler.
func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
	if h.GenericFunc != nil {
		h.GenericFunc(e, q)
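A short sketch of Funcs in use: only the callbacks you set are invoked, the rest are no-ops. The filtering rule below (enqueue only when the object's generation changed) is an illustrative assumption, not part of the library:

package demo

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"

	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// updateOnly is a hypothetical handler that reacts to updates and silently
// ignores every other event kind (their function fields stay nil).
var updateOnly = handler.Funcs{
	UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
		// Only enqueue when the object's generation actually changed.
		if e.ObjectOld.GetGeneration() == e.ObjectNew.GetGeneration() {
			return
		}
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
			Name:      e.ObjectNew.GetName(),
			Namespace: e.ObjectNew.GetNamespace(),
		}})
	},
}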
9
vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go
generated
vendored
@ -35,7 +35,7 @@ type Handler struct {
	Checks map[string]Checker
}

// checkStatus holds the output of a particular check
// checkStatus holds the output of a particular check.
type checkStatus struct {
	name    string
	healthy bool
@ -173,8 +173,7 @@ type CheckHandler struct {
}

func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	err := h.Checker(req)
	if err != nil {
	if err := h.Checker(req); err != nil {
		http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError)
	} else {
		fmt.Fprint(resp, "ok")
@ -184,10 +183,10 @@ func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
// Checker knows how to perform a health check.
type Checker func(req *http.Request) error

// Ping returns true automatically when checked
// Ping returns true automatically when checked.
var Ping Checker = func(_ *http.Request) error { return nil }

// getExcludedChecks extracts the health check names to be excluded from the query param
// getExcludedChecks extracts the health check names to be excluded from the query param.
func getExcludedChecks(r *http.Request) sets.String {
	checks, found := r.URL.Query()["exclude"]
	if found {
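To see how these pieces compose, a hedged sketch that serves the Ping checker over plain HTTP; the bind address and routes are illustrative, and Handler's own ServeHTTP (defined elsewhere in this file, outside these hunks) is assumed:

package main

import (
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func main() {
	// Ping always succeeds, so the aggregated endpoint reports "ok"
	// unless other checks are added and fail.
	h := &healthz.Handler{Checks: map[string]healthz.Checker{
		"ping": healthz.Ping,
	}}

	mux := http.NewServeMux()
	mux.Handle("/healthz", http.StripPrefix("/healthz", h))
	mux.Handle("/healthz/", http.StripPrefix("/healthz", h))

	// The bind address is illustrative.
	_ = http.ListenAndServe(":8081", mux)
}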
260
vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go
generated
vendored
260
vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go
generated
vendored
@ -17,32 +17,27 @@ limitations under the License.
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"github.com/go-logr/logr"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
var log = logf.RuntimeLog.WithName("controller")
|
||||
|
||||
var _ inject.Injector = &Controller{}
|
||||
|
||||
// Controller implements controller.Controller
|
||||
// Controller implements controller.Controller.
|
||||
type Controller struct {
|
||||
// Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required.
|
||||
Name string
|
||||
@ -55,19 +50,6 @@ type Controller struct {
|
||||
// Defaults to the DefaultReconcileFunc.
|
||||
Do reconcile.Reconciler
|
||||
|
||||
// Client is a lazily initialized Client. The controllerManager will initialize this when Start is called.
|
||||
Client client.Client
|
||||
|
||||
// Scheme is injected by the controllerManager when controllerManager.Start is called
|
||||
Scheme *runtime.Scheme
|
||||
|
||||
// informers are injected by the controllerManager when controllerManager.Start is called
|
||||
Cache cache.Cache
|
||||
|
||||
// Config is the rest.Config used to talk to the apiserver. Defaults to one of in-cluster, environment variable
|
||||
// specified, or the ~/.kube/Config.
|
||||
Config *rest.Config
|
||||
|
||||
// MakeQueue constructs the queue for this controller once the controller is ready to start.
|
||||
// This exists because the standard Kubernetes workqueues start themselves immediately, which
|
||||
// leads to goroutine leaks if something calls controller.New repeatedly.
|
||||
@ -78,29 +60,31 @@ type Controller struct {
|
||||
Queue workqueue.RateLimitingInterface
|
||||
|
||||
// SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates
|
||||
// Deprecated: the caller should handle injected fields itself.
|
||||
SetFields func(i interface{}) error
|
||||
|
||||
// mu is used to synchronize Controller setup
|
||||
mu sync.Mutex
|
||||
|
||||
// JitterPeriod allows tests to reduce the JitterPeriod so they complete faster
|
||||
JitterPeriod time.Duration
|
||||
|
||||
// WaitForCacheSync allows tests to mock out the WaitForCacheSync function to return an error
|
||||
// defaults to Cache.WaitForCacheSync
|
||||
WaitForCacheSync func(stopCh <-chan struct{}) bool
|
||||
|
||||
// Started is true if the Controller has been Started
|
||||
Started bool
|
||||
|
||||
// Recorder is an event recorder for recording Event resources to the
|
||||
// Kubernetes API.
|
||||
Recorder record.EventRecorder
|
||||
// ctx is the context that was passed to Start() and used when starting watches.
|
||||
//
|
||||
// According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context,
|
||||
// while we usually always strive to follow best practices, we consider this a legacy case and it should
|
||||
// undergo a major refactoring and redesign to allow for context to not be stored in a struct.
|
||||
ctx context.Context
|
||||
|
||||
// TODO(community): Consider initializing a logger with the Controller Name as the tag
|
||||
// CacheSyncTimeout refers to the time limit set on waiting for cache to sync
|
||||
// Defaults to 2 minutes if not set.
|
||||
CacheSyncTimeout time.Duration
|
||||
|
||||
// watches maintains a list of sources, handlers, and predicates to start when the controller is started.
|
||||
watches []watchDescription
|
||||
// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
|
||||
startWatches []watchDescription
|
||||
|
||||
// Log is used to log messages to users during reconciliation, or for example when a watch is started.
|
||||
Log logr.Logger
|
||||
}
|
||||
|
||||
// watchDescription contains all the information necessary to start a watch.
|
||||
@ -110,12 +94,14 @@ type watchDescription struct {
|
||||
predicates []predicate.Predicate
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler
|
||||
func (c *Controller) Reconcile(r reconcile.Request) (reconcile.Result, error) {
|
||||
return c.Do.Reconcile(r)
|
||||
// Reconcile implements reconcile.Reconciler.
|
||||
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
|
||||
ctx = logf.IntoContext(ctx, log)
|
||||
return c.Do.Reconcile(ctx, req)
|
||||
}
|
||||
|
||||
// Watch implements controller.Controller
|
||||
// Watch implements controller.Controller.
|
||||
func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
@ -133,24 +119,39 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
|
||||
}
|
||||
}
|
||||
|
||||
c.watches = append(c.watches, watchDescription{src: src, handler: evthdler, predicates: prct})
|
||||
if c.Started {
|
||||
log.Info("Starting EventSource", "controller", c.Name, "source", src)
|
||||
return src.Start(evthdler, c.Queue, prct...)
|
||||
// Controller hasn't started yet, store the watches locally and return.
|
||||
//
|
||||
// These watches are going to be held on the controller struct until the manager or user calls Start(...).
|
||||
if !c.Started {
|
||||
c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct})
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
c.Log.Info("Starting EventSource", "source", src)
|
||||
return src.Start(c.ctx, evthdler, c.Queue, prct...)
|
||||
}
|
||||
|
||||
// Start implements controller.Controller
|
||||
func (c *Controller) Start(stop <-chan struct{}) error {
|
||||
// Start implements controller.Controller.
|
||||
func (c *Controller) Start(ctx context.Context) error {
|
||||
// use an IIFE to get proper lock handling
|
||||
// but lock outside to get proper handling of the queue shutdown
|
||||
c.mu.Lock()
|
||||
if c.Started {
|
||||
return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times")
|
||||
}
|
||||
|
||||
c.initMetrics()
|
||||
|
||||
// Set the internal context.
|
||||
c.ctx = ctx
|
||||
|
||||
c.Queue = c.MakeQueue()
|
||||
defer c.Queue.ShutDown() // needs to be outside the iife so that we shutdown after the stop channel is closed
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
c.Queue.ShutDown()
|
||||
}()
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
err := func() error {
|
||||
defer c.mu.Unlock()
|
||||
|
||||
@ -160,37 +161,59 @@ func (c *Controller) Start(stop <-chan struct{}) error {
|
||||
// NB(directxman12): launch the sources *before* trying to wait for the
|
||||
// caches to sync so that they have a chance to register their intendeded
|
||||
// caches.
|
||||
for _, watch := range c.watches {
log.Info("Starting EventSource", "controller", c.Name, "source", watch.src)
if err := watch.src.Start(watch.handler, c.Queue, watch.predicates...); err != nil {
for _, watch := range c.startWatches {
c.Log.Info("Starting EventSource", "source", watch.src)

if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
return err
}
}

// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
log.Info("Starting Controller", "controller", c.Name)
c.Log.Info("Starting Controller")

// Wait for the caches to be synced before starting workers
if c.WaitForCacheSync == nil {
c.WaitForCacheSync = c.Cache.WaitForCacheSync
}
if ok := c.WaitForCacheSync(stop); !ok {
// This code is unreachable right now since WaitForCacheSync will never return an error
// Leaving it here because that could happen in the future
err := fmt.Errorf("failed to wait for %s caches to sync", c.Name)
log.Error(err, "Could not wait for Cache to sync", "controller", c.Name)
return err
for _, watch := range c.startWatches {
syncingSource, ok := watch.src.(source.SyncingSource)
if !ok {
continue
}

if err := func() error {
// use a context with timeout for launching sources and syncing caches.
sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
defer cancel()

// WaitForSync waits for a definitive timeout, and returns if there
// is an error or a timeout
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err)
c.Log.Error(err, "Could not wait for Cache to sync")
return err
}

return nil
}(); err != nil {
return err
}
}

if c.JitterPeriod == 0 {
c.JitterPeriod = 1 * time.Second
}
// All the watches have been started, we can reset the local slice.
//
// We should never hold watches more than necessary, each watch source can hold a backing cache,
// which won't be garbage collected if we hold a reference to it.
c.startWatches = nil

// Launch workers to process resources
log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles)
c.Log.Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
wg.Add(c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
// Process work items
go wait.Until(c.worker, c.JitterPeriod, stop)
go func() {
defer wg.Done()
// Run a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the reconcileHandler is never invoked concurrently with the same object.
for c.processNextWorkItem(ctx) {
}
}()
}

c.Started = true
@ -200,21 +223,16 @@ func (c *Controller) Start(stop <-chan struct{}) error {
return err
}

<-stop
log.Info("Stopping workers", "controller", c.Name)
<-ctx.Done()
c.Log.Info("Shutdown signal received, waiting for all workers to finish")
wg.Wait()
c.Log.Info("All workers finished")
return nil
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the reconcileHandler is never invoked concurrently with the same object.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}

// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the reconcileHandler.
func (c *Controller) processNextWorkItem() bool {
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.Queue.Get()
if shutdown {
// Stop working
@ -229,70 +247,92 @@ func (c *Controller) processNextWorkItem() bool {
// period.
defer c.Queue.Done(obj)

return c.reconcileHandler(obj)
ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1)
defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1)

c.reconcileHandler(ctx, obj)
return true
}

func (c *Controller) reconcileHandler(obj interface{}) bool {
const (
labelError = "error"
labelRequeueAfter = "requeue_after"
labelRequeue = "requeue"
labelSuccess = "success"
)

func (c *Controller) initMetrics() {
ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0)
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0)
ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles))
}

func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
// Update metrics after processing each item
reconcileStartTS := time.Now()
defer func() {
c.updateMetrics(time.Since(reconcileStartTS))
}()

var req reconcile.Request
var ok bool
if req, ok = obj.(reconcile.Request); !ok {
// Make sure that the object is a valid request.
req, ok := obj.(reconcile.Request)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.Queue.Forget(obj)
log.Error(nil, "Queue item was not a Request",
"controller", c.Name, "type", fmt.Sprintf("%T", obj), "value", obj)
c.Log.Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
// Return true, don't take a break
return true
return
}
// RunInformersAndControllers the syncHandler, passing it the namespace/Name string of the

log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
ctx = logf.IntoContext(ctx, log)

// RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the
// resource to be synced.
if result, err := c.Do.Reconcile(req); err != nil {
result, err := c.Do.Reconcile(ctx, req)
switch {
case err != nil:
c.Queue.AddRateLimited(req)
log.Error(err, "Reconciler error", "controller", c.Name, "request", req)
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc()
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "error").Inc()
return false
} else if result.RequeueAfter > 0 {
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc()
log.Error(err, "Reconciler error")
case result.RequeueAfter > 0:
// The result.RequeueAfter request will be lost, if it is returned
// along with a non-nil error. But this is intended as
// We need to drive to stable reconcile loops before queuing due
// to result.RequestAfter
c.Queue.Forget(obj)
c.Queue.AddAfter(req, result.RequeueAfter)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue_after").Inc()
return true
} else if result.Requeue {
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
case result.Requeue:
c.Queue.AddRateLimited(req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue").Inc()
return true
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc()
default:
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.Queue.Forget(obj)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc()
}

// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.Queue.Forget(obj)

// TODO(directxman12): What does 1 mean? Do we want level constants? Do we want levels at all?
log.V(1).Info("Successfully Reconciled", "controller", c.Name, "request", req)

ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "success").Inc()
// Return true, don't take a break
return true
}

// InjectFunc implement SetFields.Injector
// GetLogger returns this controller's logger.
func (c *Controller) GetLogger() logr.Logger {
return c.Log
}

// InjectFunc implement SetFields.Injector.
func (c *Controller) InjectFunc(f inject.Func) error {
c.SetFields = f
return nil
}

// updateMetrics updates prometheus metrics within the controller
// updateMetrics updates prometheus metrics within the controller.
func (c *Controller) updateMetrics(reconcileTime time.Duration) {
ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds())
}
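A hedged sketch of how a Reconciler drives the switch in reconcileHandler above (the type, condition, and interval are illustrative only, assuming controller-runtime v0.9 APIs): an error rate-limits the request, RequeueAfter schedules it, Requeue rate-limits it, and an empty Result forgets it.

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type exampleReconciler struct{} // hypothetical type, for illustration only

func (r *exampleReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// err != nil       -> AddRateLimited + labelError counter
	// RequeueAfter > 0 -> Forget + AddAfter + labelRequeueAfter counter
	// Requeue          -> AddRateLimited + labelRequeue counter
	// zero Result      -> Forget + labelSuccess counter
	notReadyYet := false // illustrative condition
	if notReadyYet {
		return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
	}
	return reconcile.Result{}, nil
}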
29
vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go
generated
vendored
@ -18,6 +18,7 @@ package metrics

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)

@ -25,24 +26,40 @@ var (
// ReconcileTotal is a prometheus counter metrics which holds the total
// number of reconciliations per controller. It has two labels. controller label refers
// to the controller name and result label refers to the reconcile result i.e
// success, error, requeue, requeue_after
// success, error, requeue, requeue_after.
ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_total",
Help: "Total number of reconciliations per controller",
}, []string{"controller", "result"})

// ReconcileErrors is a prometheus counter metrics which holds the total
// number of errors from the Reconciler
// number of errors from the Reconciler.
ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_errors_total",
Help: "Total number of reconciliation errors per controller",
}, []string{"controller"})

// ReconcileTime is a prometheus metric which keeps track of the duration
// of reconciliations
// of reconciliations.
ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "controller_runtime_reconcile_time_seconds",
Help: "Length of time per reconciliation per controller",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60},
}, []string{"controller"})

// WorkerCount is a prometheus metric which holds the number of
// concurrent reconciles per controller.
WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_max_concurrent_reconciles",
Help: "Maximum number of concurrent reconciles per controller",
}, []string{"controller"})

// ActiveWorkers is a prometheus metric which holds the number
// of active workers per controller.
ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_active_workers",
Help: "Number of currently used workers per controller",
}, []string{"controller"})
)

@ -51,9 +68,11 @@ func init() {
ReconcileTotal,
ReconcileErrors,
ReconcileTime,
WorkerCount,
ActiveWorkers,
// expose process metrics like CPU, Memory, file descriptor usage etc.
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
// expose Go runtime metrics like GC stats, memory stats etc.
prometheus.NewGoCollector(),
collectors.NewGoCollector(),
)
}
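The init block above registers everything on the shared metrics.Registry, so user code can put custom collectors next to the controller_runtime_* ones. A minimal sketch (the metric name and purpose are hypothetical):

import (
	"github.com/prometheus/client_golang/prometheus"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

// snapshotRetries is a hypothetical custom metric; anything registered on
// metrics.Registry is served from the manager's /metrics endpoint.
var snapshotRetries = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_snapshot_retries_total",
	Help: "Number of snapshot create retries.",
})

func init() {
	metrics.Registry.MustRegister(snapshotRetries)
}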
3
vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go
generated
vendored
@ -14,9 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Package log contains utilities for fetching a new logger
// when one is not already available.
// Deprecated: use pkg/log
package log

import (
78
vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package objectutil

import (
"errors"
"fmt"

apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// FilterWithLabels returns a copy of the items in objs matching labelSel.
func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) {
outItems := make([]runtime.Object, 0, len(objs))
for _, obj := range objs {
meta, err := apimeta.Accessor(obj)
if err != nil {
return nil, err
}
if labelSel != nil {
lbls := labels.Set(meta.GetLabels())
if !labelSel.Matches(lbls) {
continue
}
}
outItems = append(outItems, obj.DeepCopyObject())
}
return outItems, nil
}

// IsAPINamespaced returns true if the object is namespace scoped.
// For unstructured objects the gvk is found from the object itself.
func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return false, err
}

return IsAPINamespacedWithGVK(gvk, scheme, restmapper)
}

// IsAPINamespacedWithGVK returns true if the object having the provided
// GVK is namespace scoped.
func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
if err != nil {
return false, fmt.Errorf("failed to get restmapping: %w", err)
}

scope := restmapping.Scope.Name()

if scope == "" {
return false, errors.New("scope cannot be identified, empty scope returned")
}

if scope != apimeta.RESTScopeNameRoot {
return true, nil
}
return false, nil
}
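FilterWithLabels keeps exactly the objects whose label set satisfies the selector. Since objectutil is an internal package, the sketch below uses the same apimachinery calls directly to show the matching step (the pod name and labels are made up):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	pod := corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:   "example-pod",
		Labels: map[string]string{"app": "csi-rbdplugin"},
	}}

	// This is the test FilterWithLabels applies to every object.
	sel := labels.SelectorFromSet(labels.Set{"app": "csi-rbdplugin"})
	fmt.Println(sel.Matches(labels.Set(pod.GetLabels()))) // prints: true
}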
147
vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go
generated
vendored
@ -17,7 +17,9 @@ limitations under the License.
package recorder

import (
"context"
"fmt"
"sync"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
@ -26,35 +28,150 @@ import (
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/recorder"
)

type provider struct {
// EventBroadcasterProducer makes an event broadcaster, returning
// whether or not the broadcaster should be stopped with the Provider,
// or not (e.g. if it's shared, it shouldn't be stopped with the Provider).
type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool)

// Provider is a recorder.Provider that records events to the k8s API server
// and to a logr Logger.
type Provider struct {
lock sync.RWMutex
stopped bool

// scheme to specify when creating a recorder
scheme *runtime.Scheme
// eventBroadcaster to create new recorder instance
eventBroadcaster record.EventBroadcaster
// logger is the logger to use when logging diagnostic event info
logger logr.Logger
logger logr.Logger
evtClient typedcorev1.EventInterface
makeBroadcaster EventBroadcasterProducer

broadcasterOnce sync.Once
broadcaster record.EventBroadcaster
stopBroadcaster bool
}

// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to
// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election
// code finishes up and tries to continue emitting events.

// Stop attempts to stop this provider, stopping the underlying broadcaster
// if the broadcaster asked to be stopped. It kinda tries to honor the given
// context, but the underlying broadcaster has an indefinite wait that doesn't
// return until all queued events are flushed, so this may end up just returning
// before the underlying wait has finished instead of cancelling the wait.
// This is Very Frustrating™.
func (p *Provider) Stop(shutdownCtx context.Context) {
doneCh := make(chan struct{})

go func() {
// technically, this could start the broadcaster, but practically, it's
// almost certainly already been started (e.g. by leader election). We
// need to invoke this to ensure that we don't inadvertently race with
// an invocation of getBroadcaster.
broadcaster := p.getBroadcaster()
if p.stopBroadcaster {
p.lock.Lock()
broadcaster.Shutdown()
p.stopped = true
p.lock.Unlock()
}
close(doneCh)
}()

select {
case <-shutdownCtx.Done():
case <-doneCh:
}
}

// getBroadcaster ensures that a broadcaster is started for this
// provider, and returns it. It's threadsafe.
func (p *Provider) getBroadcaster() record.EventBroadcaster {
// NB(directxman12): this can technically still leak if something calls
// "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we
// create the broadcaster in start, we could race with other things that
// are started at the same time & want to emit events. The alternative is
// silently swallowing events and more locking, but that seems suboptimal.

p.broadcasterOnce.Do(func() {
broadcaster, stop := p.makeBroadcaster()
broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.evtClient})
broadcaster.StartEventWatcher(
func(e *corev1.Event) {
p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message)
})
p.broadcaster = broadcaster
p.stopBroadcaster = stop
})

return p.broadcaster
}
// NewProvider creates a new Provider instance.
func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) {
func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to init clientSet: %w", err)
}

p := &provider{scheme: scheme, logger: logger, eventBroadcaster: broadcaster}
p.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
p.eventBroadcaster.StartEventWatcher(
func(e *corev1.Event) {
p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message)
})

p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: clientSet.CoreV1().Events("")}
return p, nil
}

func (p *provider) GetEventRecorderFor(name string) record.EventRecorder {
return p.eventBroadcaster.NewRecorder(p.scheme, corev1.EventSource{Component: name})
// GetEventRecorderFor returns an event recorder that broadcasts to this provider's
// broadcaster. All events will be associated with a component of the given name.
func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder {
return &lazyRecorder{
prov: p,
name: name,
}
}

// lazyRecorder is a recorder that doesn't actually instantiate any underlying
// recorder until the first event is emitted.
type lazyRecorder struct {
prov *Provider
name string

recOnce sync.Once
rec record.EventRecorder
}

// ensureRecording ensures that a concrete recorder is populated for this recorder.
func (l *lazyRecorder) ensureRecording() {
l.recOnce.Do(func() {
broadcaster := l.prov.getBroadcaster()
l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name})
})
}

func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) {
l.ensureRecording()

l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.Event(object, eventtype, reason, message)
}
l.prov.lock.RUnlock()
}
func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
l.ensureRecording()

l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.Eventf(object, eventtype, reason, messageFmt, args...)
}
l.prov.lock.RUnlock()
}
func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
l.ensureRecording()

l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...)
}
l.prov.lock.RUnlock()
}
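Callers never see lazyRecorder directly; they reach it through the manager. A hedged sketch of typical use (the component name and event fields are illustrative):

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func emitEvent(mgr manager.Manager, pod *corev1.Pod) {
	// The recorder returned here is a lazyRecorder: the broadcaster only
	// starts when the first event is actually emitted.
	rec := mgr.GetEventRecorderFor("example-controller")
	rec.Event(pod, corev1.EventTypeNormal, "Reconciled", "object processed")
}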
2
vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go
generated
vendored
@ -15,7 +15,7 @@ limitations under the License.
*/

/*
Package leaderelection contains a constructors for a leader election resource lock.
Package leaderelection contains a constructor for a leader election resource lock.
This is used to ensure that multiple copies of a controller manager can be run with
only one active set of controllers, for active-passive HA.
28
vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go
generated
vendored
@ -31,28 +31,38 @@ import (

const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"

// Options provides the required configuration to create a new resource lock
// Options provides the required configuration to create a new resource lock.
type Options struct {
// LeaderElection determines whether or not to use leader election when
// starting the manager.
LeaderElection bool

// LeaderElectionResourceLock determines which resource lock to use for leader election,
// defaults to "configmapsleases".
LeaderElectionResourceLock string

// LeaderElectionNamespace determines the namespace in which the leader
// election configmap will be created.
// election resource will be created.
LeaderElectionNamespace string

// LeaderElectionID determines the name of the configmap that leader election
// LeaderElectionID determines the name of the resource that leader election
// will use for holding the leader lock.
LeaderElectionID string
}

// NewResourceLock creates a new config map resource lock for use in a leader
// election loop
// NewResourceLock creates a new resource lock for use in a leader election loop.
func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) {
if !options.LeaderElection {
return nil, nil
}

// Default resource lock to "configmapsleases". We must keep this default until we are sure all controller-runtime
// users have upgraded from the original default ConfigMap lock to a controller-runtime version that has this new
// default. Many users of controller-runtime skip versions, so we should be extremely conservative here.
if options.LeaderElectionResourceLock == "" {
options.LeaderElectionResourceLock = resourcelock.ConfigMapsLeasesResourceLock
}

// LeaderElectionID must be provided to prevent clashes
if options.LeaderElectionID == "" {
return nil, errors.New("LeaderElectionID must be configured")
@ -75,13 +85,12 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
id = id + "_" + string(uuid.NewUUID())

// Construct client for leader election
client, err := kubernetes.NewForConfig(config)
client, err := kubernetes.NewForConfig(rest.AddUserAgent(config, "leader-election"))
if err != nil {
return nil, err
}

// TODO(JoelSpeed): switch to leaderelection object in 1.12
return resourcelock.New(resourcelock.ConfigMapsResourceLock,
return resourcelock.New(options.LeaderElectionResourceLock,
options.LeaderElectionNamespace,
options.LeaderElectionID,
client.CoreV1(),
@ -95,8 +104,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
func getInClusterNamespace() (string, error) {
// Check whether the namespace file exists.
// If not, we are not running in cluster so can't guess the namespace.
_, err := os.Stat(inClusterNamespacePath)
if os.IsNotExist(err) {
if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) {
return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace")
} else if err != nil {
return "", fmt.Errorf("error checking namespace file: %w", err)
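These Options are normally populated from the manager's configuration. A minimal sketch, assuming controller-runtime v0.9 manager.Options field names (the lock ID is illustrative and must be unique per group of cooperating replicas):

import (
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	ctrl "sigs.k8s.io/controller-runtime"
)

func newManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:   true,
		LeaderElectionID: "example-controller-lock",
		// Overrides the conservative "configmapsleases" default described above.
		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
	})
}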
109
vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go
generated
vendored
@ -29,11 +29,11 @@ type loggerPromise struct {
childPromises []*loggerPromise
promisesLock sync.Mutex

name *string
tags []interface{}
name *string
tags []interface{}
level int
}

// WithName provides a new Logger with the name appended
func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromise {
res := &loggerPromise{
logger: l,
@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromis
return res
}

// WithValues provides a new Logger with the tags appended
// WithValues provides a new Logger with the tags appended.
func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise {
res := &loggerPromise{
logger: l,
@ -61,7 +61,20 @@ func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *lo
return res
}

// Fulfill instantiates the Logger with the provided logger
func (p *loggerPromise) V(l *DelegatingLogger, level int) *loggerPromise {
res := &loggerPromise{
logger: l,
level: level,
promisesLock: sync.Mutex{},
}

p.promisesLock.Lock()
defer p.promisesLock.Unlock()
p.childPromises = append(p.childPromises, res)
return res
}

// Fulfill instantiates the Logger with the provided logger.
func (p *loggerPromise) Fulfill(parentLogger logr.Logger) {
var logger = parentLogger
if p.name != nil {
@ -71,9 +84,14 @@ func (p *loggerPromise) Fulfill(parentLogger logr.Logger) {
if p.tags != nil {
logger = logger.WithValues(p.tags...)
}
if p.level != 0 {
logger = logger.V(p.level)
}

p.logger.Logger = logger
p.logger.lock.Lock()
p.logger.logger = logger
p.logger.promise = nil
p.logger.lock.Unlock()

for _, childPromise := range p.childPromises {
childPromise.Fulfill(logger)
@ -86,30 +104,91 @@ func (p *loggerPromise) Fulfill(parentLogger logr.Logger) {

// logger. It expects to have *some* logr.Logger set at all times (generally
// a no-op logger before the promises are fulfilled).
type DelegatingLogger struct {
logr.Logger
lock sync.RWMutex
logger logr.Logger
promise *loggerPromise
}

// WithName provides a new Logger with the name appended
func (l *DelegatingLogger) WithName(name string) logr.Logger {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info
// logs.
func (l *DelegatingLogger) Enabled() bool {
l.lock.RLock()
defer l.lock.RUnlock()
return l.logger.Enabled()
}

// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to
// the log line. The key/value pairs can then be used to add additional
// variable information. The key/value pairs should alternate string
// keys and arbitrary values.
func (l *DelegatingLogger) Info(msg string, keysAndValues ...interface{}) {
l.lock.RLock()
defer l.lock.RUnlock()
l.logger.Info(msg, keysAndValues...)
}

// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to calling Info with the "error" named value, but may
// have unique behavior, and should be preferred for logging errors (see the
// package documentations for more information).
//
// The msg field should be used to add context to any underlying error,
// while the err field should be used to attach the actual error that
// triggered this log line, if present.
func (l *DelegatingLogger) Error(err error, msg string, keysAndValues ...interface{}) {
l.lock.RLock()
defer l.lock.RUnlock()
l.logger.Error(err, msg, keysAndValues...)
}

// V returns an Logger value for a specific verbosity level, relative to
// this Logger. In other words, V values are additive. V higher verbosity
// level means a log message is less important. It's illegal to pass a log
// level less than zero.
func (l *DelegatingLogger) V(level int) logr.Logger {
l.lock.RLock()
defer l.lock.RUnlock()

if l.promise == nil {
return l.Logger.WithName(name)
return l.logger.V(level)
}

res := &DelegatingLogger{Logger: l.Logger}
res := &DelegatingLogger{logger: l.logger}
promise := l.promise.V(res, level)
res.promise = promise

return res
}

// WithName provides a new Logger with the name appended.
func (l *DelegatingLogger) WithName(name string) logr.Logger {
l.lock.RLock()
defer l.lock.RUnlock()

if l.promise == nil {
return l.logger.WithName(name)
}

res := &DelegatingLogger{logger: l.logger}
promise := l.promise.WithName(res, name)
res.promise = promise

return res
}

// WithValues provides a new Logger with the tags appended
// WithValues provides a new Logger with the tags appended.
func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger {
l.lock.RLock()
defer l.lock.RUnlock()

if l.promise == nil {
return l.Logger.WithValues(tags...)
return l.logger.WithValues(tags...)
}

res := &DelegatingLogger{Logger: l.Logger}
res := &DelegatingLogger{logger: l.logger}
promise := l.promise.WithValues(res, tags...)
res.promise = promise

@ -129,7 +208,7 @@ func (l *DelegatingLogger) Fulfill(actual logr.Logger) {
// the given logger before its promise is fulfilled.
func NewDelegatingLogger(initial logr.Logger) *DelegatingLogger {
l := &DelegatingLogger{
Logger: initial,
logger: initial,
promise: &loggerPromise{promisesLock: sync.Mutex{}},
}
l.promise.logger = l
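A hedged sketch of the promise mechanism above: loggers derived from log.Log before SetLogger is called are promises that begin emitting once the delegating logger is fulfilled (zap is one common backend; the messages are illustrative):

package main

import (
	"os"

	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// Derived before SetLogger: this logger is a promise hanging off log.Log.
var setupLog = log.Log.WithName("setup")

func main() {
	setupLog.Info("dropped: the delegating logger still points at NullLogger")

	// Fulfill all outstanding promises; earlier-derived loggers start working.
	log.SetLogger(zap.New(zap.WriteTo(os.Stderr)))

	setupLog.Info("now emitted through the zap-backed logger")
}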
55
vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go
generated
vendored
@ -34,15 +34,66 @@ limitations under the License.
package log

import (
"context"
"sync"
"time"

"github.com/go-logr/logr"
)

// SetLogger sets a concrete logging implementation for all deferred Loggers.
func SetLogger(l logr.Logger) {
loggerWasSetLock.Lock()
defer loggerWasSetLock.Unlock()

loggerWasSet = true
Log.Fulfill(l)
}

// It is safe to assume that if this wasn't set within the first 30 seconds of a binary's
// lifetime, it will never get set. The DelegatingLogger causes a high number of memory
// allocations when not given an actual Logger, so we set a NullLogger to avoid that.
//
// We need to keep the DelegatingLogger because we have various inits() that get a logger from
// here. They will always get executed before any code that imports controller-runtime
// has a chance to run and hence to set an actual logger.
func init() {
// Init is blocking, so start a new goroutine
go func() {
time.Sleep(30 * time.Second)
loggerWasSetLock.Lock()
defer loggerWasSetLock.Unlock()
if !loggerWasSet {
Log.Fulfill(NullLogger{})
}
}()
}

var (
loggerWasSetLock sync.Mutex
loggerWasSet bool
)

// Log is the base logger used by kubebuilder. It delegates
// to another logr.Logger. You *must* call SetLogger to
// get any actual logging.
// to another logr.Logger. You *must* call SetLogger to
// get any actual logging. If SetLogger is not called within
// the first 30 seconds of a binary's lifetime, it will get
// set to a NullLogger.
var Log = NewDelegatingLogger(NullLogger{})

// FromContext returns a logger with predefined values from a context.Context.
func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger {
var log logr.Logger = Log
if ctx != nil {
if logger := logr.FromContext(ctx); logger != nil {
log = logger
}
}
return log.WithValues(keysAndValues...)
}

// IntoContext takes a context and sets the logger as one of its values.
// Use FromContext function to retrieve the logger.
func IntoContext(ctx context.Context, log logr.Logger) context.Context {
return logr.NewContext(ctx, log)
}
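FromContext is the consumer side of the IntoContext call made by reconcileHandler in controller.go above. A sketch of typical use inside a Reconciler (the type is hypothetical):

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type exampleReconciler struct{} // hypothetical type, for illustration

func (exampleReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// The controller stashed a logger carrying "name" and "namespace"
	// values into ctx; FromContext recovers it.
	logger := log.FromContext(ctx)
	logger.Info("reconciling")
	return reconcile.Result{}, nil
}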
14
vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go
generated
vendored
@ -29,32 +29,32 @@ type NullLogger struct{}

var _ logr.Logger = NullLogger{}

// Info implements logr.InfoLogger
// Info implements logr.InfoLogger.
func (NullLogger) Info(_ string, _ ...interface{}) {
// Do nothing.
}

// Enabled implements logr.InfoLogger
// Enabled implements logr.InfoLogger.
func (NullLogger) Enabled() bool {
return false
}

// Error implements logr.Logger
// Error implements logr.Logger.
func (NullLogger) Error(_ error, _ string, _ ...interface{}) {
// Do nothing.
}

// V implements logr.Logger
func (log NullLogger) V(_ int) logr.InfoLogger {
// V implements logr.Logger.
func (log NullLogger) V(_ int) logr.Logger {
return log
}

// WithName implements logr.Logger
// WithName implements logr.Logger.
func (log NullLogger) WithName(_ string) logr.Logger {
return log
}

// WithValues implements logr.Logger
// WithValues implements logr.Logger.
func (log NullLogger) WithValues(_ ...interface{}) logr.Logger {
return log
}
76
vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package log

import (
"sync"

"github.com/go-logr/logr"
)

// KubeAPIWarningLoggerOptions controls the behavior
// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger().
type KubeAPIWarningLoggerOptions struct {
// Deduplicate indicates a given warning message should only be written once.
// Setting this to true in a long-running process handling many warnings can
// result in increased memory use.
Deduplicate bool
}

// KubeAPIWarningLogger is a wrapper around
// a provided logr.Logger that implements the
// rest.WarningHandler interface.
type KubeAPIWarningLogger struct {
// logger is used to log responses with the warning header
logger logr.Logger
// opts contain options controlling warning output
opts KubeAPIWarningLoggerOptions
// writtenLock guards written
writtenLock sync.Mutex
// used to keep track of already logged messages
// and help in de-duplication.
written map[string]struct{}
}

// HandleWarningHeader handles logging for responses from API server that are
// warnings with code being 299 and uses a logr.Logger for its logging purposes.
func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
if code != 299 || len(message) == 0 {
return
}

if l.opts.Deduplicate {
l.writtenLock.Lock()
defer l.writtenLock.Unlock()

if _, alreadyLogged := l.written[message]; alreadyLogged {
return
}
l.written[message] = struct{}{}
}
l.logger.Info(message)
}

// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings
// with code = 299 to the provided logr.Logger.
func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger {
h := &KubeAPIWarningLogger{logger: l, opts: opts}
if opts.Deduplicate {
h.written = map[string]struct{}{}
}
return h
}
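A sketch of wiring this handler into a client configuration, assuming the client-go rest.Config.WarningHandler field available at this vendored version (the logger name is illustrative):

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func configWithWarningLogger() *rest.Config {
	cfg := ctrl.GetConfigOrDie()
	// With Deduplicate set, each distinct 299 warning is logged only once.
	cfg.WarningHandler = log.NewKubeAPIWarningLogger(
		log.Log.WithName("KubeAPIWarningLogger"),
		log.KubeAPIWarningLoggerOptions{Deduplicate: true},
	)
	return cfg
}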
554
vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go
generated
vendored
@ -18,15 +18,18 @@ package manager

import (
"context"
"errors"
"fmt"
"net"
"net/http"
"sync"
"time"

"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
@ -34,64 +37,51 @@ import (

"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/cluster"
"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/healthz"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/recorder"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)

const (
// Values taken from: https://github.com/kubernetes/apiserver/blob/master/pkg/apis/config/v1alpha1/defaults.go
defaultLeaseDuration = 15 * time.Second
defaultRenewDeadline = 10 * time.Second
defaultRetryPeriod = 2 * time.Second
defaultLeaseDuration = 15 * time.Second
defaultRenewDeadline = 10 * time.Second
defaultRetryPeriod = 2 * time.Second
defaultGracefulShutdownPeriod = 30 * time.Second

defaultReadinessEndpoint = "/readyz"
defaultLivenessEndpoint = "/healthz"
defaultMetricsEndpoint = "/metrics"
)

var log = logf.RuntimeLog.WithName("manager")
var _ Runnable = &controllerManager{}

type controllerManager struct {
// config is the rest.config used to talk to the apiserver. Required.
config *rest.Config

// scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults
// to scheme.scheme.
scheme *runtime.Scheme
// cluster holds a variety of methods to interact with a cluster. Required.
cluster cluster.Cluster

// leaderElectionRunnables is the set of Controllers that the controllerManager injects deps into and Starts.
// These Runnables are managed by leader election.
leaderElectionRunnables []Runnable

// nonLeaderElectionRunnables is the set of webhook servers that the controllerManager injects deps into and Starts.
// These Runnables will not be blocked by leader election.
nonLeaderElectionRunnables []Runnable

cache cache.Cache

// TODO(directxman12): Provide an escape hatch to get individual indexers
// client is the client injected into Controllers (and EventHandlers, Sources and Predicates).
client client.Client

// apiReader is the reader that will make requests to the api server and not the cache.
apiReader client.Reader

// fieldIndexes knows how to add field indexes over the Cache used by this controller,
// which can later be consumed via field selectors from the injected client.
fieldIndexes client.FieldIndexer

// recorderProvider is used to generate event recorders that will be injected into Controllers
// (and EventHandlers, Sources and Predicates).
recorderProvider recorder.Provider
recorderProvider *intrec.Provider

// resourceLock forms the basis for leader election
resourceLock resourcelock.Interface

// mapper is used to map resources to kind, and map kind and version.
mapper meta.RESTMapper
// leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease
// on shutdown
leaderElectionReleaseOnCancel bool

// metricsListener is used to serve prometheus metrics
metricsListener net.Listener
@ -118,28 +108,33 @@ type controllerManager struct {
started bool
startedLeader bool
healthzStarted bool
errChan chan error

// NB(directxman12): we don't just use an error channel here to avoid the situation where the
// error channel is too small and we end up blocking some goroutines waiting to report their errors.
// errSignal lets us track when we should stop because an error occurred
errSignal *errSignaler
// controllerOptions are the global controller options.
controllerOptions v1alpha1.ControllerConfigurationSpec

// internalStop is the stop channel *actually* used by everything involved
// with the manager as a stop channel, so that we can pass a stop channel
// to things that need it off the bat (like the Channel source). It can
// be closed via `internalStopper` (by being the same underlying channel).
internalStop <-chan struct{}
// Logger is the logger that should be used by this manager.
// If none is set, it defaults to log.Log global logger.
logger logr.Logger

// internalStopper is the write side of the internal stop channel, allowing us to close it.
// It and `internalStop` should point to the same channel.
internalStopper chan<- struct{}
// leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper,
// because for safety reasons we need to os.Exit() when we lose the leader election, meaning that
// it must be deferred until after gracefulShutdown is done.
leaderElectionCancel context.CancelFunc

// leaderElectionStopped is an internal channel used to signal the stopping procedure that the
// LeaderElection.Run(...) function has returned and the shutdown can proceed.
leaderElectionStopped chan struct{}

// stop procedure engaged. In other words, we should not add anything else to the manager
stopProcedureEngaged bool

// elected is closed when this manager becomes the leader of a group of
// managers, either because it won a leader election or because no leader
// election was configured.
elected chan struct{}

startCache func(stop <-chan struct{}) error
caches []hasCache

// port is the port that the webhook server serves at.
port int
@ -151,67 +146,57 @@ type controllerManager struct {
certDir string

webhookServer *webhook.Server
// webhookServerOnce will be called in GetWebhookServer() to optionally initialize
// webhookServer if unset, and Add() it to controllerManager.
webhookServerOnce sync.Once

// leaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership.
leaseDuration time.Duration
// renewDeadline is the duration that the acting master will retry
// renewDeadline is the duration that the acting controlplane will retry
// refreshing leadership before giving up.
renewDeadline time.Duration
// retryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
retryPeriod time.Duration

// waitForRunnable is holding the number of runnables currently running so that
// we can wait for them to exit before quitting the manager
waitForRunnable sync.WaitGroup

// gracefulShutdownTimeout is the duration given to runnable to stop
// before the manager actually returns on stop.
gracefulShutdownTimeout time.Duration

// onStoppedLeading is called when the leader election lease is lost.
// It can be overridden for tests.
onStoppedLeading func()

// shutdownCtx is the context that can be used during shutdown. It will be cancelled
// after the gracefulShutdownTimeout ended. It must not be accessed before internalStop
// is closed because it will be nil.
shutdownCtx context.Context

internalCtx context.Context
internalCancel context.CancelFunc

// internalProceduresStop channel is used internally to the manager when coordinating
// the proper shutdown of servers. This channel is also used for dependency injection.
internalProceduresStop chan struct{}
}

type errSignaler struct {
// errSignal indicates that an error occurred, when closed. It shouldn't
// be written to.
errSignal chan struct{}

// err is the received error
err error

mu sync.Mutex
}

func (r *errSignaler) SignalError(err error) {
r.mu.Lock()
defer r.mu.Unlock()

if err == nil {
// non-error, ignore
log.Error(nil, "SignalError called without an (with a nil) error, which should never happen, ignoring")
return
}

if r.err != nil {
// we already have an error, don't try again
return
}

// save the error and report it
r.err = err
close(r.errSignal)
}

func (r *errSignaler) Error() error {
r.mu.Lock()
defer r.mu.Unlock()

return r.err
}

func (r *errSignaler) GotError() chan struct{} {
r.mu.Lock()
defer r.mu.Unlock()

return r.errSignal
type hasCache interface {
Runnable
GetCache() cache.Cache
}

// Add sets dependencies on i, and adds it to the list of Runnables to start.
func (cm *controllerManager) Add(r Runnable) error {
cm.mu.Lock()
defer cm.mu.Unlock()
if cm.stopProcedureEngaged {
return errors.New("can't accept new runnable as stop procedure is already engaged")
}

// Set dependencies on the object
if err := cm.SetFields(r); err != nil {
@ -224,6 +209,8 @@ func (cm *controllerManager) Add(r Runnable) error {
if leRunnable, ok := r.(LeaderElectionRunnable); ok && !leRunnable.NeedLeaderElection() {
shouldStart = cm.started
cm.nonLeaderElectionRunnables = append(cm.nonLeaderElectionRunnables, r)
} else if hasCache, ok := r.(hasCache); ok {
cm.caches = append(cm.caches, hasCache)
} else {
shouldStart = cm.startedLeader
cm.leaderElectionRunnables = append(cm.leaderElectionRunnables, r)
@ -231,41 +218,27 @@ func (cm *controllerManager) Add(r Runnable) error {

if shouldStart {
// If already started, start the controller
go func() {
if err := r.Start(cm.internalStop); err != nil {
cm.errSignal.SignalError(err)
}
}()
cm.startRunnable(r)
}

return nil
}
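A sketch of the Runnable categorization in Add above, assuming the v0.9 manager interfaces (the task type is hypothetical): a Runnable reporting NeedLeaderElection() == false lands in nonLeaderElectionRunnables and starts without waiting to win the lease.

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/manager"
)

type perReplicaTask struct{} // hypothetical runnable, for illustration

func (perReplicaTask) Start(ctx context.Context) error {
	<-ctx.Done() // run until the manager shuts down
	return nil
}

// Returning false routes the task into nonLeaderElectionRunnables,
// so it runs on every replica without waiting to win the lease.
func (perReplicaTask) NeedLeaderElection() bool { return false }

func register(mgr manager.Manager) error {
	return mgr.Add(perReplicaTask{})
}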
// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10.
func (cm *controllerManager) SetFields(i interface{}) error {
if _, err := inject.ConfigInto(cm.config, i); err != nil {
return err
}
if _, err := inject.ClientInto(cm.client, i); err != nil {
return err
}
if _, err := inject.APIReaderInto(cm.apiReader, i); err != nil {
return err
}
if _, err := inject.SchemeInto(cm.scheme, i); err != nil {
return err
}
if _, err := inject.CacheInto(cm.cache, i); err != nil {
if err := cm.cluster.SetFields(i); err != nil {
return err
}
if _, err := inject.InjectorInto(cm.SetFields, i); err != nil {
return err
}
if _, err := inject.StopChannelInto(cm.internalStop, i); err != nil {
if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil {
return err
}
if _, err := inject.MapperInto(cm.mapper, i); err != nil {
if _, err := inject.LoggerInto(cm.logger, i); err != nil {
return err
}

return nil
}

@ -278,21 +251,24 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha
cm.mu.Lock()
defer cm.mu.Unlock()

_, found := cm.metricsExtraHandlers[path]
if found {
if _, found := cm.metricsExtraHandlers[path]; found {
return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path)
}

cm.metricsExtraHandlers[path] = handler
log.V(2).Info("Registering metrics http server extra handler", "path", path)
cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path)
return nil
}

// AddHealthzCheck allows you to add Healthz checker
// AddHealthzCheck allows you to add Healthz checker.
func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error {
cm.mu.Lock()
defer cm.mu.Unlock()

if cm.stopProcedureEngaged {
return errors.New("can't accept new healthCheck as stop procedure is already engaged")
}

if cm.healthzStarted {
return fmt.Errorf("unable to add new checker because healthz endpoint has already been created")
}
@ -305,11 +281,15 @@ func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker)
return nil
}

// AddReadyzCheck allows you to add Readyz checker
// AddReadyzCheck allows you to add Readyz checker.
func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error {
cm.mu.Lock()
defer cm.mu.Unlock()

if cm.stopProcedureEngaged {
return errors.New("can't accept new ready check as stop procedure is already engaged")
}

if cm.healthzStarted {
return fmt.Errorf("unable to add new checker because readyz endpoint has already been created")
}
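A minimal sketch of registering probes against these methods (healthz.Ping is the built-in always-healthy checker; checks must be added before the probe endpoints start):

import (
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func addProbes(mgr manager.Manager) error {
	// The names label the checks in the probe response; the URL paths stay
	// /healthz and /readyz (served by serveHealthProbes below).
	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		return err
	}
	return mgr.AddReadyzCheck("readyz", healthz.Ping)
}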
@ -323,52 +303,62 @@ func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker)
}

func (cm *controllerManager) GetConfig() *rest.Config {
return cm.config
return cm.cluster.GetConfig()
}

func (cm *controllerManager) GetClient() client.Client {
return cm.client
return cm.cluster.GetClient()
}

func (cm *controllerManager) GetScheme() *runtime.Scheme {
return cm.scheme
return cm.cluster.GetScheme()
}

func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer {
return cm.fieldIndexes
return cm.cluster.GetFieldIndexer()
}

func (cm *controllerManager) GetCache() cache.Cache {
return cm.cache
return cm.cluster.GetCache()
}

func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder {
return cm.recorderProvider.GetEventRecorderFor(name)
return cm.cluster.GetEventRecorderFor(name)
}

func (cm *controllerManager) GetRESTMapper() meta.RESTMapper {
return cm.mapper
return cm.cluster.GetRESTMapper()
}

func (cm *controllerManager) GetAPIReader() client.Reader {
return cm.apiReader
return cm.cluster.GetAPIReader()
}

func (cm *controllerManager) GetWebhookServer() *webhook.Server {
if cm.webhookServer == nil {
cm.webhookServer = &webhook.Server{
Port: cm.port,
Host: cm.host,
CertDir: cm.certDir,
cm.webhookServerOnce.Do(func() {
if cm.webhookServer == nil {
cm.webhookServer = &webhook.Server{
Port: cm.port,
Host: cm.host,
CertDir: cm.certDir,
}
}
if err := cm.Add(cm.webhookServer); err != nil {
panic("unable to add webhookServer to the controller manager")
panic("unable to add webhook server to the controller manager")
}
}
})
return cm.webhookServer
}
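A hedged sketch of using the lazily-initialized server above (the path and trivial handler are illustrative; real admission webhooks would register a handler from pkg/webhook/admission instead):

import (
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func setupWebhook(mgr manager.Manager) {
	// GetWebhookServer constructs the server once (guarded by sync.Once)
	// and Add()s it to the manager on first use; Register installs a handler.
	srv := mgr.GetWebhookServer()
	srv.Register("/echo", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
}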
|
||||
func (cm *controllerManager) serveMetrics(stop <-chan struct{}) {
|
||||
func (cm *controllerManager) GetLogger() logr.Logger {
|
||||
return cm.logger
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec {
|
||||
return cm.controllerOptions
|
||||
}
|
||||
|
||||
func (cm *controllerManager) serveMetrics() {
|
||||
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{
|
||||
ErrorHandling: promhttp.HTTPErrorOnError,
|
||||
})
|
||||
@ -389,113 +379,228 @@ func (cm *controllerManager) serveMetrics(stop <-chan struct{}) {
|
||||
Handler: mux,
|
||||
}
|
||||
// Run the server
|
||||
go func() {
|
||||
log.Info("starting metrics server", "path", defaultMetricsEndpoint)
|
||||
cm.startRunnable(RunnableFunc(func(_ context.Context) error {
|
||||
cm.logger.Info("starting metrics server", "path", defaultMetricsEndpoint)
|
||||
if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed {
|
||||
cm.errSignal.SignalError(err)
|
||||
return err
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}))
|
||||
|
||||
// Shutdown the server when stop is closed
|
||||
<-stop
|
||||
if err := server.Shutdown(context.Background()); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
<-cm.internalProceduresStop
|
||||
if err := server.Shutdown(cm.shutdownCtx); err != nil {
|
||||
cm.errChan <- err
|
||||
}
|
||||
}
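
The handler above serves metrics.Registry, the project-wide RegistererGatherer, so collectors registered on it show up at /metrics automatically. A sketch, with an assumed metric name that is not part of this diff:

    // Registered once at init time; served by the manager's metrics endpoint.
    myReconcileTotal := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "myapp_reconcile_total", // hypothetical name
        Help: "Total number of reconcile passes.",
    })
    metrics.Registry.MustRegister(myReconcileTotal)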

func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) {
	// TODO(hypnoglow): refactor locking to use anonymous func in the similar way
	// it's done in serveMetrics.
	cm.mu.Lock()
func (cm *controllerManager) serveHealthProbes() {
	mux := http.NewServeMux()

	if cm.readyzHandler != nil {
		mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
	}
	if cm.healthzHandler != nil {
		mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
	}

	server := http.Server{
		Handler: mux,
	}
	// Run server
	go func() {
		if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed {
			cm.errSignal.SignalError(err)

	func() {
		cm.mu.Lock()
		defer cm.mu.Unlock()

		if cm.readyzHandler != nil {
			mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
			// Append '/' suffix to handle subpaths
			mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
		}
		if cm.healthzHandler != nil {
			mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
			// Append '/' suffix to handle subpaths
			mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
		}

		// Run server
		cm.startRunnable(RunnableFunc(func(_ context.Context) error {
			if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed {
				return err
			}
			return nil
		}))
		cm.healthzStarted = true
	}()
	cm.healthzStarted = true
	cm.mu.Unlock()

	// Shutdown the server when stop is closed
	<-stop
	if err := server.Shutdown(context.Background()); err != nil {
		cm.errSignal.SignalError(err)
	<-cm.internalProceduresStop
	if err := server.Shutdown(cm.shutdownCtx); err != nil {
		cm.errChan <- err
	}
}

func (cm *controllerManager) Start(stop <-chan struct{}) error {
	// join the passed-in stop channel as an upstream feeding into cm.internalStopper
	defer close(cm.internalStopper)
func (cm *controllerManager) Start(ctx context.Context) (err error) {
	if err := cm.Add(cm.cluster); err != nil {
		return fmt.Errorf("failed to add cluster to runnables: %w", err)
	}
	cm.internalCtx, cm.internalCancel = context.WithCancel(ctx)

	// This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request
	stopComplete := make(chan struct{})
	defer close(stopComplete)
	// This must be deferred after closing stopComplete, otherwise we deadlock.
	defer func() {
		// https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg
		stopErr := cm.engageStopProcedure(stopComplete)
		if stopErr != nil {
			if err != nil {
				// Utilerrors.Aggregate allows to use errors.Is for all contained errors
				// whereas fmt.Errorf allows wrapping at most one error which means the
				// other one can not be found anymore.
				err = kerrors.NewAggregate([]error{err, stopErr})
			} else {
				err = stopErr
			}
		}
	}()

	// initialize this here so that we reset the signal channel state on every start
	cm.errSignal = &errSignaler{errSignal: make(chan struct{})}
	// Everything that might write into this channel must be started in a new goroutine,
	// because otherwise we might block this routine trying to write into the full channel
	// and will not be able to enter the deferred cm.engageStopProcedure() which drains
	// it.
	cm.errChan = make(chan error)

	// Metrics should be served whether the controller is leader or not.
	// (If we don't serve metrics for non-leaders, prometheus will still scrape
	// the pod but will get a connection refused)
	if cm.metricsListener != nil {
		go cm.serveMetrics(cm.internalStop)
		go cm.serveMetrics()
	}

	// Serve health probes
	if cm.healthProbeListener != nil {
		go cm.serveHealthProbes(cm.internalStop)
		go cm.serveHealthProbes()
	}

	go cm.startNonLeaderElectionRunnables()

	if cm.resourceLock != nil {
		err := cm.startLeaderElection()
		if err != nil {
			return err
	go func() {
		if cm.resourceLock != nil {
			err := cm.startLeaderElection()
			if err != nil {
				cm.errChan <- err
			}
		} else {
			// Treat not having leader election enabled the same as being elected.
			cm.startLeaderElectionRunnables()
			close(cm.elected)
		}
	} else {
		// Treat not having leader election enabled the same as being elected.
		close(cm.elected)
		go cm.startLeaderElectionRunnables()
	}
	}()

	select {
	case <-stop:
	case <-ctx.Done():
		// We are done
		return nil
	case <-cm.errSignal.GotError():
		// Error starting a controller
		return cm.errSignal.Error()
	case err := <-cm.errChan:
		// Error starting or running a runnable
		return err
	}
}
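
The kerrors.NewAggregate comment above is why the stop error is not wrapped with fmt.Errorf: an aggregate keeps every contained error reachable. A small illustration of that claim (hypothetical sentinel errors, not from this diff):

    errStart := errors.New("start failed")
    errStop := errors.New("stop failed")
    agg := kerrors.NewAggregate([]error{errStart, errStop})
    // Both remain discoverable, which wrapping two errors with %w cannot offer.
    fmt.Println(errors.Is(agg, errStart), errors.Is(agg, errStop)) // true true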

// engageStopProcedure signals all runnables to stop, reads potential errors
// from the errChan and waits for them to end. It must not be called more than once.
func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error {
	// Populate the shutdown context.
	var shutdownCancel context.CancelFunc
	if cm.gracefulShutdownTimeout > 0 {
		cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
	} else {
		cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background())
	}
	defer shutdownCancel()

	// Cancel the internal stop channel and wait for the procedures to stop and complete.
	close(cm.internalProceduresStop)
	cm.internalCancel()

	// Start draining the errors before acquiring the lock to make sure we don't deadlock
	// if something that has the lock is blocked on trying to write into the unbuffered
	// channel after something else already wrote into it.
	go func() {
		for {
			select {
			case err, ok := <-cm.errChan:
				if ok {
					cm.logger.Error(err, "error received after stop sequence was engaged")
				}
			case <-stopComplete:
				return
			}
		}
	}()
	if cm.gracefulShutdownTimeout == 0 {
		return nil
	}
	cm.mu.Lock()
	defer cm.mu.Unlock()
	cm.stopProcedureEngaged = true

	// we want to close this after the other runnables stop, because we don't
	// want things like leader election to try and emit events on a closed
	// channel
	defer cm.recorderProvider.Stop(cm.shutdownCtx)
	return cm.waitForRunnableToEnd(shutdownCancel)
}

// waitForRunnableToEnd blocks until all runnables ended or the
// tearDownTimeout was reached. In the latter case, an error is returned.
func (cm *controllerManager) waitForRunnableToEnd(shutdownCancel context.CancelFunc) (retErr error) {
	// Cancel leader election only after we waited. It will os.Exit() the app for safety.
	defer func() {
		if retErr == nil && cm.leaderElectionCancel != nil {
			// After asking the context to be cancelled, make sure
			// we wait for the leader stopped channel to be closed, otherwise
			// we might encounter race conditions between this code
			// and the event recorder, which is used within leader election code.
			cm.leaderElectionCancel()
			<-cm.leaderElectionStopped
		}
	}()

	go func() {
		cm.waitForRunnable.Wait()
		shutdownCancel()
	}()

	<-cm.shutdownCtx.Done()
	if err := cm.shutdownCtx.Err(); err != nil && err != context.Canceled {
		return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
	}
	return nil
}

func (cm *controllerManager) startNonLeaderElectionRunnables() {
	cm.mu.Lock()
	defer cm.mu.Unlock()

	cm.waitForCache()
	// First start any webhook servers, which includes conversion, validation, and defaulting
	// webhooks that are registered.
	//
	// WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition
	// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
	// to never start because no cache can be populated.
	for _, c := range cm.nonLeaderElectionRunnables {
		if _, ok := c.(*webhook.Server); ok {
			cm.startRunnable(c)
		}
	}

	// Start and wait for caches.
	cm.waitForCache(cm.internalCtx)

	// Start the non-leaderelection Runnables after the cache has synced
	for _, c := range cm.nonLeaderElectionRunnables {
		if _, ok := c.(*webhook.Server); ok {
			continue
		}

		// Controllers block, but we want to return an error if any have an error starting.
		// Write any Start errors to a channel so we can return them
		ctrl := c
		go func() {
			if err := ctrl.Start(cm.internalStop); err != nil {
				cm.errSignal.SignalError(err)
			}
			// we use %T here because we don't have a good stand-in for "name",
			// and the full runnable might not serialize (mutexes, etc)
			log.V(1).Info("non-leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl))
		}()
		cm.startRunnable(c)
	}
}

@ -503,48 +608,56 @@ func (cm *controllerManager) startLeaderElectionRunnables() {
	cm.mu.Lock()
	defer cm.mu.Unlock()

	cm.waitForCache()
	cm.waitForCache(cm.internalCtx)

	// Start the leader election Runnables after the cache has synced
	for _, c := range cm.leaderElectionRunnables {
		// Controllers block, but we want to return an error if any have an error starting.
		// Write any Start errors to a channel so we can return them
		ctrl := c
		go func() {
			if err := ctrl.Start(cm.internalStop); err != nil {
				cm.errSignal.SignalError(err)
			}
			// we use %T here because we don't have a good stand-in for "name",
			// and the full runnable might not serialize (mutexes, etc)
			log.V(1).Info("leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl))
		}()
		cm.startRunnable(c)
	}

	cm.startedLeader = true
}

func (cm *controllerManager) waitForCache() {
func (cm *controllerManager) waitForCache(ctx context.Context) {
	if cm.started {
		return
	}

	// Start the Cache. Allow the function to start the cache to be mocked out for testing
	if cm.startCache == nil {
		cm.startCache = cm.cache.Start
	for _, cache := range cm.caches {
		cm.startRunnable(cache)
	}
	go func() {
		if err := cm.startCache(cm.internalStop); err != nil {
			cm.errSignal.SignalError(err)
		}
	}()

	// Wait for the caches to sync.
	// TODO(community): Check the return value and write a test
	cm.cache.WaitForCacheSync(cm.internalStop)
	for _, cache := range cm.caches {
		cache.GetCache().WaitForCacheSync(ctx)
	}
	// TODO: This should be the return value of cm.cache.WaitForCacheSync but we abuse
	// cm.started as check if we already started the cache so it must always become true.
	// Making sure that the cache doesn't get started twice is needed to not get a "close
	// of closed channel" panic
	cm.started = true
}

func (cm *controllerManager) startLeaderElection() (err error) {
	ctx, cancel := context.WithCancel(context.Background())
	cm.mu.Lock()
	cm.leaderElectionCancel = cancel
	cm.mu.Unlock()

	if cm.onStoppedLeading == nil {
		cm.onStoppedLeading = func() {
			// Make sure graceful shutdown is skipped if we lost the leader lock without
			// intending to.
			cm.gracefulShutdownTimeout = time.Duration(0)
			// Most implementations of leader election log.Fatal() here.
			// Since Start is wrapped in log.Fatal when called, we can just return
			// an error here which will cause the program to exit.
			cm.errChan <- errors.New("leader election lost")
		}
	}
	l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          cm.resourceLock,
		LeaseDuration: cm.leaseDuration,
@ -552,35 +665,36 @@ func (cm *controllerManager) startLeaderElection() (err error) {
		RetryPeriod: cm.retryPeriod,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(_ context.Context) {
				close(cm.elected)
				cm.startLeaderElectionRunnables()
				close(cm.elected)
			},
			OnStoppedLeading: func() {
				// Most implementations of leader election log.Fatal() here.
				// Since Start is wrapped in log.Fatal when called, we can just return
				// an error here which will cause the program to exit.
				cm.errSignal.SignalError(fmt.Errorf("leader election lost"))
			},
			OnStoppedLeading: cm.onStoppedLeading,
		},
		ReleaseOnCancel: cm.leaderElectionReleaseOnCancel,
	})
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		select {
		case <-cm.internalStop:
			cancel()
		case <-ctx.Done():
		}
	}()

	// Start the leader elector process
	go l.Run(ctx)
	go func() {
		l.Run(ctx)
		<-ctx.Done()
		close(cm.leaderElectionStopped)
	}()
	return nil
}

func (cm *controllerManager) Elected() <-chan struct{} {
	return cm.elected
}

func (cm *controllerManager) startRunnable(r Runnable) {
	cm.waitForRunnable.Add(1)
	go func() {
		defer cm.waitForRunnable.Done()
		if err := r.Start(cm.internalCtx); err != nil {
			cm.errChan <- err
		}
	}()
}

454
vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
generated
vendored
@ -17,33 +17,41 @@ limitations under the License.
package manager

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"reflect"
	"time"

	"github.com/go-logr/logr"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
	"sigs.k8s.io/controller-runtime/pkg/config"
	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	internalrecorder "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
	intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
	"sigs.k8s.io/controller-runtime/pkg/leaderelection"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
	"sigs.k8s.io/controller-runtime/pkg/recorder"
	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables.
// A Manager is required to create Controllers.
type Manager interface {
	// Cluster holds a variety of methods to interact with a cluster.
	cluster.Cluster

	// Add will set requested dependencies on the component, and cause the component to be
	// started when Start is called. Add will inject any dependencies for which the argument
	// implements the inject interface - e.g. inject.Client.
@ -56,10 +64,6 @@ type Manager interface {
	// election was configured.
	Elected() <-chan struct{}

	// SetFields will set any dependencies on an object for which the object has implemented the inject
	// interface - e.g. inject.Client.
	SetFields(interface{}) error

	// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
	// Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints meant to be
	// sensitive and shouldn't be exposed publicly.
@ -73,44 +77,25 @@ type Manager interface {
	// AddReadyzCheck allows you to add Readyz checker
	AddReadyzCheck(name string, check healthz.Checker) error

	// Start starts all registered Controllers and blocks until the Stop channel is closed.
	// Start starts all registered Controllers and blocks until the context is cancelled.
	// Returns an error if there is an error starting any controller.
	Start(<-chan struct{}) error

	// GetConfig returns an initialized Config
	GetConfig() *rest.Config

	// GetScheme returns an initialized Scheme
	GetScheme() *runtime.Scheme

	// GetClient returns a client configured with the Config. This client may
	// not be a fully "direct" client -- it may read from a cache, for
	// instance. See Options.NewClient for more information on how the default
	// implementation works.
	GetClient() client.Client

	// GetFieldIndexer returns a client.FieldIndexer configured with the client
	GetFieldIndexer() client.FieldIndexer

	// GetCache returns a cache.Cache
	GetCache() cache.Cache

	// GetEventRecorderFor returns a new EventRecorder for the provided name
	GetEventRecorderFor(name string) record.EventRecorder

	// GetRESTMapper returns a RESTMapper
	GetRESTMapper() meta.RESTMapper

	// GetAPIReader returns a reader that will be configured to use the API server.
	// This should be used sparingly and only when the client does not fit your
	// use case.
	GetAPIReader() client.Reader
	//
	// If LeaderElection is used, the binary must be exited immediately after this returns,
	// otherwise components that need leader election might continue to run after the leader
	// lock was lost.
	Start(ctx context.Context) error

	// GetWebhookServer returns a webhook.Server
	GetWebhookServer() *webhook.Server

	// GetLogger returns this manager's logger.
	GetLogger() logr.Logger

	// GetControllerOptions returns controller global configuration options.
	GetControllerOptions() v1alpha1.ControllerConfigurationSpec
}
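
A hypothetical end-to-end sketch of this surface (cfg, setupLog, and the lock name are assumptions, not part of this diff):

    mgr, err := manager.New(cfg, manager.Options{
        LeaderElection:   true,
        LeaderElectionID: "example.my-operator",
    })
    if err != nil {
        setupLog.Error(err, "unable to create manager")
        os.Exit(1)
    }
    if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
        setupLog.Error(err, "problem running manager")
        os.Exit(1)
    }
    // Per the Start comment above: with leader election enabled, exit promptly
    // once Start returns instead of doing more work in this process.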

// Options are the arguments for creating a new Manager
// Options are the arguments for creating a new Manager.
type Options struct {
	// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
	// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
@ -126,25 +111,74 @@ type Options struct {
	// value only if you know what you are doing. Defaults to 10 hours if unset.
	// there will a 10 percent jitter between the SyncPeriod of all controllers
	// so that all controllers will not send list requests simultaneously.
	//
	// This applies to all controllers.
	//
	// A period sync happens for two reasons:
	// 1. To insure against a bug in the controller that causes an object to not
	// be requeued, when it otherwise should be requeued.
	// 2. To insure against an unknown bug in controller-runtime, or its dependencies,
	// that causes an object to not be requeued, when it otherwise should be
	// requeued, or to be removed from the queue, when it otherwise should not
	// be removed.
	//
	// If you want
	// 1. to insure against missed watch events, or
	// 2. to poll services that cannot be watched,
	// then we recommend that, instead of changing the default period, the
	// controller requeue, with a constant duration `t`, whenever the controller
	// is "done" with an object, and would otherwise not requeue it, i.e., we
	// recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
	// instead of `reconcile.Result{}`.
	SyncPeriod *time.Duration

	// Logger is the logger that should be used by this manager.
	// If none is set, it defaults to log.Log global logger.
	Logger logr.Logger

	// LeaderElection determines whether or not to use leader election when
	// starting the manager.
	LeaderElection bool

	// LeaderElectionResourceLock determines which resource lock to use for leader election,
	// defaults to "configmapsleases". Change this value only if you know what you are doing.
	// Otherwise, users of your controller might end up with multiple running instances that
	// each acquired leadership through different resource locks during upgrades and thus
	// act on the same resources concurrently.
	// If you want to migrate to the "leases" resource lock, you might do so by migrating to the
	// respective multilock first ("configmapsleases" or "endpointsleases"), which will acquire a
	// leader lock on both resources. After all your users have migrated to the multilock, you can
	// go ahead and migrate to "leases". Please also keep in mind, that users might skip versions
	// of your controller.
	//
	// Note: before controller-runtime version v0.7, the resource lock was set to "configmaps".
	// Please keep this in mind, when planning a proper migration path for your controller.
	LeaderElectionResourceLock string

	// LeaderElectionNamespace determines the namespace in which the leader
	// election configmap will be created.
	// election resource will be created.
	LeaderElectionNamespace string

	// LeaderElectionID determines the name of the configmap that leader election
	// LeaderElectionID determines the name of the resource that leader election
	// will use for holding the leader lock.
	LeaderElectionID string

	// LeaderElectionConfig can be specified to override the default configuration
	// that is used to build the leader election client.
	LeaderElectionConfig *rest.Config

	// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
	// when the Manager ends. This requires the binary to immediately end when the
	// Manager is stopped, otherwise this setting is unsafe. Setting this significantly
	// speeds up voluntary leader transitions as the new leader doesn't have to wait
	// LeaseDuration time first.
	LeaderElectionReleaseOnCancel bool

	// LeaseDuration is the duration that non-leader candidates will
	// wait to force acquire leadership. This is measured against time of
	// last observed ack. Default is 15 seconds.
	LeaseDuration *time.Duration
	// RenewDeadline is the duration that the acting master will retry
	// RenewDeadline is the duration that the acting controlplane will retry
	// refreshing leadership before giving up. Default is 10 seconds.
	RenewDeadline *time.Duration
	// RetryPeriod is the duration the LeaderElector clients should wait
@ -175,27 +209,38 @@ type Options struct {
	LivenessEndpointName string

	// Port is the port that the webhook server serves at.
	// It is used to set webhook.Server.Port.
	// It is used to set webhook.Server.Port if WebhookServer is not set.
	Port int
	// Host is the hostname that the webhook server binds to.
	// It is used to set webhook.Server.Host.
	// It is used to set webhook.Server.Host if WebhookServer is not set.
	Host string

	// CertDir is the directory that contains the server key and certificate.
	// if not set, webhook server would look up the server key and certificate in
	// If not set, webhook server would look up the server key and certificate in
	// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
	// must be named tls.key and tls.crt, respectively.
	// It is used to set webhook.Server.CertDir if WebhookServer is not set.
	CertDir string

	// WebhookServer is an externally configured webhook.Server. By default,
	// a Manager will create a default server using Port, Host, and CertDir;
	// if this is set, the Manager will use this server instead.
	WebhookServer *webhook.Server

	// Functions to all for a user to customize the values that will be injected.

	// NewCache is the function that will create the cache to be used
	// by the manager. If not set this will use the default new cache function.
	NewCache cache.NewCacheFunc

	// NewClient will create the client to be used by the manager.
	// NewClient is the func that creates the client to be used by the manager.
	// If not set this will create the default DelegatingClient that will
	// use the cache for reads and the client for writes.
	NewClient NewClientFunc
	NewClient cluster.NewClientFunc

	// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
	// for the given objects.
	ClientDisableCacheFor []client.Object

	// DryRunClient specifies whether the client should be configured to enforce
	// dryRun mode.
@ -203,36 +248,53 @@ type Options struct {

	// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API
	// Use this to customize the event correlator and spam filter
	//
	// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
	// is shorter than the lifetime of your process.
	EventBroadcaster record.EventBroadcaster

	// GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop.
	// To disable graceful shutdown, set to time.Duration(0)
	// To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1)
	// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
	GracefulShutdownTimeout *time.Duration

	// Controller contains global configuration options for controllers
	// registered within this manager.
	// +optional
	Controller v1alpha1.ControllerConfigurationSpec

	// makeBroadcaster allows deferring the creation of the broadcaster to
	// avoid leaking goroutines if we never call Start on this manager. It also
	// returns whether or not this is a "owned" broadcaster, and as such should be
	// stopped with the manager.
	makeBroadcaster intrec.EventBroadcasterProducer

	// Dependency injection for testing
	newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error)
	newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error)
	newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error)
	newMetricsListener func(addr string) (net.Listener, error)
	newHealthProbeListener func(addr string) (net.Listener, error)
}
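
The SyncPeriod comment above recommends a constant requeue rather than a shorter global resync; sketched with the reconcile package (MyReconciler is hypothetical):

    func (r *MyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
        // ... reconcile the object ...
        // Re-inspect it on a fixed schedule instead of lowering SyncPeriod.
        return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
    }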

// NewClientFunc allows a user to define how to create a client
type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error)

// Runnable allows a component to be started.
// It's very important that Start blocks until
// it's done running.
type Runnable interface {
	// Start starts running the component. The component will stop running
	// when the channel is closed. Start blocks until the channel is closed or
	// when the context is closed. Start blocks until the context is closed or
	// an error occurs.
	Start(<-chan struct{}) error
	Start(context.Context) error
}

// RunnableFunc implements Runnable using a function.
// It's very important that the given function block
// until it's done running.
type RunnableFunc func(<-chan struct{}) error
type RunnableFunc func(context.Context) error

// Start implements Runnable
func (r RunnableFunc) Start(s <-chan struct{}) error {
	return r(s)
// Start implements Runnable.
func (r RunnableFunc) Start(ctx context.Context) error {
	return r(ctx)
}
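
Under the context-based contract, a Runnable simply watches its context; a hypothetical background task added with mgr.Add:

    if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
        ticker := time.NewTicker(30 * time.Second) // assumed interval
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                // periodic work goes here
            case <-ctx.Done():
                // manager is stopping; return to unblock graceful shutdown
                return nil
            }
        }
    })); err != nil {
        setupLog.Error(err, "unable to add runnable")
    }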

// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode.
@ -244,54 +306,43 @@ type LeaderElectionRunnable interface {

// New returns a new Manager for creating Controllers.
func New(config *rest.Config, options Options) (Manager, error) {
	// Initialize a rest.config if none was specified
	if config == nil {
		return nil, fmt.Errorf("must specify Config")
	}

	// Set default values for options fields
	options = setOptionsDefaults(options)

	// Create the mapper provider
	mapper, err := options.MapperProvider(config)
	if err != nil {
		log.Error(err, "Failed to get API Group-Resources")
		return nil, err
	}

	// Create the cache for the cached read client and registering informers
	cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace})
	cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) {
		clusterOptions.Scheme = options.Scheme
		clusterOptions.MapperProvider = options.MapperProvider
		clusterOptions.Logger = options.Logger
		clusterOptions.SyncPeriod = options.SyncPeriod
		clusterOptions.Namespace = options.Namespace
		clusterOptions.NewCache = options.NewCache
		clusterOptions.NewClient = options.NewClient
		clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor
		clusterOptions.DryRunClient = options.DryRunClient
		clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck
	})
	if err != nil {
		return nil, err
	}

	apiReader, err := client.New(config, client.Options{Scheme: options.Scheme, Mapper: mapper})
	if err != nil {
		return nil, err
	}

	writeObj, err := options.NewClient(cache, config, client.Options{Scheme: options.Scheme, Mapper: mapper})
	if err != nil {
		return nil, err
	}

	if options.DryRunClient {
		writeObj = client.NewDryRunClient(writeObj)
	}

	// Create the recorder provider to inject event recorders for the components.
	// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
	// to the particular controller that it's being injected into, rather than a generic one like is here.
	recorderProvider, err := options.newRecorderProvider(config, options.Scheme, log.WithName("events"), options.EventBroadcaster)
	recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
	if err != nil {
		return nil, err
	}

	// Create the resource lock to enable leader election)
	resourceLock, err := options.newResourceLock(config, recorderProvider, leaderelection.Options{
		LeaderElection:          options.LeaderElection,
		LeaderElectionID:        options.LeaderElectionID,
		LeaderElectionNamespace: options.LeaderElectionNamespace,
	leaderConfig := options.LeaderElectionConfig
	if leaderConfig == nil {
		leaderConfig = rest.CopyConfig(config)
	}
	resourceLock, err := options.newResourceLock(leaderConfig, recorderProvider, leaderelection.Options{
		LeaderElection:             options.LeaderElection,
		LeaderElectionResourceLock: options.LeaderElectionResourceLock,
		LeaderElectionID:           options.LeaderElectionID,
		LeaderElectionNamespace:    options.LeaderElectionNamespace,
	})
	if err != nil {
		return nil, err
@ -314,54 +365,141 @@ func New(config *rest.Config, options Options) (Manager, error) {
		return nil, err
	}

	stop := make(chan struct{})

	return &controllerManager{
		config:                config,
		scheme:                options.Scheme,
		cache:                 cache,
		fieldIndexes:          cache,
		client:                writeObj,
		apiReader:             apiReader,
		recorderProvider:      recorderProvider,
		resourceLock:          resourceLock,
		mapper:                mapper,
		metricsListener:       metricsListener,
		metricsExtraHandlers:  metricsExtraHandlers,
		internalStop:          stop,
		internalStopper:       stop,
		elected:               make(chan struct{}),
		port:                  options.Port,
		host:                  options.Host,
		certDir:               options.CertDir,
		leaseDuration:         *options.LeaseDuration,
		renewDeadline:         *options.RenewDeadline,
		retryPeriod:           *options.RetryPeriod,
		healthProbeListener:   healthProbeListener,
		readinessEndpointName: options.ReadinessEndpointName,
		livenessEndpointName:  options.LivenessEndpointName,
		cluster:               cluster,
		recorderProvider:      recorderProvider,
		resourceLock:          resourceLock,
		metricsListener:       metricsListener,
		metricsExtraHandlers:  metricsExtraHandlers,
		controllerOptions:     options.Controller,
		logger:                options.Logger,
		elected:               make(chan struct{}),
		port:                  options.Port,
		host:                  options.Host,
		certDir:               options.CertDir,
		webhookServer:         options.WebhookServer,
		leaseDuration:         *options.LeaseDuration,
		renewDeadline:         *options.RenewDeadline,
		retryPeriod:           *options.RetryPeriod,
		healthProbeListener:   healthProbeListener,
		readinessEndpointName: options.ReadinessEndpointName,
		livenessEndpointName:  options.LivenessEndpointName,
		gracefulShutdownTimeout:       *options.GracefulShutdownTimeout,
		internalProceduresStop:        make(chan struct{}),
		leaderElectionStopped:         make(chan struct{}),
		leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel,
	}, nil
}

// defaultNewClient creates the default caching client
func defaultNewClient(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) {
	// Create the Client for Write operations.
	c, err := client.New(config, options)
	if err != nil {
		return nil, err
// AndFrom will use a supplied type and convert to Options
// any options already set on Options will be ignored, this is used to allow
// cli flags to override anything specified in the config file.
func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) {
	if inj, wantsScheme := loader.(inject.Scheme); wantsScheme {
		err := inj.InjectScheme(o.Scheme)
		if err != nil {
			return o, err
		}
	}

	return &client.DelegatingClient{
		Reader: &client.DelegatingReader{
			CacheReader:  cache,
			ClientReader: c,
		},
		Writer:       c,
		StatusClient: c,
	}, nil
	newObj, err := loader.Complete()
	if err != nil {
		return o, err
	}

	o = o.setLeaderElectionConfig(newObj)

	if o.SyncPeriod == nil && newObj.SyncPeriod != nil {
		o.SyncPeriod = &newObj.SyncPeriod.Duration
	}

	if o.Namespace == "" && newObj.CacheNamespace != "" {
		o.Namespace = newObj.CacheNamespace
	}

	if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" {
		o.MetricsBindAddress = newObj.Metrics.BindAddress
	}

	if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" {
		o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress
	}

	if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" {
		o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName
	}

	if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" {
		o.LivenessEndpointName = newObj.Health.LivenessEndpointName
	}

	if o.Port == 0 && newObj.Webhook.Port != nil {
		o.Port = *newObj.Webhook.Port
	}

	if o.Host == "" && newObj.Webhook.Host != "" {
		o.Host = newObj.Webhook.Host
	}

	if o.CertDir == "" && newObj.Webhook.CertDir != "" {
		o.CertDir = newObj.Webhook.CertDir
	}

	if newObj.Controller != nil {
		if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil {
			o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout
		}

		if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 {
			o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency
		}
	}

	return o, nil
}
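
A hedged usage sketch for AndFrom, assuming the deferred file loader from pkg/config and an example path; since only unset fields are filled from the file, values already set (for example from CLI flags) win:

    options, err := manager.Options{}.AndFrom(config.File().AtPath("/etc/manager/config.yaml"))
    if err != nil {
        setupLog.Error(err, "unable to load manager configuration")
        os.Exit(1)
    }
    mgr, err := manager.New(cfg, options)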

// defaultHealthProbeListener creates the default health probes listener bound to the given address
// AndFromOrDie will use options.AndFrom() and will panic if there are errors.
func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options {
	o, err := o.AndFrom(loader)
	if err != nil {
		panic(fmt.Sprintf("could not parse config file: %v", err))
	}
	return o
}

func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
	if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
		o.LeaderElection = *obj.LeaderElection.LeaderElect
	}

	if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" {
		o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock
	}

	if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" {
		o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace
	}

	if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" {
		o.LeaderElectionID = obj.LeaderElection.ResourceName
	}

	if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) {
		o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration
	}

	if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) {
		o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration
	}

	if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) {
		o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration
	}

	return o
}

// defaultHealthProbeListener creates the default health probes listener bound to the given address.
func defaultHealthProbeListener(addr string) (net.Listener, error) {
	if addr == "" || addr == "0" {
		return nil, nil
@ -374,37 +512,30 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) {
	return ln, nil
}

// setOptionsDefaults set default values for Options fields
// setOptionsDefaults set default values for Options fields.
func setOptionsDefaults(options Options) Options {
	// Use the Kubernetes client-go scheme if none is specified
	if options.Scheme == nil {
		options.Scheme = scheme.Scheme
	}

	if options.MapperProvider == nil {
		options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) {
			return apiutil.NewDynamicRESTMapper(c)
		}
	}

	// Allow newClient to be mocked
	if options.NewClient == nil {
		options.NewClient = defaultNewClient
	}

	// Allow newCache to be mocked
	if options.NewCache == nil {
		options.NewCache = cache.New
	// Allow newResourceLock to be mocked
	if options.newResourceLock == nil {
		options.newResourceLock = leaderelection.NewResourceLock
	}

	// Allow newRecorderProvider to be mocked
	if options.newRecorderProvider == nil {
		options.newRecorderProvider = internalrecorder.NewProvider
		options.newRecorderProvider = intrec.NewProvider
	}

	// Allow newResourceLock to be mocked
	if options.newResourceLock == nil {
		options.newResourceLock = leaderelection.NewResourceLock
	// This is duplicated with pkg/cluster, we need it here
	// for the leader election and there to provide the user with
	// an EventBroadcaster
	if options.EventBroadcaster == nil {
		// defer initialization to avoid leaking by default
		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
			return record.NewBroadcaster(), true
		}
	} else {
		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
			return options.EventBroadcaster, false
		}
	}

	if options.newMetricsListener == nil {
@ -423,10 +554,6 @@ func setOptionsDefaults(options Options) Options {
		options.RetryPeriod = &retryPeriod
	}

	if options.EventBroadcaster == nil {
		options.EventBroadcaster = record.NewBroadcaster()
	}

	if options.ReadinessEndpointName == "" {
		options.ReadinessEndpointName = defaultReadinessEndpoint
	}
@ -439,5 +566,14 @@ func setOptionsDefaults(options Options) Options {
		options.newHealthProbeListener = defaultHealthProbeListener
	}

	if options.GracefulShutdownTimeout == nil {
		gracefulShutdownTimeout := defaultGracefulShutdownPeriod
		options.GracefulShutdownTimeout = &gracefulShutdownTimeout
	}

	if options.Logger == nil {
		options.Logger = logf.RuntimeLog.WithName("manager")
	}

	return options
}

10
vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package signals

import (
	"context"
	"os"
	"os/signal"
)
@ -26,18 +27,19 @@ var onlyOneSignalHandler = make(chan struct{})
// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned
// which is closed on one of these signals. If a second signal is caught, the program
// is terminated with exit code 1.
func SetupSignalHandler() (stopCh <-chan struct{}) {
func SetupSignalHandler() context.Context {
	close(onlyOneSignalHandler) // panics when called twice

	stop := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())

	c := make(chan os.Signal, 2)
	signal.Notify(c, shutdownSignals...)
	go func() {
		<-c
		close(stop)
		cancel()
		<-c
		os.Exit(1) // second signal. Exit directly.
	}()

	return stop
	return ctx
}
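
SetupSignalHandler may only be called once per process (the close of onlyOneSignalHandler above panics on a second call). A tiny sketch of the resulting behavior:

    ctx := signals.SetupSignalHandler() // call at most once
    <-ctx.Done()                        // first SIGTERM/SIGINT cancels the context
    // a second signal makes the handler goroutine call os.Exit(1) directly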

13
vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package metrics

import (
	"context"
	"net/url"
	"time"

@ -50,7 +51,7 @@ const (
)

var (
	// client metrics
	// client metrics.
	requestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: RestClientSubsystem,
		Name:      LatencyKey,
@ -64,7 +65,7 @@ var (
		Help: "Number of HTTP requests, partitioned by status code, method, and host.",
	}, []string{"code", "method", "host"})

	// reflector metrics
	// reflector metrics.

	// TODO(directxman12): update these to be histograms once the metrics overhaul KEP
	// PRs start landing.
@ -123,7 +124,7 @@ func init() {
	registerReflectorMetrics()
}

// registerClientMetrics sets up the client latency metrics from client-go
// registerClientMetrics sets up the client latency metrics from client-go.
func registerClientMetrics() {
	// register the metrics with our registry
	Registry.MustRegister(requestLatency)
@ -136,7 +137,7 @@ func registerClientMetrics() {
	})
}

// registerReflectorMetrics sets up reflector (reconcile) loop metrics
// registerReflectorMetrics sets up reflector (reconcile) loop metrics.
func registerReflectorMetrics() {
	Registry.MustRegister(listsTotal)
	Registry.MustRegister(listsDuration)
@ -162,7 +163,7 @@ type latencyAdapter struct {
	metric *prometheus.HistogramVec
}

func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) {
func (l *latencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) {
	l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds())
}

@ -170,7 +171,7 @@ type resultAdapter struct {
	metric *prometheus.CounterVec
}

func (r *resultAdapter) Increment(code, method, host string) {
func (r *resultAdapter) Increment(_ context.Context, code, method, host string) {
	r.metric.WithLabelValues(code, method, host).Inc()
}

2
vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go
generated
vendored
@ -26,5 +26,5 @@ type RegistererGatherer interface {
}

// Registry is a prometheus registry for storing metrics within the
// controller-runtime
// controller-runtime.
var Registry RegistererGatherer = prometheus.NewRegistry()

234
vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go
generated
vendored
@ -17,6 +17,11 @@ limitations under the License.
package predicate

import (
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
@ -41,6 +46,9 @@ type Predicate interface {
var _ Predicate = Funcs{}
var _ Predicate = ResourceVersionChangedPredicate{}
var _ Predicate = GenerationChangedPredicate{}
var _ Predicate = AnnotationChangedPredicate{}
var _ Predicate = or{}
var _ Predicate = and{}

// Funcs is a function that implements Predicate.
type Funcs struct {
@ -57,7 +65,7 @@ type Funcs struct {
	GenericFunc func(event.GenericEvent) bool
}

// Create implements Predicate
// Create implements Predicate.
func (p Funcs) Create(e event.CreateEvent) bool {
	if p.CreateFunc != nil {
		return p.CreateFunc(e)
@ -65,7 +73,7 @@ func (p Funcs) Create(e event.CreateEvent) bool {
	return true
}

// Delete implements Predicate
// Delete implements Predicate.
func (p Funcs) Delete(e event.DeleteEvent) bool {
	if p.DeleteFunc != nil {
		return p.DeleteFunc(e)
@ -73,7 +81,7 @@ func (p Funcs) Delete(e event.DeleteEvent) bool {
	return true
}

// Update implements Predicate
// Update implements Predicate.
func (p Funcs) Update(e event.UpdateEvent) bool {
	if p.UpdateFunc != nil {
		return p.UpdateFunc(e)
@ -81,7 +89,7 @@ func (p Funcs) Update(e event.UpdateEvent) bool {
	return true
}

// Generic implements Predicate
// Generic implements Predicate.
func (p Funcs) Generic(e event.GenericEvent) bool {
	if p.GenericFunc != nil {
		return p.GenericFunc(e)
@ -89,30 +97,43 @@ func (p Funcs) Generic(e event.GenericEvent) bool {
	return true
}

// ResourceVersionChangedPredicate implements a default update predicate function on resource version change
// NewPredicateFuncs returns a predicate funcs that applies the given filter function
// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied
// to the new object.
func NewPredicateFuncs(filter func(object client.Object) bool) Funcs {
	return Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return filter(e.Object)
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			return filter(e.ObjectNew)
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			return filter(e.Object)
		},
		GenericFunc: func(e event.GenericEvent) bool {
			return filter(e.Object)
		},
	}
}
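
A hypothetical NewPredicateFuncs filter that only admits events for objects in one namespace:

    inDefault := predicate.NewPredicateFuncs(func(o client.Object) bool {
        return o.GetNamespace() == "default" // assumed namespace
    })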
|
||||
|
||||
// ResourceVersionChangedPredicate implements a default update predicate function on resource version change.
|
||||
type ResourceVersionChangedPredicate struct {
|
||||
Funcs
|
||||
}
|
||||
|
||||
// Update implements default UpdateEvent filter for validating resource version change
|
||||
// Update implements default UpdateEvent filter for validating resource version change.
|
||||
func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool {
|
||||
if e.MetaOld == nil {
|
||||
log.Error(nil, "UpdateEvent has no old metadata", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.ObjectOld == nil {
|
||||
log.Error(nil, "GenericEvent has no old runtime object to update", "event", e)
|
||||
log.Error(nil, "Update event has no old object to update", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.ObjectNew == nil {
|
||||
log.Error(nil, "GenericEvent has no new runtime object for update", "event", e)
|
||||
log.Error(nil, "Update event has no new object to update", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.MetaNew == nil {
|
||||
log.Error(nil, "UpdateEvent has no new metadata", "event", e)
|
||||
return false
|
||||
}
|
||||
return e.MetaNew.GetResourceVersion() != e.MetaOld.GetResourceVersion()
|
||||
|
||||
return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion()
|
||||
}
|
||||
|
||||
// GenerationChangedPredicate implements a default update predicate function on Generation change.
|
||||
@ -135,23 +156,178 @@ type GenerationChangedPredicate struct {
|
||||
Funcs
|
||||
}
|
||||
|
||||
// Update implements default UpdateEvent filter for validating generation change
|
||||
// Update implements default UpdateEvent filter for validating generation change.
|
||||
func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool {
|
||||
if e.MetaOld == nil {
|
||||
log.Error(nil, "Update event has no old metadata", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.ObjectOld == nil {
|
||||
log.Error(nil, "Update event has no old runtime object to update", "event", e)
|
||||
log.Error(nil, "Update event has no old object to update", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.ObjectNew == nil {
|
||||
log.Error(nil, "Update event has no new runtime object for update", "event", e)
|
||||
log.Error(nil, "Update event has no new object for update", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.MetaNew == nil {
|
||||
log.Error(nil, "Update event has no new metadata", "event", e)
|
||||
return false
|
||||
}
|
||||
return e.MetaNew.GetGeneration() != e.MetaOld.GetGeneration()
|
||||
|
||||
return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration()
|
||||
}
|
||||
|
||||
// AnnotationChangedPredicate implements a default update predicate function on annotation change.
|
||||
//
|
||||
// This predicate will skip update events that have no change in the object's annotation.
|
||||
// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
|
||||
//
|
||||
// Controller.Watch(
|
||||
// &source.Kind{Type: v1.MyCustomKind},
|
||||
// &handler.EnqueueRequestForObject{},
|
||||
// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}))
|
||||
//
|
||||
// This is mostly useful for controllers that needs to trigger both when the resource's generation is incremented
|
||||
// (i.e., when the resource' .spec changes), or an annotation changes (e.g., for a staging/alpha API).
|
||||
type AnnotationChangedPredicate struct {
|
||||
Funcs
|
||||
}
|
||||
|
||||
// Update implements default UpdateEvent filter for validating annotation change.
|
||||
func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool {
|
||||
if e.ObjectOld == nil {
|
||||
log.Error(nil, "Update event has no old object to update", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.ObjectNew == nil {
|
||||
log.Error(nil, "Update event has no new object for update", "event", e)
|
||||
return false
|
||||
}
|
||||
|
||||
return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations())
|
||||
}
|
||||
|
||||
// LabelChangedPredicate implements a default update predicate function on label change.
|
||||
//
|
||||
// This predicate will skip update events that have no change in the object's label.
|
||||
// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
|
||||
//
|
||||
// Controller.Watch(
|
||||
// &source.Kind{Type: v1.MyCustomKind},
|
||||
// &handler.EnqueueRequestForObject{},
|
||||
// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
|
||||
//
|
||||
// This will be helpful when object's labels is carrying some extra specification information beyond object's spec,
|
||||
// and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens.
|
||||
type LabelChangedPredicate struct {
|
||||
Funcs
|
||||
}
|
||||
|
||||
// Update implements default UpdateEvent filter for checking label change.
|
||||
func (LabelChangedPredicate) Update(e event.UpdateEvent) bool {
|
||||
if e.ObjectOld == nil {
|
||||
log.Error(nil, "Update event has no old object to update", "event", e)
|
||||
return false
|
||||
}
|
||||
if e.ObjectNew == nil {
|
||||
log.Error(nil, "Update event has no new object for update", "event", e)
|
||||
return false
|
||||
}
|
||||
|
||||
return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
|
||||
}

// And returns a composite predicate that implements a logical AND of the predicates passed to it.
func And(predicates ...Predicate) Predicate {
return and{predicates}
}

type and struct {
predicates []Predicate
}

func (a and) Create(e event.CreateEvent) bool {
for _, p := range a.predicates {
if !p.Create(e) {
return false
}
}
return true
}

func (a and) Update(e event.UpdateEvent) bool {
for _, p := range a.predicates {
if !p.Update(e) {
return false
}
}
return true
}

func (a and) Delete(e event.DeleteEvent) bool {
for _, p := range a.predicates {
if !p.Delete(e) {
return false
}
}
return true
}

func (a and) Generic(e event.GenericEvent) bool {
for _, p := range a.predicates {
if !p.Generic(e) {
return false
}
}
return true
}

// Or returns a composite predicate that implements a logical OR of the predicates passed to it.
func Or(predicates ...Predicate) Predicate {
return or{predicates}
}

type or struct {
predicates []Predicate
}

func (o or) Create(e event.CreateEvent) bool {
for _, p := range o.predicates {
if p.Create(e) {
return true
}
}
return false
}

func (o or) Update(e event.UpdateEvent) bool {
for _, p := range o.predicates {
if p.Update(e) {
return true
}
}
return false
}

func (o or) Delete(e event.DeleteEvent) bool {
for _, p := range o.predicates {
if p.Delete(e) {
return true
}
}
return false
}

func (o or) Generic(e event.GenericEvent) bool {
for _, p := range o.predicates {
if p.Generic(e) {
return true
}
}
return false
}

// LabelSelectorPredicate constructs a Predicate from a LabelSelector.
// Only objects matching the LabelSelector will be admitted.
func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) {
selector, err := metav1.LabelSelectorAsSelector(&s)
if err != nil {
return Funcs{}, err
}
return NewPredicateFuncs(func(o client.Object) bool {
return selector.Matches(labels.Set(o.GetLabels()))
}), nil
}
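For orientation, here is a minimal sketch of how these predicates compose in a controller setup. It is not part of this diff: the manager wiring, the app=demo label, and the reconciler are assumed to exist elsewhere.

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupWatch reconciles Pods only for objects matching the app=demo
// selector, and only when something meaningful changed: the generation,
// a label, or an annotation.
func setupWatch(mgr ctrl.Manager, r reconcile.Reconciler) error {
	sel, err := predicate.LabelSelectorPredicate(
		metav1.LabelSelector{MatchLabels: map[string]string{"app": "demo"}})
	if err != nil {
		return err
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}, builder.WithPredicates(
			predicate.And(
				sel,
				predicate.Or(
					predicate.GenerationChangedPredicate{},
					predicate.LabelChangedPredicate{},
					predicate.AnnotationChangedPredicate{},
				),
			))).
		Complete(r)
}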
15
vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package reconcile

import (
"context"
"time"

"k8s.io/apimachinery/pkg/types"
@ -32,6 +33,14 @@ type Result struct {
RequeueAfter time.Duration
}

// IsZero returns true if this result is empty.
func (r *Result) IsZero() bool {
if r == nil {
return true
}
return *r == Result{}
}

// Request contains the information necessary to reconcile a Kubernetes object. This includes the
// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about
// any specific Event or the object contents itself.
@ -81,13 +90,13 @@ type Reconciler interface {
// Reconciler performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
Reconcile(Request) (Result, error)
Reconcile(context.Context, Request) (Result, error)
}

// Func is a function that implements the reconcile interface.
type Func func(Request) (Result, error)
type Func func(context.Context, Request) (Result, error)

var _ Reconciler = Func(nil)

// Reconcile implements Reconciler.
func (r Func) Reconcile(o Request) (Result, error) { return r(o) }
func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) }
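This context-aware Reconcile signature is what the persistentvolume.go changes in this commit adapt to. As a hedged sketch of a minimal caller (names are illustrative, not from this diff):

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// A reconcile.Func satisfies Reconciler, so small controllers need no
// dedicated struct. The context now flows in from the controller
// instead of being created locally via context.TODO().
var _ reconcile.Reconciler = reconcile.Func(
	func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
		// Honor cancellation from the manager before doing any work.
		if err := ctx.Err(); err != nil {
			return reconcile.Result{}, err
		}
		// ... look up req.NamespacedName with a client.Client, passing ctx ...
		return reconcile.Result{}, nil
	})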
20
vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go
generated
vendored
@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles.
// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10.
package inject

import (
@ -27,13 +29,13 @@ import (
)

// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and
// Reconciles
// Reconciles.
type Cache interface {
InjectCache(cache cache.Cache) error
}

// CacheInto will set informers on i and return the result if it implements Cache. Returns
//// false if i does not implement Cache.
// false if i does not implement Cache.
func CacheInto(c cache.Cache, i interface{}) (bool, error) {
if s, ok := i.(Cache); ok {
return true, s.InjectCache(c)
@ -47,7 +49,7 @@ type APIReader interface {
}

// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto.
// Returns false if i does not implement APIReader
// Returns false if i does not implement APIReader.
func APIReaderInto(reader client.Reader, i interface{}) (bool, error) {
if s, ok := i.(APIReader); ok {
return true, s.InjectAPIReader(reader)
@ -56,13 +58,13 @@ func APIReaderInto(reader client.Reader, i interface{}) (bool, error) {
}

// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and
// Reconciles
// Reconciles.
type Config interface {
InjectConfig(*rest.Config) error
}

// ConfigInto will set config on i and return the result if it implements Config. Returns
//// false if i does not implement Config.
// false if i does not implement Config.
func ConfigInto(config *rest.Config, i interface{}) (bool, error) {
if s, ok := i.(Config); ok {
return true, s.InjectConfig(config)
@ -71,7 +73,7 @@ func ConfigInto(config *rest.Config, i interface{}) (bool, error) {
}

// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and
// Reconciles
// Reconciles.
type Client interface {
InjectClient(client.Client) error
}
@ -86,7 +88,7 @@ func ClientInto(client client.Client, i interface{}) (bool, error) {
}

// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and
// Reconciles
// Reconciles.
type Scheme interface {
InjectScheme(scheme *runtime.Scheme) error
}
@ -115,7 +117,7 @@ func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) {
return false, nil
}

// Mapper is used to inject the rest mapper to components that may need it
// Mapper is used to inject the rest mapper to components that may need it.
type Mapper interface {
InjectMapper(meta.RESTMapper) error
}
@ -132,7 +134,7 @@ func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) {
// Func injects dependencies into i.
type Func func(i interface{}) error

// Injector is used by the ControllerManager to inject Func into Controllers
// Injector is used by the ControllerManager to inject Func into Controllers.
type Injector interface {
InjectFunc(f Func) error
}
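The package is deprecated, but a sketch still helps show what existing callers look like. Here a reconciler receives its client through the inject.Client interface; the type is hypothetical, and new code should take the client explicitly via manager.Options instead.

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// widgetReconciler picks up its client from the manager via injection.
type widgetReconciler struct {
	client client.Client
}

// InjectClient implements inject.Client; the manager calls it before start.
func (r *widgetReconciler) InjectClient(c client.Client) error {
	r.client = c
	return nil
}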
94
vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package scheme contains utilities for gradually building Schemes,
// which contain information associating Go types with Kubernetes
// groups, versions, and kinds.
//
// Each API group should define a utility function
// called AddToScheme for adding its types to a Scheme:
//
// // in package myapigroupv1...
// var (
// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme = SchemeBuilder.AddToScheme
// )
//
// func init() {
// SchemeBuilder.Register(&MyType{}, &MyTypeList{})
// }
// var (
// scheme *runtime.Scheme = runtime.NewScheme()
// )
//
// This is also true of the built-in Kubernetes types. Then, in the entrypoint for
// your manager, assemble the scheme containing exactly the types you need,
// panicking if scheme registration failed. For instance, if our controller needs
// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
//
// func init() {
// utilruntime.Must(myapigroupv1.AddToScheme(scheme))
// utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
// }
//
// func main() {
// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
// Scheme: scheme,
// })
// // ...
// }
//
package scheme

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)

// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
type Builder struct {
GroupVersion schema.GroupVersion
runtime.SchemeBuilder
}

// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
func (bld *Builder) Register(object ...runtime.Object) *Builder {
bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(bld.GroupVersion, object...)
metav1.AddToGroupVersion(scheme, bld.GroupVersion)
return nil
})
return bld
}

// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
func (bld *Builder) RegisterAll(b *Builder) *Builder {
bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
return bld
}

// AddToScheme adds all registered types to s.
func (bld *Builder) AddToScheme(s *runtime.Scheme) error {
return bld.SchemeBuilder.AddToScheme(s)
}

// Build returns a new Scheme containing the registered types.
func (bld *Builder) Build() (*runtime.Scheme, error) {
s := runtime.NewScheme()
return s, bld.AddToScheme(s)
}
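Beyond the doc comment's examples, a short hedged sketch of using Builder to assemble an isolated scheme; the group and the commented-out types are hypothetical:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

// newTestScheme builds a self-contained *runtime.Scheme, useful in tests,
// without mutating any package-level SchemeBuilder state.
func newTestScheme() (*runtime.Scheme, error) {
	builder := &scheme.Builder{
		GroupVersion: schema.GroupVersion{Group: "my.api.group", Version: "v1"},
	}
	// Real code would register its API types here, e.g.
	// builder.Register(&MyType{}, &MyTypeList{}).
	return builder.Build()
}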
71
vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go
generated
vendored
@ -19,15 +19,13 @@ package internal
import (
"fmt"

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)

@ -35,31 +33,22 @@ var log = logf.RuntimeLog.WithName("source").WithName("EventHandler")

var _ cache.ResourceEventHandler = EventHandler{}

// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface
// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface.
type EventHandler struct {
EventHandler handler.EventHandler
Queue workqueue.RateLimitingInterface
Predicates []predicate.Predicate
}

// OnAdd creates CreateEvent and calls Create on EventHandler
// OnAdd creates CreateEvent and calls Create on EventHandler.
func (e EventHandler) OnAdd(obj interface{}) {
c := event.CreateEvent{}

// Pull metav1.Object out of the object
if o, err := meta.Accessor(obj); err == nil {
c.Meta = o
} else {
log.Error(err, "OnAdd missing Meta",
"object", obj, "type", fmt.Sprintf("%T", obj))
return
}

// Pull the runtime.Object out of the object
if o, ok := obj.(runtime.Object); ok {
// Pull Object out of the object
if o, ok := obj.(client.Object); ok {
c.Object = o
} else {
log.Error(nil, "OnAdd missing runtime.Object",
log.Error(nil, "OnAdd missing Object",
"object", obj, "type", fmt.Sprintf("%T", obj))
return
}

@ -74,21 +63,11 @@ func (e EventHandler) OnAdd(obj interface{}) {
e.EventHandler.Create(c, e.Queue)
}

// OnUpdate creates UpdateEvent and calls Update on EventHandler
// OnUpdate creates UpdateEvent and calls Update on EventHandler.
func (e EventHandler) OnUpdate(oldObj, newObj interface{}) {
u := event.UpdateEvent{}

// Pull metav1.Object out of the object
if o, err := meta.Accessor(oldObj); err == nil {
u.MetaOld = o
} else {
log.Error(err, "OnUpdate missing MetaOld",
"object", oldObj, "type", fmt.Sprintf("%T", oldObj))
return
}

// Pull the runtime.Object out of the object
if o, ok := oldObj.(runtime.Object); ok {
if o, ok := oldObj.(client.Object); ok {
u.ObjectOld = o
} else {
log.Error(nil, "OnUpdate missing ObjectOld",
@ -96,21 +75,12 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) {
return
}

// Pull metav1.Object out of the object
if o, err := meta.Accessor(newObj); err == nil {
u.MetaNew = o
} else {
log.Error(err, "OnUpdate missing MetaNew",
"object", newObj, "type", fmt.Sprintf("%T", newObj))
return
}

// Pull the runtime.Object out of the object
if o, ok := newObj.(runtime.Object); ok {
// Pull Object out of the object
if o, ok := newObj.(client.Object); ok {
u.ObjectNew = o
} else {
log.Error(nil, "OnUpdate missing ObjectNew",
"object", oldObj, "type", fmt.Sprintf("%T", oldObj))
"object", newObj, "type", fmt.Sprintf("%T", newObj))
return
}

@ -124,7 +94,7 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) {
e.EventHandler.Update(u, e.Queue)
}

// OnDelete creates DeleteEvent and calls Delete on EventHandler
// OnDelete creates DeleteEvent and calls Delete on EventHandler.
func (e EventHandler) OnDelete(obj interface{}) {
d := event.DeleteEvent{}

@ -134,7 +104,7 @@ func (e EventHandler) OnDelete(obj interface{}) {
// This should never happen if we aren't missing events, which we have concluded that we are not
// and made decisions off of this belief. Maybe this shouldn't be here?
var ok bool
if _, ok = obj.(metav1.Object); !ok {
if _, ok = obj.(client.Object); !ok {
// If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
@ -148,20 +118,11 @@ func (e EventHandler) OnDelete(obj interface{}) {
obj = tombstone.Obj
}

// Pull metav1.Object out of the object
if o, err := meta.Accessor(obj); err == nil {
d.Meta = o
} else {
log.Error(err, "OnDelete missing Meta",
"object", obj, "type", fmt.Sprintf("%T", obj))
return
}

// Pull the runtime.Object out of the object
if o, ok := obj.(runtime.Object); ok {
// Pull Object out of the object
if o, ok := obj.(client.Object); ok {
d.Object = o
} else {
log.Error(nil, "OnDelete missing runtime.Object",
log.Error(nil, "OnDelete missing Object",
"object", obj, "type", fmt.Sprintf("%T", obj))
return
}
128
vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go
generated
vendored
@ -18,12 +18,13 @@ package source

import (
"context"
"errors"
"fmt"
"sync"

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
@ -53,13 +54,20 @@ const (
type Source interface {
// Start is internal and should be called only by the Controller to register an EventHandler with the Informer
// to enqueue reconcile.Requests.
Start(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error
Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error
}

// SyncingSource is a source that needs syncing prior to being usable. The controller
// will call its WaitForSync prior to starting workers.
type SyncingSource interface {
Source
WaitForSync(ctx context.Context) error
}

// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used
// and not overwritten. It can be used to watch objects in a different cluster by passing the cache
// from that other cluster
func NewKindWithCache(object runtime.Object, cache cache.Cache) Source {
// from that other cluster.
func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource {
return &kindWithCache{kind: Kind{Type: object, cache: cache}}
}

@ -67,27 +75,35 @@ type kindWithCache struct {
kind Kind
}

func (ks *kindWithCache) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface,
func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
prct ...predicate.Predicate) error {
return ks.kind.Start(handler, queue, prct...)
return ks.kind.Start(ctx, handler, queue, prct...)
}

// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create)
func (ks *kindWithCache) WaitForSync(ctx context.Context) error {
return ks.kind.WaitForSync(ctx)
}

// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
type Kind struct {
// Type is the type of object to watch. e.g. &v1.Pod{}
Type runtime.Object
Type client.Object

// cache used to watch APIs
cache cache.Cache

// started may contain an error if one was encountered during startup. If its closed and does not
// contain an error, startup and syncing finished.
started chan error
startCancel func()
}

var _ Source = &Kind{}
var _ SyncingSource = &Kind{}

// Start is internal and should be called only by the Controller to register an EventHandler with the Informer
// to enqueue reconcile.Requests.
func (ks *Kind) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface,
func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
prct ...predicate.Predicate) error {

// Type should have been specified by the user.
if ks.Type == nil {
return fmt.Errorf("must specify Kind.Type")
@ -98,16 +114,30 @@ func (ks *Kind) Start(handler handler.EventHandler, queue workqueue.RateLimiting
return fmt.Errorf("must call CacheInto on Kind before calling Start")
}

// Lookup the Informer from the Cache and add an EventHandler which populates the Queue
i, err := ks.cache.GetInformer(context.TODO(), ks.Type)
if err != nil {
if kindMatchErr, ok := err.(*meta.NoKindMatchError); ok {
log.Error(err, "if kind is a CRD, it should be installed before calling Start",
"kind", kindMatchErr.GroupKind)
// cache.GetInformer will block until its context is cancelled if the cache was already started and it can not
// sync that informer (most commonly due to RBAC issues).
ctx, ks.startCancel = context.WithCancel(ctx)
ks.started = make(chan error)
go func() {
// Lookup the Informer from the Cache and add an EventHandler which populates the Queue
i, err := ks.cache.GetInformer(ctx, ks.Type)
if err != nil {
kindMatchErr := &meta.NoKindMatchError{}
if errors.As(err, &kindMatchErr) {
log.Error(err, "if kind is a CRD, it should be installed before calling Start",
"kind", kindMatchErr.GroupKind)
}
ks.started <- err
return
}
return err
}
i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
if !ks.cache.WaitForCacheSync(ctx) {
// Would be great to return something more informative here
ks.started <- errors.New("cache did not sync")
}
close(ks.started)
}()

return nil
}

@ -115,7 +145,19 @@ func (ks *Kind) String() string {
if ks.Type != nil && ks.Type.GetObjectKind() != nil {
return fmt.Sprintf("kind source: %v", ks.Type.GetObjectKind().GroupVersionKind().String())
}
return fmt.Sprintf("kind source: unknown GVK")
return "kind source: unknown GVK"
}

// WaitForSync implements SyncingSource to allow controllers to wait with starting
// workers until the cache is synced.
func (ks *Kind) WaitForSync(ctx context.Context) error {
select {
case err := <-ks.started:
return err
case <-ctx.Done():
ks.startCancel()
return errors.New("timed out waiting for cache to be synced")
}
}
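The new SyncingSource contract is easiest to see from the caller's side. A hedged sketch of watching a kind through an explicitly provided cache, e.g. one belonging to another cluster; the controller and cache wiring are assumed:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchRemote wires ConfigMap events from remoteCache into c. The
// controller itself calls Start and then WaitForSync on the returned
// SyncingSource before it starts workers.
func watchRemote(c controller.Controller, remoteCache cache.Cache) error {
	src := source.NewKindWithCache(&corev1.ConfigMap{}, remoteCache)
	return c.Watch(src, &handler.EnqueueRequestForObject{})
}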

var _ inject.Cache = &Kind{}
@ -173,6 +215,7 @@ func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error {

// Start implements Source and should only be called by the Controller.
func (cs *Channel) Start(
ctx context.Context,
handler handler.EventHandler,
queue workqueue.RateLimitingInterface,
prct ...predicate.Predicate) error {
@ -191,12 +234,17 @@ func (cs *Channel) Start(
cs.DestBufferSize = defaultBufferSize
}

dst := make(chan event.GenericEvent, cs.DestBufferSize)

cs.destLock.Lock()
cs.dest = append(cs.dest, dst)
cs.destLock.Unlock()

cs.once.Do(func() {
// Distribute GenericEvents to all EventHandler / Queue pairs Watching this source
go cs.syncLoop()
go cs.syncLoop(ctx)
})

dst := make(chan event.GenericEvent, cs.DestBufferSize)
go func() {
for evt := range dst {
shouldHandle := true
@ -213,11 +261,6 @@ func (cs *Channel) Start(
}
}()

cs.destLock.Lock()
defer cs.destLock.Unlock()

cs.dest = append(cs.dest, dst)

return nil
}

@ -244,20 +287,26 @@ func (cs *Channel) distribute(evt event.GenericEvent) {
}
}

func (cs *Channel) syncLoop() {
func (cs *Channel) syncLoop(ctx context.Context) {
for {
select {
case <-cs.stop:
case <-ctx.Done():
// Close destination channels
cs.doStop()
return
case evt := <-cs.Source:
case evt, stillOpen := <-cs.Source:
if !stillOpen {
// if the source channel is closed, we're never gonna get
// anything more on it, so stop & bail
cs.doStop()
return
}
cs.distribute(evt)
}
}
}
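For the Channel source, shutdown is now driven by the context (or by closing the source channel). A hedged sketch of feeding external events into a controller; the events channel is assumed to be produced elsewhere:

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchExternal enqueues a reconcile.Request for every GenericEvent
// pushed into events; cancelling the manager's context stops the loop.
func watchExternal(c controller.Controller, events chan event.GenericEvent) error {
	return c.Watch(&source.Channel{Source: events}, &handler.EnqueueRequestForObject{})
}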

// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create)
// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
type Informer struct {
// Informer is the controller-runtime Informer
Informer cache.Informer
@ -267,9 +316,8 @@ var _ Source = &Informer{}

// Start is internal and should be called only by the Controller to register an EventHandler with the Informer
// to enqueue reconcile.Requests.
func (is *Informer) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface,
func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
prct ...predicate.Predicate) error {

// Informer should have been specified by the user.
if is.Informer == nil {
return fmt.Errorf("must specify Informer.Informer")
@ -283,13 +331,15 @@ func (is *Informer) String() string {
return fmt.Sprintf("informer source: %p", is.Informer)
}

// Func is a function that implements Source
type Func func(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error
var _ Source = Func(nil)

// Start implements Source
func (f Func) Start(evt handler.EventHandler, queue workqueue.RateLimitingInterface,
// Func is a function that implements Source.
type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error

// Start implements Source.
func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface,
pr ...predicate.Predicate) error {
return f(evt, queue, pr...)
return f(ctx, evt, queue, pr...)
}

func (f Func) String() string {
8
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
generated
vendored
@ -31,7 +31,7 @@ type Decoder struct {
codecs serializer.CodecFactory
}

// NewDecoder creates a Decoder given the runtime.Scheme
// NewDecoder creates a Decoder given the runtime.Scheme.
func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) {
return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil
}
@ -64,11 +64,7 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er
}
if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured {
// unmarshal into unstructured's underlying object to avoid calling the decoder
if err := json.Unmarshal(rawObj.Raw, &unstructuredInto.Object); err != nil {
return err
}

return nil
return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object)
}

deserializer := d.codecs.UniversalDeserializer()
5
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go
generated
vendored
@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)

// Defaulter defines functions for setting defaults on resources
// Defaulter defines functions for setting defaults on resources.
type Defaulter interface {
runtime.Object
Default()
@ -58,8 +58,7 @@ func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response {

// Get the object in the request
obj := h.defaulter.DeepCopyObject().(Defaulter)
err := h.decoder.Decode(req, obj)
if err != nil {
if err := h.decoder.Decode(req, obj); err != nil {
return Errored(http.StatusBadRequest, err)
}
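To see the Defaulter contract from the user's side, here is a hedged sketch with a hypothetical Widget type; real API types would get DeepCopyObject from deepcopy-gen rather than by hand:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// Widget is an illustrative API type; TypeMeta supplies GetObjectKind.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Replicas          *int32 `json:"replicas,omitempty"`
}

// DeepCopyObject is hand-rolled only for this sketch; generated code
// would also deep-copy ObjectMeta.
func (w *Widget) DeepCopyObject() runtime.Object {
	out := *w
	if w.Replicas != nil {
		r := *w.Replicas
		out.Replicas = &r
	}
	return &out
}

// Default fills in unset fields; the handler above decodes the request
// into the object and calls this before admitting it.
func (w *Widget) Default() {
	if w.Replicas == nil {
		one := int32(1)
		w.Replicas = &one
	}
}

var _ webhook.Defaulter = &Widget{}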
94
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go
generated
vendored
@ -24,9 +24,10 @@ import (
"io/ioutil"
"net/http"

v1 "k8s.io/api/admission/v1"
"k8s.io/api/admission/v1beta1"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
@ -35,7 +36,8 @@ var admissionScheme = runtime.NewScheme()
var admissionCodecs = serializer.NewCodecFactory(admissionScheme)

func init() {
utilruntime.Must(admissionv1beta1.AddToScheme(admissionScheme))
utilruntime.Must(v1.AddToScheme(admissionScheme))
utilruntime.Must(v1beta1.AddToScheme(admissionScheme))
}

var _ http.Handler = &Webhook{}
@ -43,16 +45,13 @@ var _ http.Handler = &Webhook{}
func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var body []byte
var err error
ctx := r.Context()
if wh.WithContextFunc != nil {
ctx = wh.WithContextFunc(ctx, r)
}

var reviewResponse Response
if r.Body != nil {
if body, err = ioutil.ReadAll(r.Body); err != nil {
wh.log.Error(err, "unable to read the body from the incoming request")
reviewResponse = Errored(http.StatusBadRequest, err)
wh.writeResponse(w, reviewResponse)
return
}
} else {
if r.Body == nil {
err = errors.New("request body is empty")
wh.log.Error(err, "bad request")
reviewResponse = Errored(http.StatusBadRequest, err)
@ -60,9 +59,16 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}

defer r.Body.Close()
if body, err = ioutil.ReadAll(r.Body); err != nil {
wh.log.Error(err, "unable to read the body from the incoming request")
reviewResponse = Errored(http.StatusBadRequest, err)
wh.writeResponse(w, reviewResponse)
return
}

// verify the content type is accurate
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
if contentType := r.Header.Get("Content-Type"); contentType != "application/json" {
err = fmt.Errorf("contentType=%s, expected application/json", contentType)
wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType)
reviewResponse = Errored(http.StatusBadRequest, err)
@ -70,12 +76,19 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}

// Both v1 and v1beta1 AdmissionReview types are exactly the same, so the v1beta1 type can
// be decoded into the v1 type. However the runtime codec's decoder guesses which type to
// decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an
// unregistered type to the v1 GVK, the decoder will coerce a v1beta1 AdmissionReview to v1.
// The actual AdmissionReview GVK will be used to write a typed response in case the
// webhook config permits multiple versions, otherwise this response will fail.
req := Request{}
ar := v1beta1.AdmissionReview{
// avoid an extra copy
Request: &req.AdmissionRequest,
}
if _, _, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil {
ar := unversionedAdmissionReview{}
// avoid an extra copy
ar.Request = &req.AdmissionRequest
ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview"))
_, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar)
if err != nil {
wh.log.Error(err, "unable to decode the request")
reviewResponse = Errored(http.StatusBadRequest, err)
wh.writeResponse(w, reviewResponse)
@ -83,22 +96,51 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource)

// TODO: add panic-recovery for Handle
reviewResponse = wh.Handle(r.Context(), req)
wh.writeResponse(w, reviewResponse)
reviewResponse = wh.Handle(ctx, req)
wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK)
}

// writeResponse writes response to w generically, i.e. without encoding GVK information.
func (wh *Webhook) writeResponse(w io.Writer, response Response) {
encoder := json.NewEncoder(w)
responseAdmissionReview := v1beta1.AdmissionReview{
wh.writeAdmissionResponse(w, v1.AdmissionReview{Response: &response.AdmissionResponse})
}

// writeResponseTyped writes response to w with GVK set to admRevGVK, which is necessary
// if multiple AdmissionReview versions are permitted by the webhook.
func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK *schema.GroupVersionKind) {
ar := v1.AdmissionReview{
Response: &response.AdmissionResponse,
}
err := encoder.Encode(responseAdmissionReview)
if err != nil {
// Default to a v1 AdmissionReview, otherwise the API server may not recognize the request
// if multiple AdmissionReview versions are permitted by the webhook config.
// TODO(estroz): this should be configurable since older API servers won't know about v1.
if admRevGVK == nil || *admRevGVK == (schema.GroupVersionKind{}) {
ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview"))
} else {
ar.SetGroupVersionKind(*admRevGVK)
}
wh.writeAdmissionResponse(w, ar)
}

// writeAdmissionResponse writes ar to w.
func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) {
if err := json.NewEncoder(w).Encode(ar); err != nil {
wh.log.Error(err, "unable to encode the response")
wh.writeResponse(w, Errored(http.StatusInternalServerError, err))
} else {
res := responseAdmissionReview.Response
wh.log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed, "result", res.Result)
res := ar.Response
if log := wh.log; log.V(1).Enabled() {
if res.Result != nil {
log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason)
}
log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed)
}
}
}

// unversionedAdmissionReview is used to decode both v1 and v1beta1 AdmissionReview types.
type unversionedAdmissionReview struct {
v1.AdmissionReview
}

var _ runtime.Object = &unversionedAdmissionReview{}
35
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go
generated
vendored
@ -22,9 +22,10 @@ import (
"fmt"
"net/http"

"gomodules.xyz/jsonpatch/v2"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
jsonpatch "gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)

@ -37,10 +38,10 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response {
if !resp.Allowed {
return resp
}
if resp.PatchType != nil && *resp.PatchType != admissionv1beta1.PatchTypeJSONPatch {
if resp.PatchType != nil && *resp.PatchType != admissionv1.PatchTypeJSONPatch {
return Errored(http.StatusInternalServerError,
fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v",
resp.PatchType, admissionv1beta1.PatchTypeJSONPatch))
resp.PatchType, admissionv1.PatchTypeJSONPatch))
}
patches = append(patches, resp.Patches...)
}
@ -50,13 +51,13 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response {
return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %w", err))
}
return Response{
AdmissionResponse: admissionv1beta1.AdmissionResponse{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Code: http.StatusOK,
},
Patch: marshaledPatch,
PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(),
PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(),
},
}
}
@ -76,6 +77,16 @@ func (hs multiMutating) InjectFunc(f inject.Func) error {
return nil
}

// InjectDecoder injects the decoder into the handlers.
func (hs multiMutating) InjectDecoder(d *Decoder) error {
for _, handler := range hs {
if _, err := InjectDecoderInto(d, handler); err != nil {
return err
}
}
return nil
}

// MultiMutatingHandler combines multiple mutating webhook handlers into a single
// mutating webhook handler. Handlers are called in sequential order, and the first
// `allowed: false` response may short-circuit the rest. Users must take care to
@ -94,7 +105,7 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response {
}
}
return Response{
AdmissionResponse: admissionv1beta1.AdmissionResponse{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Code: http.StatusOK,
@ -124,3 +135,13 @@ func (hs multiValidating) InjectFunc(f inject.Func) error {

return nil
}

// InjectDecoder injects the decoder into the handlers.
func (hs multiValidating) InjectDecoder(d *Decoder) error {
for _, handler := range hs {
if _, err := InjectDecoderInto(d, handler); err != nil {
return err
}
}
return nil
}
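As a hedged sketch of composing handlers (the two handler values are assumed to exist elsewhere):

package example

import "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

// combined chains two mutating handlers into a single webhook; per the
// doc comment above, the first "allowed: false" response short-circuits.
func combined(a, b admission.Handler) *admission.Webhook {
	return &admission.Webhook{Handler: admission.MultiMutatingHandler(a, b)}
}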
39
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go
generated
vendored
@ -19,9 +19,8 @@ package admission
import (
"net/http"

"gomodules.xyz/jsonpatch/v2"

admissionv1beta1 "k8s.io/api/admission/v1beta1"
jsonpatch "gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@ -50,7 +49,7 @@ func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response {
// Errored creates a new Response for error-handling a request.
func Errored(code int32, err error) Response {
return Response{
AdmissionResponse: admissionv1beta1.AdmissionResponse{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Code: code,
@ -67,7 +66,7 @@ func ValidationResponse(allowed bool, reason string) Response {
code = http.StatusOK
}
resp := Response{
AdmissionResponse: admissionv1beta1.AdmissionResponse{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: allowed,
Result: &metav1.Status{
Code: int32(code),
@ -90,9 +89,33 @@ func PatchResponseFromRaw(original, current []byte) Response {
}
return Response{
Patches: patches,
AdmissionResponse: admissionv1beta1.AdmissionResponse{
Allowed: true,
PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(),
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
PatchType: func() *admissionv1.PatchType {
if len(patches) == 0 {
return nil
}
pt := admissionv1.PatchTypeJSONPatch
return &pt
}(),
},
}
}

// validationResponseFromStatus returns a response for admitting a request with provided Status object.
func validationResponseFromStatus(allowed bool, status metav1.Status) Response {
resp := Response{
AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: allowed,
Result: &status,
},
}
return resp
}

// WithWarnings adds the given warnings to the Response.
// If any warnings were already given, they will not be overwritten.
func (r Response) WithWarnings(warnings ...string) Response {
r.AdmissionResponse.Warnings = append(r.AdmissionResponse.Warnings, warnings...)
return r
}
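The new WithWarnings helper composes with the existing response constructors. A hedged sketch (the deprecated field name is invented):

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// handle admits the request but surfaces a client-visible warning.
func handle(_ context.Context, _ admission.Request) admission.Response {
	return admission.Allowed("ok").WithWarnings("field .spec.legacy is deprecated")
}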
24
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go
generated
vendored
@ -18,13 +18,15 @@ package admission

import (
"context"
goerrors "errors"
"net/http"

"k8s.io/api/admission/v1beta1"
v1 "k8s.io/api/admission/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
)

// Validator defines functions for validating an operation
// Validator defines functions for validating an operation.
type Validator interface {
runtime.Object
ValidateCreate() error
@ -60,7 +62,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {

// Get the object in the request
obj := h.validator.DeepCopyObject().(Validator)
if req.Operation == v1beta1.Create {
if req.Operation == v1.Create {
err := h.decoder.Decode(req, obj)
if err != nil {
return Errored(http.StatusBadRequest, err)
@ -68,11 +70,15 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {

err = obj.ValidateCreate()
if err != nil {
var apiStatus apierrors.APIStatus
if goerrors.As(err, &apiStatus) {
return validationResponseFromStatus(false, apiStatus.Status())
}
return Denied(err.Error())
}
}

if req.Operation == v1beta1.Update {
if req.Operation == v1.Update {
oldObj := obj.DeepCopyObject()

err := h.decoder.DecodeRaw(req.Object, obj)
@ -86,11 +92,15 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {

err = obj.ValidateUpdate(oldObj)
if err != nil {
var apiStatus apierrors.APIStatus
if goerrors.As(err, &apiStatus) {
return validationResponseFromStatus(false, apiStatus.Status())
}
return Denied(err.Error())
}
}

if req.Operation == v1beta1.Delete {
if req.Operation == v1.Delete {
// In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346
// OldObject contains the object being deleted
err := h.decoder.DecodeRaw(req.OldObject, obj)
@ -100,6 +110,10 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {

err = obj.ValidateDelete()
if err != nil {
var apiStatus apierrors.APIStatus
if goerrors.As(err, &apiStatus) {
return validationResponseFromStatus(false, apiStatus.Status())
}
return Denied(err.Error())
}
}
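A hedged sketch of returning a structured API status from a Validator, which the handler above turns into a typed response via validationResponseFromStatus; Widget is the hypothetical type from the Defaulter sketch earlier:

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// ValidateCreate rejects negative replica counts with an Invalid error;
// a plain error would surface only as a generic Denied response.
func (w *Widget) ValidateCreate() error {
	if w.Replicas != nil && *w.Replicas < 0 {
		return apierrors.NewInvalid(
			schema.GroupKind{Group: "my.api.group", Kind: "Widget"},
			w.Name,
			field.ErrorList{field.Invalid(
				field.NewPath("replicas"), *w.Replicas, "must be non-negative")})
	}
	return nil
}

func (w *Widget) ValidateUpdate(old runtime.Object) error { return w.ValidateCreate() }
func (w *Widget) ValidateDelete() error                   { return nil }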
93
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go
generated
vendored
@ -22,13 +22,16 @@ import (
"net/http"

"github.com/go-logr/logr"
"gomodules.xyz/jsonpatch/v2"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
jsonpatch "gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/kubernetes/scheme"

logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics"
)

var (
@ -41,7 +44,7 @@ var (
// name, namespace), as well as the operation in question
// (e.g. Get, Create, etc), and the object itself.
type Request struct {
admissionv1beta1.AdmissionRequest
admissionv1.AdmissionRequest
}

// Response is the output of an admission handler.
@ -57,7 +60,7 @@ type Response struct {
Patches []jsonpatch.JsonPatchOperation
// AdmissionResponse is the raw admission response.
// The Patch field in it will be overwritten by the listed patches.
admissionv1beta1.AdmissionResponse
admissionv1.AdmissionResponse
}

// Complete populates any fields that are yet to be set in
@ -84,7 +87,7 @@ func (r *Response) Complete(req Request) error {
if err != nil {
return err
}
patchType := admissionv1beta1.PatchTypeJSONPatch
patchType := admissionv1.PatchTypeJSONPatch
r.PatchType = &patchType

return nil
@ -110,11 +113,19 @@ func (f HandlerFunc) Handle(ctx context.Context, req Request) Response {
}

// Webhook represents each individual webhook.
//
// It must be registered with a webhook.Server or
// populated by StandaloneWebhook to be run on an arbitrary HTTP server.
type Webhook struct {
// Handler actually processes an admission request returning whether it was allowed or denied,
// and potentially patches to apply to the handler.
Handler Handler

// WithContextFunc will allow you to take the http.Request.Context() and
// add any additional information such as passing the request path or
// headers thus allowing you to read them from within the handler
WithContextFunc func(context.Context, *http.Request) context.Context

// decoder is constructed on receiving a scheme and passed down to the handler
decoder *Decoder

@ -122,8 +133,8 @@ type Webhook struct {
}

// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook.
func (w *Webhook) InjectLogger(l logr.Logger) error {
w.log = l
func (wh *Webhook) InjectLogger(l logr.Logger) error {
wh.log = l
return nil
}

@ -131,10 +142,10 @@ func (w *Webhook) InjectLogger(l logr.Logger) error {
// If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merges the patches.
// If the webhook is validating type, it delegates the AdmissionRequest to each handler and
// denies the request if any handler denies.
func (w *Webhook) Handle(ctx context.Context, req Request) Response {
resp := w.Handler.Handle(ctx, req)
func (wh *Webhook) Handle(ctx context.Context, req Request) Response {
resp := wh.Handler.Handle(ctx, req)
if err := resp.Complete(req); err != nil {
w.log.Error(err, "unable to encode response")
wh.log.Error(err, "unable to encode response")
return Errored(http.StatusInternalServerError, errUnableToEncodeResponse)
}

@ -142,19 +153,19 @@ func (w *Webhook) Handle(ctx context.Context, req Request) Response {
}

// InjectScheme injects a scheme into the webhook, in order to construct a Decoder.
func (w *Webhook) InjectScheme(s *runtime.Scheme) error {
func (wh *Webhook) InjectScheme(s *runtime.Scheme) error {
// TODO(directxman12): we should have a better way to pass this down

var err error
w.decoder, err = NewDecoder(s)
wh.decoder, err = NewDecoder(s)
if err != nil {
return err
}

// inject the decoder here too, just in case the order of calling this is not
// scheme first, then inject func
if w.Handler != nil {
if _, err := InjectDecoderInto(w.GetDecoder(), w.Handler); err != nil {
if wh.Handler != nil {
if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil {
return err
}
}
@ -164,12 +175,12 @@ func (w *Webhook) InjectScheme(s *runtime.Scheme) error {

// GetDecoder returns a decoder to decode the objects embedded in admission requests.
// It may be nil if we haven't received a scheme to use to determine object types yet.
func (w *Webhook) GetDecoder() *Decoder {
return w.decoder
func (wh *Webhook) GetDecoder() *Decoder {
return wh.decoder
}

// InjectFunc injects the field setter into the webhook.
func (w *Webhook) InjectFunc(f inject.Func) error {
func (wh *Webhook) InjectFunc(f inject.Func) error {
// inject directly into the handlers. It would be more correct
// to do this in a sync.Once in Handle (since we don't have some
// other start/finalize-type method), but it's more efficient to
@ -189,12 +200,56 @@ func (w *Webhook) InjectFunc(f inject.Func) error {
return err
}

if _, err := InjectDecoderInto(w.GetDecoder(), target); err != nil {
if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil {
return err
}

return nil
}

return setFields(w.Handler)
return setFields(wh.Handler)
}

// StandaloneOptions let you configure a StandaloneWebhook.
type StandaloneOptions struct {
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always a better
// idea to pass your own scheme in. See the documentation in pkg/scheme for more information.
Scheme *runtime.Scheme
// Logger to be used by the webhook.
// If none is set, it defaults to log.Log global logger.
Logger logr.Logger
// MetricsPath is used for labelling prometheus metrics
// by the path it is served on.
// If none is set, prometheus metrics will not be generated.
MetricsPath string
}

// StandaloneWebhook prepares a webhook for use without a webhook.Server,
// passing in the information normally populated by webhook.Server
// and instrumenting the webhook with metrics.
//
// Use this to attach your webhook to an arbitrary HTTP server or mux.
//
// Note that you are responsible for terminating TLS if you use StandaloneWebhook
// in your own server/mux. In order to be accessed by a kubernetes cluster,
// all webhook servers require TLS.
func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) {
if opts.Scheme == nil {
opts.Scheme = scheme.Scheme
}

if err := hook.InjectScheme(opts.Scheme); err != nil {
return nil, err
}

if opts.Logger == nil {
opts.Logger = logf.RuntimeLog.WithName("webhook")
}
hook.log = opts.Logger

if opts.MetricsPath == "" {
return hook, nil
}
return metrics.InstrumentedHook(opts.MetricsPath, hook), nil
}
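A hedged sketch of the standalone path; the mount path and the hook value are illustrative, and TLS termination stays with the caller, as the doc comment notes:

package example

import (
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// mount serves the webhook from an ordinary mux instead of webhook.Server.
func mount(mux *http.ServeMux, hook *admission.Webhook) error {
	h, err := admission.StandaloneWebhook(hook, admission.StandaloneOptions{
		MetricsPath: "/validate-widget", // labels the prometheus metrics
	})
	if err != nil {
		return err
	}
	mux.Handle("/validate-widget", h)
	return nil
}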
4
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go
generated
vendored
@ -23,10 +23,10 @@ import (

// define some aliases for common bits of the webhook functionality

// Defaulter defines functions for setting defaults on resources
// Defaulter defines functions for setting defaults on resources.
type Defaulter = admission.Defaulter

// Validator defines functions for validating an operation
// Validator defines functions for validating an operation.
type Validator = admission.Validator

// AdmissionRequest defines the input for an admission handler.
49
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go
generated
vendored
@ -17,7 +17,10 @@ limitations under the License.
package metrics

import (
"net/http"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"

"sigs.k8s.io/controller-runtime/pkg/metrics"
)
@ -32,9 +35,51 @@ var (
},
[]string{"webhook"},
)

// RequestTotal is a prometheus metric which is a counter of the total processed admission requests.
RequestTotal = func() *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "controller_runtime_webhook_requests_total",
Help: "Total number of admission requests by HTTP status code.",
},
[]string{"webhook", "code"},
)
}()

// RequestInFlight is a prometheus metric which is a gauge of the in-flight admission requests.
RequestInFlight = func() *prometheus.GaugeVec {
return prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "controller_runtime_webhook_requests_in_flight",
Help: "Current number of admission requests being served.",
},
[]string{"webhook"},
)
}()
)

func init() {
metrics.Registry.MustRegister(
RequestLatency)
metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight)
}

// InstrumentedHook adds some instrumentation on top of the given webhook.
func InstrumentedHook(path string, hookRaw http.Handler) http.Handler {
lbl := prometheus.Labels{"webhook": path}

lat := RequestLatency.MustCurryWith(lbl)
cnt := RequestTotal.MustCurryWith(lbl)
gge := RequestInFlight.With(lbl)

// Initialize the most likely HTTP status codes.
cnt.WithLabelValues("200")
cnt.WithLabelValues("500")

return promhttp.InstrumentHandlerDuration(
lat,
promhttp.InstrumentHandlerCounter(
cnt,
promhttp.InstrumentHandlerInFlight(gge, hookRaw),
),
)
}
|
||||
|
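The three promhttp wrappers compose around the raw handler: currying pins the "webhook" label at registration time, and promhttp fills the remaining "code" label per response. A standalone sketch of the same currying pattern on an ordinary handler; the metric name, path, and port are assumptions for illustration:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	total := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "demo_requests_total",
			Help: "Requests by path and HTTP status code.",
		},
		[]string{"path", "code"},
	)
	prometheus.MustRegister(total)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	// Pin the "path" label now; promhttp fills "code" on every response.
	cnt := total.MustCurryWith(prometheus.Labels{"path": "/hello"})
	http.Handle("/hello", promhttp.InstrumentHandlerCounter(cnt, hello))
	http.Handle("/metrics", promhttp.Handler())
	panic(http.ListenAndServe(":8080", nil))
}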
151
vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go
generated
vendored
@ -28,25 +28,32 @@ import (
	"path/filepath"
	"strconv"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	kscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
	"sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher"
	"sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics"
)

// DefaultPort is the default port that the webhook server serves.
var DefaultPort = 443
var DefaultPort = 9443

// Server is an admission webhook server that can serve traffic and
// generates related k8s resources for deploying.
//
// TLS is required for a webhook to be accessed by kubernetes, so
// you must provide a CertName and KeyName or have valid cert/key
// at the default locations (tls.crt and tls.key). If you do not
// want to configure TLS (i.e. for testing purposes) run an
// admission.StandaloneWebhook in your own server.
type Server struct {
	// Host is the address that the server will listen on.
	// Defaults to "" - all addresses.
	Host string

	// Port is the port number that the server will serve.
	// It will be defaulted to 443 if unspecified.
	// It will be defaulted to 9443 if unspecified.
	Port int

	// CertDir is the directory that contains the server key and certificate. The
@ -63,6 +70,10 @@ type Server struct {
	// Defaults to "", which means the server does not verify the client's certificate.
	ClientCAName string

	// TLSMinVersion is the minimum version of TLS supported. Accepts
	// "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility).
	TLSMinVersion string

	// WebhookMux is the multiplexer that handles different webhooks.
	WebhookMux *http.ServeMux

@ -75,6 +86,9 @@ type Server struct {

	// defaultingOnce ensures that the default fields are only ever set once.
	defaultingOnce sync.Once

	// mu protects access to the webhook map & setFields for Start, Register, etc.
	mu sync.Mutex
}
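Pulling those fields together, a hedged configuration sketch; the values are assumptions, only the field names and defaults come from the struct above:

package main

import "sigs.k8s.io/controller-runtime/pkg/webhook"

func main() {
	srv := &webhook.Server{
		Host:          "",   // listen on all addresses (the default)
		Port:          9443, // matches the new DefaultPort
		CertDir:       "/tmp/k8s-webhook-server/serving-certs", // assumed cert location
		TLSMinVersion: "1.2", // reject TLS 1.0/1.1 clients
	}
	_ = srv // registration and startup are shown in the sketches below
}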
// setDefaults does defaulting for the Server.
@ -110,50 +124,87 @@ func (*Server) NeedLeaderElection() bool {
// Register marks the given webhook as being served at the given path.
// It panics if two hooks are registered on the same path.
func (s *Server) Register(path string, hook http.Handler) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.defaultingOnce.Do(s.setDefaults)
	_, found := s.webhooks[path]
	if found {
	if _, found := s.webhooks[path]; found {
		panic(fmt.Errorf("can't register duplicate path: %v", path))
	}
	// TODO(directxman12): call setfields if we've already started the server
	s.webhooks[path] = hook
	s.WebhookMux.Handle(path, instrumentedHook(path, hook))
	log.Info("registering webhook", "path", path)
}
	s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook))

// instrumentedHook adds some instrumentation on top of the given webhook.
func instrumentedHook(path string, hookRaw http.Handler) http.Handler {
	return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		startTS := time.Now()
		defer func() { metrics.RequestLatency.WithLabelValues(path).Observe(time.Since(startTS).Seconds()) }()
		hookRaw.ServeHTTP(resp, req)
	regLog := log.WithValues("path", path)
	regLog.Info("registering webhook")

		// TODO(directxman12): add back in metric about total requests broken down by result?
	})
}

// Start runs the server.
// It will install the webhook related resources depending on the server configuration.
func (s *Server) Start(stop <-chan struct{}) error {
	s.defaultingOnce.Do(s.setDefaults)

	baseHookLog := log.WithName("webhooks")
	baseHookLog.Info("starting webhook server")

	// inject fields here as opposed to in Register so that we're certain to have our setFields
	// function available.
	for hookPath, webhook := range s.webhooks {
		if err := s.setFields(webhook); err != nil {
			return err
	// if we've already been "started", inject dependencies here.
	// Otherwise, InjectFunc will do this for us later.
	if s.setFields != nil {
		if err := s.setFields(hook); err != nil {
			// TODO(directxman12): swallowing this error isn't great, but we'd have to
			// change the signature to fix that
			regLog.Error(err, "unable to inject fields into webhook during registration")
		}

		baseHookLog := log.WithName("webhooks")

		// NB(directxman12): we don't propagate this further by wrapping setFields because it's
		// unclear if this is how we want to deal with log propagation. In this specific instance,
		// we want to be able to pass a logger to webhooks because they don't know their own path.
		if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil {
			return err
		if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil {
			regLog.Error(err, "unable to inject a logger into webhook during registration")
		}
	}
}
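A short Register sketch; webhook.Admission is the alias that wraps an admission.Handler as an http.Handler, and the path and handler body are assumptions:

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func main() {
	srv := &webhook.Server{Port: 9443}

	// Register panics on duplicate paths, so each path is wired exactly once;
	// the handler is instrumented with the metrics shown earlier.
	srv.Register("/mutate-example", &webhook.Admission{
		Handler: admission.HandlerFunc(func(ctx context.Context, req admission.Request) admission.Response {
			return admission.Allowed("illustrative no-op")
		}),
	})
}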
// StartStandalone runs a webhook server without
// a controller manager.
func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error {
	// Use the Kubernetes client-go scheme if none is specified.
	if scheme == nil {
		scheme = kscheme.Scheme
	}

	if err := s.InjectFunc(func(i interface{}) error {
		if _, err := inject.SchemeInto(scheme, i); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	return s.Start(ctx)
}
// tlsVersion converts from human-readable TLS version (for example "1.1")
|
||||
// to the values accepted by tls.Config (for example 0x301).
|
||||
func tlsVersion(version string) (uint16, error) {
|
||||
switch version {
|
||||
// default is previous behaviour
|
||||
case "":
|
||||
return tls.VersionTLS10, nil
|
||||
case "1.0":
|
||||
return tls.VersionTLS10, nil
|
||||
case "1.1":
|
||||
return tls.VersionTLS11, nil
|
||||
case "1.2":
|
||||
return tls.VersionTLS12, nil
|
||||
case "1.3":
|
||||
return tls.VersionTLS13, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version)
|
||||
}
|
||||
}
|
||||
|
||||
// Start runs the server.
// It will install the webhook related resources depending on the server configuration.
func (s *Server) Start(ctx context.Context) error {
	s.defaultingOnce.Do(s.setDefaults)

	baseHookLog := log.WithName("webhooks")
	baseHookLog.Info("starting webhook server")

	certPath := filepath.Join(s.CertDir, s.CertName)
	keyPath := filepath.Join(s.CertDir, s.KeyName)
@ -164,14 +215,20 @@ func (s *Server) Start(stop <-chan struct{}) error {
	}

	go func() {
		if err := certWatcher.Start(stop); err != nil {
		if err := certWatcher.Start(ctx); err != nil {
			log.Error(err, "certificate watcher error")
		}
	}()

	cfg := &tls.Config{
	tlsMinVersion, err := tlsVersion(s.TLSMinVersion)
	if err != nil {
		return err
	}

	cfg := &tls.Config{ //nolint:gosec
		NextProtos:     []string{"h2"},
		GetCertificate: certWatcher.GetCertificate,
		MinVersion:     tlsMinVersion,
	}

	// load CA to verify client certificate
@ -191,7 +248,7 @@ func (s *Server) Start(stop <-chan struct{}) error {
		cfg.ClientAuth = tls.RequireAndVerifyClientCert
	}

	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(int(s.Port))), cfg)
	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg)
	if err != nil {
		return err
	}
@ -204,7 +261,7 @@ func (s *Server) Start(stop <-chan struct{}) error {

	idleConnsClosed := make(chan struct{})
	go func() {
		<-stop
		<-ctx.Done()
		log.Info("shutting down webhook server")

		// TODO: use a context with reasonable timeout
@ -215,8 +272,7 @@ func (s *Server) Start(stop <-chan struct{}) error {
		close(idleConnsClosed)
	}()

	err = srv.Serve(listener)
	if err != nil && err != http.ErrServerClosed {
	if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
		return err
	}

@ -227,5 +283,20 @@ func (s *Server) Start(stop <-chan struct{}) error {
// InjectFunc injects the field setter into the server.
func (s *Server) InjectFunc(f inject.Func) error {
	s.setFields = f

	// inject fields here that weren't injected in Register because we didn't have setFields yet.
	baseHookLog := log.WithName("webhooks")
	for hookPath, webhook := range s.webhooks {
		if err := s.setFields(webhook); err != nil {
			return err
		}

		// NB(directxman12): we don't propagate this further by wrapping setFields because it's
		// unclear if this is how we want to deal with log propagation. In this specific instance,
		// we want to be able to pass a logger to webhooks because they don't know their own path.
		if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil {
			return err
		}
	}
	return nil
}
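Finally, a sketch of driving the context-based Start now that the stop channel is gone. signals.SetupSignalHandler is the usual way to obtain such a context in controller-runtime; the port and cert directory are assumptions:

package main

import (
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func main() {
	srv := &webhook.Server{
		Port:    9443,
		CertDir: "/tmp/k8s-webhook-server/serving-certs", // assumed location of tls.crt/tls.key
	}

	// SetupSignalHandler returns a context cancelled on SIGTERM/SIGINT;
	// Start blocks until then, shuts the listener down, and drains connections.
	ctx := signals.SetupSignalHandler()
	if err := srv.Start(ctx); err != nil {
		panic(err)
	}
}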