Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
commit 83559144b1 (parent 4abe128bd8)
committed by mergify[bot]

    rebase: update kubernetes to v1.20.0

    Updated the vendored Kubernetes packages to the latest release.

    Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
vendor/k8s.io/client-go/tools/auth/clientauth.go (generated, vendored) | 4

@@ -75,11 +75,11 @@ import (
 // to be read/written from a file as a JSON object.
 type Info struct {
 	User        string
-	Password    string
+	Password    string `datapolicy:"password"`
 	CAFile      string
 	CertFile    string
 	KeyFile     string
-	BearerToken string
+	BearerToken string `datapolicy:"token"`
 	Insecure    *bool
 }
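Note on the change above: the new `datapolicy` struct tags are inert at runtime; in Kubernetes they mark fields whose values must never be logged, for consumption by log-sanitization tooling. A minimal sketch (the `info` type below is a stand-in for the vendored struct, not part of this commit) showing they are ordinary struct tags, readable via reflection:

package main

import (
	"fmt"
	"reflect"
)

// Stand-in for the tagged fields above; the datapolicy tag feeds
// sanitization tooling rather than any runtime behavior.
type info struct {
	Password    string `datapolicy:"password"`
	BearerToken string `datapolicy:"token"`
}

func main() {
	t := reflect.TypeOf(info{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%-11s datapolicy=%q\n", f.Name, f.Tag.Get("datapolicy"))
	}
}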
vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored) | 3

@@ -26,7 +26,6 @@ reviewers:
 - pmorie
 - janetkuo
 - justinsb
-- eparis
 - soltysh
 - jsafrane
 - dims
@@ -38,8 +37,6 @@ reviewers:
 - ingvagabund
 - resouer
 - jessfraz
-- david-mcmahon
 - mfojtik
 - mqliang
 - sdminonne
-- ncdc
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored) | 24

@@ -69,6 +69,12 @@ type Config struct {
 	// question to this interface as a parameter. This is probably moot
 	// now that this functionality appears at a higher level.
 	RetryOnError bool
+
+	// Called whenever the ListAndWatch drops the connection with an error.
+	WatchErrorHandler WatchErrorHandler
+
+	// WatchListPageSize is the requested chunk size of initial and relist watch lists.
+	WatchListPageSize int64
 }
 
 // ShouldResyncFunc is a type of function that indicates if a reflector should perform a
@@ -131,18 +137,22 @@ func (c *controller) Run(stopCh <-chan struct{}) {
 		c.config.FullResyncPeriod,
 	)
 	r.ShouldResync = c.config.ShouldResync
+	r.WatchListPageSize = c.config.WatchListPageSize
 	r.clock = c.clock
+	if c.config.WatchErrorHandler != nil {
+		r.watchErrorHandler = c.config.WatchErrorHandler
+	}
 
 	c.reflectorMutex.Lock()
 	c.reflector = r
 	c.reflectorMutex.Unlock()
 
 	var wg wait.Group
-	defer wg.Wait()
 
 	wg.StartWithChannel(stopCh, r.Run)
 
 	wait.Until(c.processLoop, time.Second, stopCh)
+	wg.Wait()
 }
 
 // Returns true once this controller has completed an initial resource listing
@@ -183,9 +193,11 @@ func (c *controller) processLoop() {
 	}
 }
 
-// ResourceEventHandler can handle notifications for events that happen to a
-// resource. The events are informational only, so you can't return an
-// error.
+// ResourceEventHandler can handle notifications for events that
+// happen to a resource. The events are informational only, so you
+// can't return an error. The handlers MUST NOT modify the objects
+// received; this concerns not only the top level of structure but all
+// the data structures reachable from it.
 //  * OnAdd is called when an object is added.
 //  * OnUpdate is called when an object is modified. Note that oldObj is the
 //      last known state of the object-- it is possible that several changes
@@ -205,7 +217,8 @@ type ResourceEventHandler interface {
 
 // ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
 // as few of the notification functions as you want while still implementing
-// ResourceEventHandler.
+// ResourceEventHandler. This adapter does not remove the prohibition against
+// modifying the objects.
 type ResourceEventHandlerFuncs struct {
 	AddFunc    func(obj interface{})
 	UpdateFunc func(oldObj, newObj interface{})
@@ -237,6 +250,7 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
 // in, ensuring the appropriate nested handler method is invoked. An object
 // that starts passing the filter after an update is considered an add, and an
 // object that stops passing the filter after an update is considered a delete.
+// Like the handlers, the filter MUST NOT modify the objects it is given.
 type FilteringResourceEventHandler struct {
 	FilterFunc func(obj interface{}) bool
 	Handler    ResourceEventHandler
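For context, a sketch of how the two new Config fields might be wired when building a low-level controller by hand (most code goes through shared informers instead). The function name is illustrative, and `lw`, `store`, and `stopCh` are assumed to come from the caller:

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// newPodController is illustrative only: it wires the new Config knobs.
func newPodController(lw cache.ListerWatcher, store cache.Store, stopCh <-chan struct{}) {
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{KnownObjects: store})
	cfg := &cache.Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       &v1.Pod{},
		FullResyncPeriod: 0,
		// New in this release: invoked when ListAndWatch drops the connection.
		WatchErrorHandler: func(r *cache.Reflector, err error) {
			klog.Warningf("watch dropped: %v", err)
		},
		// New in this release: chunk size for the initial LIST and relists.
		WatchListPageSize: 500,
		Process: func(obj interface{}) error {
			return nil // a real controller would handle the Deltas here
		},
	}
	go cache.New(cfg).Run(stopCh)
}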
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored) | 82

@@ -23,7 +23,7 @@ import (
 
 	"k8s.io/apimachinery/pkg/util/sets"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 // NewDeltaFIFO returns a Queue which can be used to process changes to items.
@@ -41,7 +41,7 @@ import (
 // affects error retrying.
 // NOTE: It is possible to misuse this and cause a race when using an
 // external known object source.
-// Whether there is a potential race depends on how the comsumer
+// Whether there is a potential race depends on how the consumer
 // modifies knownObjects. In Pop(), process function is called under
 // lock, so it is safe to update data structures in it that need to be
 // in sync with the queue (e.g. knownObjects).
@@ -99,7 +99,7 @@ type DeltaFIFOOptions struct {
 	EmitDeltaTypeReplaced bool
 }
 
-// NewDeltaFIFOWithOptions returns a Store which can be used process changes to
+// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to
 // items. See also the comment on DeltaFIFO.
 func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
 	if opts.KeyFunction == nil {
@@ -144,7 +144,8 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
 //
 // DeltaFIFO's Pop(), Get(), and GetByKey() methods return
 // interface{} to satisfy the Store/Queue interfaces, but they
-// will always return an object of type Deltas.
+// will always return an object of type Deltas. List() returns
+// the newest object from each accumulator in the FIFO.
 //
 // A DeltaFIFO's knownObjects KeyListerGetter provides the abilities
 // to list Store keys and to get objects by Store key. The objects in
@@ -160,14 +161,17 @@ type DeltaFIFO struct {
 	lock sync.RWMutex
 	cond sync.Cond
 
-	// We depend on the property that items in the set are in
-	// the queue and vice versa, and that all Deltas in this
-	// map have at least one Delta.
+	// `items` maps a key to a Deltas.
+	// Each such Deltas has at least one Delta.
 	items map[string]Deltas
+
+	// `queue` maintains FIFO order of keys for consumption in Pop().
+	// There are no duplicates in `queue`.
+	// A key is in `queue` if and only if it is in `items`.
 	queue []string
 
 	// populated is true if the first batch of items inserted by Replace() has been populated
-	// or Delete/Add/Update was called first.
+	// or Delete/Add/Update/AddIfNotPresent was called first.
 	populated bool
 	// initialPopulationCount is the number of items inserted by the first call of Replace()
 	initialPopulationCount int
@@ -180,11 +184,9 @@ type DeltaFIFO struct {
 	// Replace(), and Resync()
 	knownObjects KeyListerGetter
 
-	// Indication the queue is closed.
 	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
 	// Currently, not used to gate any of CRED operations.
-	closed     bool
-	closedLock sync.Mutex
+	closed bool
 
 	// emitDeltaTypeReplaced is whether to emit the Replaced or Sync
 	// DeltaType when Replace() is called (to preserve backwards compat).
@@ -204,8 +206,8 @@ var (
 
 // Close the queue.
 func (f *DeltaFIFO) Close() {
-	f.closedLock.Lock()
-	defer f.closedLock.Unlock()
+	f.lock.Lock()
+	defer f.lock.Unlock()
 	f.closed = true
 	f.cond.Broadcast()
 }
@@ -226,7 +228,7 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
 }
 
 // HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
-// or an Update called first but the first batch of items inserted by Replace() has been popped
+// or the first batch of items inserted by Replace() has been popped.
 func (f *DeltaFIFO) HasSynced() bool {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -283,6 +285,7 @@ func (f *DeltaFIFO) Delete(obj interface{}) error {
 		}
 	}
 
+	// exist in items and/or KnownObjects
 	return f.queueActionLocked(Deleted, obj)
 }
 
@@ -333,6 +336,11 @@ func dedupDeltas(deltas Deltas) Deltas {
 	a := &deltas[n-1]
 	b := &deltas[n-2]
 	if out := isDup(a, b); out != nil {
+		// `a` and `b` are duplicates. Only keep the one returned from isDup().
+		// TODO: This extra array allocation and copy seems unnecessary if
+		// all we do to dedup is compare the new delta with the last element
+		// in `items`, which could be done by mutating `items` directly.
+		// Might be worth profiling and investigating if it is safe to optimize.
 		d := append(Deltas{}, deltas[:n-2]...)
 		return append(d, *out)
 	}
@@ -369,8 +377,8 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 	if err != nil {
 		return KeyError{obj, err}
 	}
-
-	newDeltas := append(f.items[id], Delta{actionType, obj})
+	oldDeltas := f.items[id]
+	newDeltas := append(oldDeltas, Delta{actionType, obj})
 	newDeltas = dedupDeltas(newDeltas)
 
 	if len(newDeltas) > 0 {
@@ -382,10 +390,14 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 	} else {
 		// This never happens, because dedupDeltas never returns an empty list
 		// when given a non-empty list (as it is here).
-		// But if somehow it ever does return an empty list, then
-		// We need to remove this from our map (extra items in the queue are
-		// ignored if they are not in the map).
-		delete(f.items, id)
+		// If somehow it happens anyway, deal with it but complain.
+		if oldDeltas == nil {
+			klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj)
+			return nil
+		}
+		klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj)
+		f.items[id] = newDeltas
+		return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj)
 	}
 	return nil
 }
@@ -447,20 +459,22 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
 
 // IsClosed checks if the queue is closed
 func (f *DeltaFIFO) IsClosed() bool {
-	f.closedLock.Lock()
-	defer f.closedLock.Unlock()
+	f.lock.Lock()
+	defer f.lock.Unlock()
 	return f.closed
 }
 
-// Pop blocks until an item is added to the queue, and then returns it. If
+// Pop blocks until the queue has some items, and then returns one. If
 // multiple items are ready, they are returned in the order in which they were
 // added/updated. The item is removed from the queue (and the store) before it
 // is returned, so if you don't successfully process it, you need to add it back
 // with AddIfNotPresent().
-// process function is called under lock, so it is safe update data structures
+// process function is called under lock, so it is safe to update data structures
 // in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
 // may return an instance of ErrRequeue with a nested error to indicate the current
 // item should be requeued (equivalent to calling AddIfNotPresent under the lock).
+// process should avoid expensive I/O operation so that other queue operations, i.e.
+// Add() and Get(), won't be blocked for too long.
 //
 // Pop returns a 'Deltas', which has a complete list of all the things
 // that happened to the object (deltas) while it was sitting in the queue.
@@ -472,7 +486,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 		// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
 		// When Close() is called, the f.closed is set and the condition is broadcasted.
 		// Which causes this loop to continue and return from the Pop().
-		if f.IsClosed() {
+		if f.closed {
 			return nil, ErrFIFOClosed
 		}
 
@@ -485,7 +499,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 		}
 		item, ok := f.items[id]
 		if !ok {
-			// Item may have been deleted subsequently.
+			// This should never happen
+			klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id)
 			continue
 		}
 		delete(f.items, id)
@@ -521,6 +536,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 		action = Replaced
 	}
 
+	// Add Sync/Replaced action for each new item.
 	for _, item := range list {
 		key, err := f.KeyOf(item)
 		if err != nil {
@@ -539,6 +555,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 		if keys.Has(k) {
 			continue
 		}
+		// Delete pre-existing items not in the new list.
+		// This could happen if watch deletion event was missed while
+		// disconnected from apiserver.
 		var deletedObj interface{}
 		if n := oldItem.Newest(); n != nil {
 			deletedObj = n.Object
@@ -650,7 +669,8 @@ type KeyLister interface {
 
 // A KeyGetter is anything that knows how to get the value stored under a given key.
 type KeyGetter interface {
-	GetByKey(key string) (interface{}, bool, error)
+	// GetByKey returns the value associated with the key, or sets exists=false.
+	GetByKey(key string) (value interface{}, exists bool, err error)
 }
 
 // DeltaType is the type of a change (addition, deletion, etc)
@@ -713,10 +733,10 @@ func copyDeltas(d Deltas) Deltas {
 	return d2
 }
 
-// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
-// an object was deleted but the watch deletion event was missed. In this
-// case we don't know the final "resting" state of the object, so there's
-// a chance the included `Obj` is stale.
+// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where an object
+// was deleted but the watch deletion event was missed while disconnected from
+// apiserver. In this case we don't know the final "resting" state of the object, so
+// there's a chance the included `Obj` is stale.
 type DeletedFinalStateUnknown struct {
 	Key string
 	Obj interface{}
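As a usage note for the Pop contract documented above, a hypothetical consumer loop (the names drain and handle are illustrative; the cache import is assumed): the process function runs under the queue lock, receives a cache.Deltas, and can requeue by returning cache.ErrRequeue.

func drain(f *cache.DeltaFIFO, handle func(cache.Delta) error) error {
	for {
		_, err := f.Pop(func(obj interface{}) error {
			for _, d := range obj.(cache.Deltas) {
				if err := handle(d); err != nil {
					// Equivalent to AddIfNotPresent under the lock:
					// the item goes back on the queue.
					return cache.ErrRequeue{Err: err}
				}
			}
			return nil
		})
		if err == cache.ErrFIFOClosed {
			return nil // queue was Close()d; exit the control loop
		}
		if err != nil {
			return err
		}
	}
}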
vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored) | 2

@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/clock"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 // ExpirationCache implements the store interface
vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored) | 19

@@ -71,8 +71,8 @@ type Queue interface {
 
 	// HasSynced returns true if the first batch of keys have all been
 	// popped. The first batch of keys are those of the first Replace
-	// operation if that happened before any Add, Update, or Delete;
-	// otherwise the first batch is empty.
+	// operation if that happened before any Add, AddIfNotPresent,
+	// Update, or Delete; otherwise the first batch is empty.
 	HasSynced() bool
 
 	// Close the queue
@@ -128,8 +128,7 @@ type FIFO struct {
 	// Indication the queue is closed.
 	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
 	// Currently, not used to gate any of CRED operations.
-	closed     bool
-	closedLock sync.Mutex
+	closed bool
 }
 
 var (
@@ -138,14 +137,14 @@ var (
 
 // Close the queue.
 func (f *FIFO) Close() {
-	f.closedLock.Lock()
-	defer f.closedLock.Unlock()
+	f.lock.Lock()
+	defer f.lock.Unlock()
 	f.closed = true
 	f.cond.Broadcast()
 }
 
 // HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
-// or an Update called first but the first batch of items inserted by Replace() has been popped
+// or the first batch of items inserted by Replace() has been popped.
 func (f *FIFO) HasSynced() bool {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -262,8 +261,8 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
 
 // IsClosed checks if the queue is closed
 func (f *FIFO) IsClosed() bool {
-	f.closedLock.Lock()
-	defer f.closedLock.Unlock()
+	f.lock.Lock()
+	defer f.lock.Unlock()
 	if f.closed {
 		return true
 	}
@@ -284,7 +283,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
 		// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
 		// When Close() is called, the f.closed is set and the condition is broadcasted.
 		// Which causes this loop to continue and return from the Pop().
-		if f.IsClosed() {
+		if f.closed {
 			return nil, ErrFIFOClosed
 		}
vendor/k8s.io/client-go/tools/cache/listers.go (generated, vendored) | 2

@@ -17,7 +17,7 @@ limitations under the License.
 package cache
 
 import (
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
vendor/k8s.io/client-go/tools/cache/mutation_cache.go (generated, vendored) | 2

@@ -22,7 +22,7 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
vendor/k8s.io/client-go/tools/cache/mutation_detector.go (generated, vendored) | 2

@@ -24,7 +24,7 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/diff"
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored) | 107

@@ -39,7 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/tools/pager"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/trace"
 )
 
@@ -69,6 +69,8 @@ type Reflector struct {
 
 	// backoff manages backoff of ListWatch
 	backoffManager wait.BackoffManager
+	// initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch.
+	initConnBackoffManager wait.BackoffManager
 
 	resyncPeriod time.Duration
 	// ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
@@ -95,6 +97,46 @@ type Reflector struct {
 	// etcd, which is significantly less efficient and may lead to serious performance and
 	// scalability problems.
 	WatchListPageSize int64
+	// Called whenever the ListAndWatch drops the connection with an error.
+	watchErrorHandler WatchErrorHandler
 }
+
+// ResourceVersionUpdater is an interface that allows store implementation to
+// track the current resource version of the reflector. This is especially
+// important if storage bookmarks are enabled.
+type ResourceVersionUpdater interface {
+	// UpdateResourceVersion is called each time current resource version of the reflector
+	// is updated.
+	UpdateResourceVersion(resourceVersion string)
+}
+
+// The WatchErrorHandler is called whenever ListAndWatch drops the
+// connection with an error. After calling this handler, the informer
+// will backoff and retry.
+//
+// The default implementation looks at the error type and tries to log
+// the error message at an appropriate level.
+//
+// Implementations of this handler may display the error message in other
+// ways. Implementations should return quickly - any expensive processing
+// should be offloaded.
+type WatchErrorHandler func(r *Reflector, err error)
+
+// DefaultWatchErrorHandler is the default implementation of WatchErrorHandler
+func DefaultWatchErrorHandler(r *Reflector, err error) {
+	switch {
+	case isExpiredError(err):
+		// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
+		// has a semantic that it returns data at least as fresh as provided RV.
+		// So first try to LIST with setting RV to resource version of last observed object.
+		klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+	case err == io.EOF:
+		// watch closed normally
+	case err == io.ErrUnexpectedEOF:
+		klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
+	default:
+		utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
+	}
+}
 
 var (
@@ -135,9 +177,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
 		// We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
 		// API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
 		// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
-		backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
-		resyncPeriod:   resyncPeriod,
-		clock:          realClock,
+		backoffManager:         wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
+		initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
+		resyncPeriod:           resyncPeriod,
+		clock:                  realClock,
+		watchErrorHandler:      WatchErrorHandler(DefaultWatchErrorHandler),
 	}
 	r.setExpectedType(expectedType)
 	return r
@@ -175,7 +219,7 @@ func (r *Reflector) Run(stopCh <-chan struct{}) {
 	klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 	wait.BackoffUntil(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
-			utilruntime.HandleError(err)
+			r.watchErrorHandler(r, err)
 		}
 	}, r.backoffManager, true, stopCh)
 	klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
@@ -276,7 +320,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		case <-listCh:
 		}
 		if err != nil {
-			return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err)
+			return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
 		}
 
 		// We check if the list was paginated and if so set the paginatedResult based on that.
@@ -297,17 +341,17 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		initTrace.Step("Objects listed")
 		listMetaInterface, err := meta.ListAccessor(list)
 		if err != nil {
-			return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
+			return fmt.Errorf("unable to understand list result %#v: %v", list, err)
 		}
 		resourceVersion = listMetaInterface.GetResourceVersion()
 		initTrace.Step("Resource version extracted")
 		items, err := meta.ExtractList(list)
 		if err != nil {
-			return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
+			return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
 		}
 		initTrace.Step("Objects extracted")
 		if err := r.syncWith(items, resourceVersion); err != nil {
-			return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
+			return fmt.Errorf("unable to sync list result: %v", err)
 		}
 		initTrace.Step("SyncWith done")
 		r.setLastSyncResourceVersion(resourceVersion)
@@ -369,28 +413,15 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		start := r.clock.Now()
 		w, err := r.listerWatcher.Watch(options)
 		if err != nil {
-			switch {
-			case isExpiredError(err):
-				// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
-				// has a semantic that it returns data at least as fresh as provided RV.
-				// So first try to LIST with setting RV to resource version of last observed object.
-				klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
-			case err == io.EOF:
-				// watch closed normally
-			case err == io.ErrUnexpectedEOF:
-				klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
-			default:
-				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
-			}
 			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
 			// It doesn't make sense to re-list all objects because most likely we will be able to restart
 			// watch where we ended.
-			// If that's the case wait and resend watch request.
+			// If that's the case begin exponentially backing off and resend watch request.
 			if utilnet.IsConnectionRefused(err) {
-				time.Sleep(time.Second)
+				<-r.initConnBackoffManager.Backoff().C()
 				continue
 			}
-			return nil
+			return err
 		}
 
 		if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
@@ -485,6 +516,9 @@ loop:
 			}
 			*resourceVersion = newResourceVersion
 			r.setLastSyncResourceVersion(newResourceVersion)
+			if rvu, ok := r.store.(ResourceVersionUpdater); ok {
+				rvu.UpdateResourceVersion(newResourceVersion)
+			}
 			eventCount++
 		}
 	}
@@ -551,5 +585,26 @@ func isExpiredError(err error) bool {
 }
 
 func isTooLargeResourceVersionError(err error) bool {
-	return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
+	if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) {
+		return true
+	}
+	// In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to
+	// metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource
+	// version is larger than the largest currently available resource version. To ensure backward
+	// compatibility with these server versions we also need to detect the error based on the content
+	// of the error message field.
+	if !apierrors.IsTimeout(err) {
+		return false
+	}
+	apierr, ok := err.(apierrors.APIStatus)
+	if !ok || apierr == nil || apierr.Status().Details == nil {
+		return false
+	}
+	for _, cause := range apierr.Status().Details.Causes {
+		// Matches the message returned by api server 1.17.0-1.18.5 for this error condition
+		if cause.Message == "Too large resource version" {
+			return true
+		}
+	}
+	return false
 }
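The new watchErrorHandler field and exported DefaultWatchErrorHandler make the previously inlined error switch (removed from ListAndWatch above) replaceable. One possible custom handler simply decorates the default one; the function and counter names below are illustrative, and the cache and sync/atomic imports are assumed:

// countingWatchErrorHandler records each dropped watch, then falls back
// to the default logging behavior.
func countingWatchErrorHandler(counter *uint64) cache.WatchErrorHandler {
	return func(r *cache.Reflector, err error) {
		atomic.AddUint64(counter, 1)
		cache.DefaultWatchErrorHandler(r, err)
	}
}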
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored) | 77

@@ -28,35 +28,37 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/utils/buffer"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 // SharedInformer provides eventually consistent linkage of its
 // clients to the authoritative state of a given collection of
 // objects. An object is identified by its API group, kind/resource,
-// namespace, and name; the `ObjectMeta.UID` is not part of an
-// object's ID as far as this contract is concerned. One
+// namespace (if any), and name; the `ObjectMeta.UID` is not part of
+// an object's ID as far as this contract is concerned. One
 // SharedInformer provides linkage to objects of a particular API
 // group and kind/resource. The linked object collection of a
-// SharedInformer may be further restricted to one namespace and/or by
-// label selector and/or field selector.
+// SharedInformer may be further restricted to one namespace (if
+// applicable) and/or by label selector and/or field selector.
 //
 // The authoritative state of an object is what apiservers provide
 // access to, and an object goes through a strict sequence of states.
-// An object state is either "absent" or present with a
-// ResourceVersion and other appropriate content.
+// An object state is either (1) present with a ResourceVersion and
+// other appropriate content or (2) "absent".
 //
-// A SharedInformer maintains a local cache, exposed by GetStore() and
-// by GetIndexer() in the case of an indexed informer, of the state of
-// each relevant object. This cache is eventually consistent with the
-// authoritative state. This means that, unless prevented by
-// persistent communication problems, if ever a particular object ID X
-// is authoritatively associated with a state S then for every
-// SharedInformer I whose collection includes (X, S) eventually either
-// (1) I's cache associates X with S or a later state of X, (2) I is
-// stopped, or (3) the authoritative state service for X terminates.
-// To be formally complete, we say that the absent state meets any
-// restriction by label selector or field selector.
+// A SharedInformer maintains a local cache --- exposed by GetStore(),
+// by GetIndexer() in the case of an indexed informer, and possibly by
+// machinery involved in creating and/or accessing the informer --- of
+// the state of each relevant object. This cache is eventually
+// consistent with the authoritative state. This means that, unless
+// prevented by persistent communication problems, if ever a
+// particular object ID X is authoritatively associated with a state S
+// then for every SharedInformer I whose collection includes (X, S)
+// eventually either (1) I's cache associates X with S or a later
+// state of X, (2) I is stopped, or (3) the authoritative state
+// service for X terminates. To be formally complete, we say that the
+// absent state meets any restriction by label selector or field
+// selector.
 //
 // For a given informer and relevant object ID X, the sequence of
 // states that appears in the informer's cache is a subsequence of the
@@ -98,7 +100,7 @@ import (
 // added before `Run()`, eventually either the SharedInformer is
 // stopped or the client is notified of the update. A client added
 // after `Run()` starts gets a startup batch of notifications of
-// additions of the object existing in the cache at the time that
+// additions of the objects existing in the cache at the time that
 // client was added; also, for every update to the SharedInformer's
 // local cache after that client was added, eventually either the
 // SharedInformer is stopped or that client is notified of that
@@ -163,6 +165,21 @@ type SharedInformer interface {
 	// store. The value returned is not synchronized with access to the underlying store and is not
 	// thread-safe.
 	LastSyncResourceVersion() string
+
+	// The WatchErrorHandler is called whenever ListAndWatch drops the
+	// connection with an error. After calling this handler, the informer
+	// will backoff and retry.
+	//
+	// The default implementation looks at the error type and tries to log
+	// the error message at an appropriate level.
+	//
+	// There's only one handler, so if you call this multiple times, last one
+	// wins; calling after the informer has been started returns an error.
+	//
+	// The handler is intended for visibility, not to e.g. pause the consumers.
+	// The handler should return quickly - any expensive processing should be
+	// offloaded.
+	SetWatchErrorHandler(handler WatchErrorHandler) error
 }
 
 // SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
@@ -298,6 +315,9 @@ type sharedIndexInformer struct {
 	// blockDeltas gives a way to stop all event distribution so that a late event handler
 	// can safely join the shared informer.
 	blockDeltas sync.Mutex
+
+	// Called whenever the ListAndWatch drops the connection with an error.
+	watchErrorHandler WatchErrorHandler
 }
 
 // dummyController hides the fact that a SharedInformer is different from a dedicated one
@@ -333,6 +353,18 @@ type deleteNotification struct {
 	oldObj interface{}
 }
 
+func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.started {
+		return fmt.Errorf("informer has already started")
+	}
+
+	s.watchErrorHandler = handler
+	return nil
+}
+
 func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 
@@ -349,7 +381,8 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 		RetryOnError:     false,
 		ShouldResync:     s.processor.shouldResync,
 
-		Process: s.HandleDeltas,
+		Process:           s.HandleDeltas,
+		WatchErrorHandler: s.watchErrorHandler,
 	}
 
 	func() {
@@ -452,13 +485,13 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
 
 	if resyncPeriod > 0 {
 		if resyncPeriod < minimumResyncPeriod {
-			klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
+			klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod)
 			resyncPeriod = minimumResyncPeriod
 		}
 
 		if resyncPeriod < s.resyncCheckPeriod {
 			if s.started {
-				klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
+				klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
 				resyncPeriod = s.resyncCheckPeriod
 			} else {
 				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go (generated, vendored) | 3

@@ -98,6 +98,9 @@ func ShortenConfig(config *Config) {
 		if len(authInfo.ClientCertificateData) > 0 {
 			authInfo.ClientCertificateData = redactedBytes
 		}
+		if len(authInfo.Token) > 0 {
+			authInfo.Token = "REDACTED"
+		}
 		config.AuthInfos[key] = authInfo
 	}
 	for key, cluster := range config.Clusters {
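The effect of the added branch, in a small sketch: bearer tokens now get the same masking that client-certificate data already received.

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.NewConfig()
	cfg.AuthInfos["admin"] = &clientcmdapi.AuthInfo{Token: "s3cr3t"}
	clientcmdapi.ShortenConfig(cfg)
	fmt.Println(cfg.AuthInfos["admin"].Token) // prints: REDACTED
}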
vendor/k8s.io/client-go/tools/clientcmd/api/types.go (generated, vendored) | 60

@@ -82,6 +82,17 @@ type Cluster struct {
 	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
 	// +optional
 	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// ProxyURL is the URL to the proxy to be used for all requests made by this
+	// client. URLs with "http", "https", and "socks5" schemes are supported. If
+	// this configuration is not provided or the empty string, the client
+	// attempts to construct a proxy configuration from http_proxy and
+	// https_proxy environment variables. If these environment variables are not
+	// set, the client does not attempt to proxy requests.
+	//
+	// socks5 proxying does not currently support spdy streaming endpoints (exec,
+	// attach, port forward).
+	// +optional
+	ProxyURL string `json:"proxy-url,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
 	// +optional
 	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
@@ -103,10 +114,10 @@ type AuthInfo struct {
 	ClientKey string `json:"client-key,omitempty"`
 	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
 	// +optional
-	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"`
 	// Token is the bearer token for authentication to the kubernetes cluster.
 	// +optional
-	Token string `json:"token,omitempty"`
+	Token string `json:"token,omitempty" datapolicy:"token"`
 	// TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence.
 	// +optional
 	TokenFile string `json:"tokenFile,omitempty"`
@@ -124,7 +135,7 @@ type AuthInfo struct {
 	Username string `json:"username,omitempty"`
 	// Password is the password for basic authentication to the kubernetes cluster.
 	// +optional
-	Password string `json:"password,omitempty"`
+	Password string `json:"password,omitempty" datapolicy:"password"`
 	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
 	// +optional
 	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
@@ -182,7 +193,7 @@ func (c AuthProviderConfig) String() string {
 // ExecConfig specifies a command to provide client credentials. The command is exec'd
 // and outputs structured stdout holding credentials.
 //
-// See the client.authentiction.k8s.io API group for specifications of the exact input
+// See the client.authentication.k8s.io API group for specifications of the exact input
 // and output format
 type ExecConfig struct {
 	// Command to execute.
@@ -199,6 +210,41 @@ type ExecConfig struct {
 	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
 	// the same encoding version as the input.
 	APIVersion string `json:"apiVersion,omitempty"`
+
+	// This text is shown to the user when the executable doesn't seem to be
+	// present. For example, `brew install foo-cli` might be a good InstallHint for
+	// foo-cli on Mac OS systems.
+	InstallHint string `json:"installHint,omitempty"`
+
+	// ProvideClusterInfo determines whether or not to provide cluster information,
+	// which could potentially contain very large CA data, to this exec plugin as a
+	// part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set
+	// to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
+	// reading this environment variable.
+	ProvideClusterInfo bool `json:"provideClusterInfo"`
+
+	// Config holds additional config data that is specific to the exec
+	// plugin with regards to the cluster being authenticated to.
+	//
+	// This data is sourced from the clientcmd Cluster object's extensions[exec] field:
+	//
+	// clusters:
+	// - name: my-cluster
+	//   cluster:
+	//     ...
+	//     extensions:
+	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
+	//       extension:
+	//         audience: 06e3fbd18de8  # arbitrary config
+	//
+	// In some environments, the user config may be exactly the same across many clusters
+	// (i.e. call this exec plugin) minus some details that are specific to each cluster
+	// such as the audience. This field allows the per cluster config to be directly
+	// specified with the cluster info. Using this field to store secret data is not
+	// recommended as one of the prime benefits of exec plugins is that no secrets need
+	// to be stored directly in the kubeconfig.
+	// +k8s:conversion-gen=false
+	Config runtime.Object
 }
 
 var _ fmt.Stringer = new(ExecConfig)
@@ -221,7 +267,11 @@ func (c ExecConfig) String() string {
 	if len(c.Env) > 0 {
 		env = "[]ExecEnvVar{--- REDACTED ---}"
 	}
-	return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion)
+	config := "runtime.Object(nil)"
+	if c.Config != nil {
+		config = "runtime.Object(--- REDACTED ---)"
+	}
+	return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config)
 }
 
 // ExecEnvVar is used for setting environment variables when executing an exec-based
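A sketch of populating the new Cluster field programmatically (server and proxy addresses below are made up; clientcmdapi = k8s.io/client-go/tools/clientcmd/api is assumed imported). The same value appears as proxy-url under a cluster entry in kubeconfig YAML:

func exampleCluster() *clientcmdapi.Cluster {
	c := clientcmdapi.NewCluster()
	c.Server = "https://203.0.113.10:6443" // hypothetical API endpoint
	c.ProxyURL = "socks5://localhost:1080" // "http", "https", or "socks5"
	// Per the field docs, socks5 proxying does not cover the SPDY
	// streaming endpoints (exec, attach, port forward).
	return c
}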
vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go (generated, vendored) | 31

@@ -75,6 +75,17 @@ type Cluster struct {
 	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
 	// +optional
 	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// ProxyURL is the URL to the proxy to be used for all requests made by this
+	// client. URLs with "http", "https", and "socks5" schemes are supported. If
+	// this configuration is not provided or the empty string, the client
+	// attempts to construct a proxy configuration from http_proxy and
+	// https_proxy environment variables. If these environment variables are not
+	// set, the client does not attempt to proxy requests.
+	//
+	// socks5 proxying does not currently support spdy streaming endpoints (exec,
+	// attach, port forward).
+	// +optional
+	ProxyURL string `json:"proxy-url,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
 	// +optional
 	Extensions []NamedExtension `json:"extensions,omitempty"`
@@ -93,10 +104,10 @@ type AuthInfo struct {
 	ClientKey string `json:"client-key,omitempty"`
 	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
 	// +optional
-	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"`
 	// Token is the bearer token for authentication to the kubernetes cluster.
 	// +optional
-	Token string `json:"token,omitempty"`
+	Token string `json:"token,omitempty" datapolicy:"token"`
 	// TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence.
 	// +optional
 	TokenFile string `json:"tokenFile,omitempty"`
@@ -114,7 +125,7 @@ type AuthInfo struct {
 	Username string `json:"username,omitempty"`
 	// Password is the password for basic authentication to the kubernetes cluster.
 	// +optional
-	Password string `json:"password,omitempty"`
+	Password string `json:"password,omitempty" datapolicy:"password"`
 	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
 	// +optional
 	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
@@ -181,7 +192,7 @@ type AuthProviderConfig struct {
 // ExecConfig specifies a command to provide client credentials. The command is exec'd
 // and outputs structured stdout holding credentials.
 //
-// See the client.authentiction.k8s.io API group for specifications of the exact input
+// See the client.authentication.k8s.io API group for specifications of the exact input
 // and output format
 type ExecConfig struct {
 	// Command to execute.
@@ -198,6 +209,18 @@ type ExecConfig struct {
 	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
 	// the same encoding version as the input.
 	APIVersion string `json:"apiVersion,omitempty"`
+
+	// This text is shown to the user when the executable doesn't seem to be
+	// present. For example, `brew install foo-cli` might be a good InstallHint for
+	// foo-cli on Mac OS systems.
+	InstallHint string `json:"installHint,omitempty"`
+
+	// ProvideClusterInfo determines whether or not to provide cluster information,
+	// which could potentially contain very large CA data, to this exec plugin as a
+	// part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set
+	// to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
+	// reading this environment variable.
+	ProvideClusterInfo bool `json:"provideClusterInfo"`
 }
 
 // ExecEnvVar is used for setting environment variables when executing an exec-based
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go (generated, vendored) | 27

@@ -171,7 +171,15 @@ func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s
 	out.Username = in.Username
 	out.Password = in.Password
 	out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider))
-	out.Exec = (*api.ExecConfig)(unsafe.Pointer(in.Exec))
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(api.ExecConfig)
+		if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Exec = nil
+	}
 	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
 		return err
 	}
@@ -197,7 +205,15 @@ func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s
 	out.Username = in.Username
 	out.Password = in.Password
 	out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider))
-	out.Exec = (*ExecConfig)(unsafe.Pointer(in.Exec))
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecConfig)
+		if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Exec = nil
+	}
 	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
 		return err
 	}
@@ -237,6 +253,7 @@ func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conv
 	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
 	out.CertificateAuthority = in.CertificateAuthority
 	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
 	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
 		return err
 	}
@@ -255,6 +272,7 @@ func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conv
 	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
 	out.CertificateAuthority = in.CertificateAuthority
 	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
 	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
 		return err
 	}
@@ -356,6 +374,8 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo
 	out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
 	out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env))
 	out.APIVersion = in.APIVersion
+	out.InstallHint = in.InstallHint
+	out.ProvideClusterInfo = in.ProvideClusterInfo
 	return nil
 }
 
@@ -369,6 +389,9 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo
 	out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
 	out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env))
 	out.APIVersion = in.APIVersion
+	out.InstallHint = in.InstallHint
+	out.ProvideClusterInfo = in.ProvideClusterInfo
+	// INFO: in.Config opted out of conversion generation
 	return nil
 }
vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go (generated, vendored) | 3

@@ -267,6 +267,9 @@ func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
 		*out = make([]ExecEnvVar, len(*in))
 		copy(*out, *in)
 	}
+	if in.Config != nil {
+		out.Config = in.Config.DeepCopyObject()
+	}
 	return
 }
vendor/k8s.io/client-go/tools/clientcmd/client_config.go (generated, vendored) | 116

@@ -20,16 +20,23 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"net/url"
 	"os"
 	"strings"
-
-	"github.com/imdario/mergo"
-	"k8s.io/klog"
+	"unicode"
 
 	restclient "k8s.io/client-go/rest"
 	clientauth "k8s.io/client-go/tools/auth"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/klog/v2"
+
+	"github.com/imdario/mergo"
 )
 
+const (
+	// clusterExtensionKey is reserved in the cluster extensions list for exec plugin config.
+	clusterExtensionKey = "client.authentication.k8s.io/exec"
+)
+
 var (
@@ -70,7 +77,7 @@ type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderC
 
 type promptedCredentials struct {
 	username string
-	password string
+	password string `datapolicy:"password"`
 }
 
 // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
@@ -150,8 +157,15 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
 
 	clientConfig := &restclient.Config{}
 	clientConfig.Host = configClusterInfo.Server
+	if configClusterInfo.ProxyURL != "" {
+		u, err := parseProxyURL(configClusterInfo.ProxyURL)
+		if err != nil {
+			return nil, err
+		}
+		clientConfig.Proxy = http.ProxyURL(u)
+	}
 
-	if len(config.overrides.Timeout) > 0 {
+	if config.overrides != nil && len(config.overrides.Timeout) > 0 {
 		timeout, err := ParseTimeout(config.overrides.Timeout)
 		if err != nil {
 			return nil, err
@@ -180,7 +194,7 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
 			authInfoName, _ := config.getAuthInfoName()
 			persister = PersisterForUser(config.configAccess, authInfoName)
 		}
-		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
+		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo)
 		if err != nil {
 			return nil, err
 		}
@@ -223,7 +237,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
 // 4. if there is not enough information to identify the user, prompt if possible
-func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
+func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
 	mergedConfig := &restclient.Config{}
 
 	// blindly overwrite existing values based on precedence
@@ -261,6 +275,8 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
 	}
 	if configAuthInfo.Exec != nil {
 		mergedConfig.ExecProvider = configAuthInfo.Exec
+		mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint)
+		mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey]
 	}
 
 	// if there still isn't enough information to authenticate the user, try prompting
@@ -306,6 +322,41 @@ func canIdentifyUser(config restclient.Config) bool {
 		config.ExecProvider != nil
 }
 
+// cleanANSIEscapeCodes takes an arbitrary string and ensures that there are no
+// ANSI escape sequences that could put the terminal in a weird state (e.g.,
+// "\e[1m" bolds text)
+func cleanANSIEscapeCodes(s string) string {
+	// spaceControlCharacters includes tab, new line, vertical tab, new page, and
+	// carriage return. These are in the unicode.Cc category, but that category also
+	// contains ESC (U+001B) which we don't want.
+	spaceControlCharacters := unicode.RangeTable{
+		R16: []unicode.Range16{
+			{Lo: 0x0009, Hi: 0x000D, Stride: 1},
+		},
+	}
+
+	// Why not make this deny-only (instead of allow-only)? Because unicode.C
+	// contains newline and tab characters that we want.
+	allowedRanges := []*unicode.RangeTable{
+		unicode.L,
+		unicode.M,
+		unicode.N,
+		unicode.P,
+		unicode.S,
+		unicode.Z,
+		&spaceControlCharacters,
+	}
+	builder := strings.Builder{}
+	for _, roon := range s {
+		if unicode.IsOneOf(allowedRanges, roon) {
+			builder.WriteRune(roon) // returns nil error, per go doc
+		} else {
+			fmt.Fprintf(&builder, "%U", roon)
+		}
+	}
+	return builder.String()
+}
+
 // Namespace implements ClientConfig
 func (config *DirectClientConfig) Namespace() (string, bool, error) {
 	if config.overrides != nil && config.overrides.Context.Namespace != "" {
@@ -373,7 +424,7 @@ func (config *DirectClientConfig) ConfirmUsable() error {
 // getContextName returns the default, or user-set context name, and a boolean that indicates
 // whether the default context name has been overwritten by a user-set flag, or left as its default value
 func (config *DirectClientConfig) getContextName() (string, bool) {
-	if len(config.overrides.CurrentContext) != 0 {
+	if config.overrides != nil && len(config.overrides.CurrentContext) != 0 {
 		return config.overrides.CurrentContext, true
 	}
 	if len(config.contextName) != 0 {
@@ -387,7 +438,7 @@ func (config *DirectClientConfig) getContextName() (string, bool) {
 // and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or
 // left as its default value
 func (config *DirectClientConfig) getAuthInfoName() (string, bool) {
-	if len(config.overrides.Context.AuthInfo) != 0 {
+	if config.overrides != nil && len(config.overrides.Context.AuthInfo) != 0 {
 		return config.overrides.Context.AuthInfo, true
 	}
 	context, _ := config.getContext()
@@ -398,7 +449,7 @@ func (config *DirectClientConfig) getAuthInfoName() (string, bool) {
 // indicating whether the default clusterName has been overwritten by a user-set flag, or left as
 // its default value
 func (config *DirectClientConfig) getClusterName() (string, bool) {
-	if len(config.overrides.Context.Cluster) != 0 {
+	if config.overrides != nil && len(config.overrides.Context.Cluster) != 0 {
 		return config.overrides.Context.Cluster, true
 	}
 	context, _ := config.getContext()
@@ -416,7 +467,9 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
 	} else if required {
 		return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
 	}
-	mergo.MergeWithOverwrite(mergedContext, config.overrides.Context)
+	if config.overrides != nil {
+		mergo.MergeWithOverwrite(mergedContext, config.overrides.Context)
+	}
 
 	return *mergedContext, nil
 }
@@ -432,7 +485,9 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
 	} else if required {
 		return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
 	}
-	mergo.MergeWithOverwrite(mergedAuthInfo, config.overrides.AuthInfo)
+	if config.overrides != nil {
+		mergo.MergeWithOverwrite(mergedAuthInfo, config.overrides.AuthInfo)
+	}
 
 	return *mergedAuthInfo, nil
 }
@@ -443,30 +498,37 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
 	clusterInfoName, required := config.getClusterName()
 
 	mergedClusterInfo := clientcmdapi.NewCluster()
-	mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterDefaults)
+	if config.overrides != nil {
+		mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterDefaults)
+	}
 	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
 		mergo.MergeWithOverwrite(mergedClusterInfo, configClusterInfo)
 	} else if required {
 		return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
|
||||
}
|
||||
mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo)
|
||||
if config.overrides != nil {
|
||||
mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo)
|
||||
}
|
||||
|
||||
// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
|
||||
// otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set".
|
||||
// * An override of --certificate-authority should also override TLS skip settings and CA data, otherwise existing CA data will take precedence.
|
||||
caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
|
||||
caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
|
||||
if config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 {
|
||||
mergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify
|
||||
mergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority
|
||||
mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData
|
||||
}
|
||||
if config.overrides != nil {
|
||||
caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
|
||||
caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
|
||||
if config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 {
|
||||
mergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify
|
||||
mergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority
|
||||
mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData
|
||||
}
|
||||
|
||||
// if the --tls-server-name has been set in overrides, use that value.
|
||||
// if the --server has been set in overrides, then use the value of --tls-server-name specified on the CLI too. This gives the property
|
||||
// that setting a --server will effectively clear the KUBECONFIG value of tls-server-name if it is specified on the command line which is
|
||||
// usually correct.
|
||||
if config.overrides.ClusterInfo.TLSServerName != "" || config.overrides.ClusterInfo.Server != "" {
|
||||
mergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName
|
||||
// if the --tls-server-name has been set in overrides, use that value.
|
||||
// if the --server has been set in overrides, then use the value of --tls-server-name specified on the CLI too. This gives the property
|
||||
// that setting a --server will effectively clear the KUBECONFIG value of tls-server-name if it is specified on the command line which is
|
||||
// usually correct.
|
||||
if config.overrides.ClusterInfo.TLSServerName != "" || config.overrides.ClusterInfo.Server != "" {
|
||||
mergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName
|
||||
}
|
||||
}
|
||||
|
||||
return *mergedClusterInfo, nil
|
||||
|
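
To see the new allow-list filter in action outside the vendored tree, here is a minimal standalone sketch of the same approach; the names spaceControls and allowed and the sample string are illustrative, not from the commit:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

func main() {
	// Same idea as cleanANSIEscapeCodes above: allow letters, marks, numbers,
	// punctuation, symbols, separators, and a few space control characters.
	spaceControls := unicode.RangeTable{R16: []unicode.Range16{{Lo: 0x0009, Hi: 0x000D, Stride: 1}}}
	allowed := []*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S, unicode.Z, &spaceControls}

	in := "install\x1b[1m kubectl\x1b[0m"
	var b strings.Builder
	for _, r := range in {
		if unicode.IsOneOf(allowed, r) {
			b.WriteRune(r)
		} else {
			fmt.Fprintf(&b, "%U", r) // the ESC byte becomes the literal text "U+001B"
		}
	}
	fmt.Println(b.String()) // installU+001B[1m kubectlU+001B[0m
}
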
34
vendor/k8s.io/client-go/tools/clientcmd/config.go
generated
vendored
34
vendor/k8s.io/client-go/tools/clientcmd/config.go
generated
vendored
@ -24,7 +24,7 @@ import (
"reflect"
"sort"

"k8s.io/klog"
"k8s.io/klog/v2"

restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -58,6 +58,15 @@ type PathOptions struct {
LoadingRules *ClientConfigLoadingRules
}

var (
// UseModifyConfigLock ensures that access to kubeconfig file using ModifyConfig method
// is being guarded by a lock file.
// This variable is intentionaly made public so other consumers of this library
// can modify its default behavior, but be caution when disabling it since
// this will make your code not threadsafe.
UseModifyConfigLock = true
)

func (o *PathOptions) GetEnvVarFiles() []string {
if len(o.EnvVar) == 0 {
return []string{}
@ -74,10 +83,13 @@ func (o *PathOptions) GetEnvVarFiles() []string {
}

func (o *PathOptions) GetLoadingPrecedence() []string {
if o.IsExplicitFile() {
return []string{o.GetExplicitFile()}
}

if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
return envVarFiles
}

return []string{o.GlobalFile}
}

@ -156,15 +168,17 @@ func NewDefaultPathOptions() *PathOptions {
// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any
// modified element.
func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
possibleSources := configAccess.GetLoadingPrecedence()
// sort the possible kubeconfig files so we always "lock" in the same order
// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
sort.Strings(possibleSources)
for _, filename := range possibleSources {
if err := lockFile(filename); err != nil {
return err
if UseModifyConfigLock {
possibleSources := configAccess.GetLoadingPrecedence()
// sort the possible kubeconfig files so we always "lock" in the same order
// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
sort.Strings(possibleSources)
for _, filename := range possibleSources {
if err := lockFile(filename); err != nil {
return err
}
defer unlockFile(filename)
}
defer unlockFile(filename)
}

startingConfig, err := configAccess.GetStartingConfig()
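
For readers wiring this up, a hedged usage sketch of the new UseModifyConfigLock knob follows; it assumes a kubeconfig at the default location and an existing "dev" context:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// UseModifyConfigLock defaults to true; leaving it on keeps ModifyConfig
	// guarded by lock files, which you want unless you coordinate access yourself.
	clientcmd.UseModifyConfigLock = true

	pathOptions := clientcmd.NewDefaultPathOptions()
	config, err := pathOptions.GetStartingConfig()
	if err != nil {
		panic(err)
	}
	config.CurrentContext = "dev" // assumes a "dev" context exists
	if err := clientcmd.ModifyConfig(pathOptions, *config, false); err != nil {
		panic(err)
	}
	fmt.Println("kubeconfig updated")
}
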
15
vendor/k8s.io/client-go/tools/clientcmd/helpers.go
generated
vendored
15
vendor/k8s.io/client-go/tools/clientcmd/helpers.go
generated
vendored
@ -18,6 +18,7 @@ package clientcmd

import (
"fmt"
"net/url"
"strconv"
"time"
)
@ -33,3 +34,17 @@ func ParseTimeout(duration string) (time.Duration, error) {
}
return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)")
}

func parseProxyURL(proxyURL string) (*url.URL, error) {
u, err := url.Parse(proxyURL)
if err != nil {
return nil, fmt.Errorf("could not parse: %v", proxyURL)
}

switch u.Scheme {
case "http", "https", "socks5":
default:
return nil, fmt.Errorf("unsupported scheme %q, must be http, https, or socks5", u.Scheme)
}
return u, nil
}
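
The new parseProxyURL helper is unexported, but it is reachable through cluster validation (see validation.go further below). A sketch under that assumption, with a made-up cluster entry carrying an unsupported proxy scheme:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	config := clientcmdapi.NewConfig()
	cluster := clientcmdapi.NewCluster()
	cluster.Server = "https://example.com:6443"
	cluster.ProxyURL = "ftp://proxy.example.com" // unsupported scheme
	config.Clusters["test"] = cluster
	config.AuthInfos["test"] = clientcmdapi.NewAuthInfo()
	config.Contexts["test"] = &clientcmdapi.Context{Cluster: "test", AuthInfo: "test"}
	config.CurrentContext = "test"

	// Validate surfaces the parseProxyURL error for unsupported schemes.
	if err := clientcmd.Validate(*config); err != nil {
		fmt.Println(err) // ... unsupported scheme "ftp", must be http, https, or socks5
	}
}
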
6
vendor/k8s.io/client-go/tools/clientcmd/loader.go
generated
vendored
6
vendor/k8s.io/client-go/tools/clientcmd/loader.go
generated
vendored
@ -28,7 +28,7 @@ import (
"strings"

"github.com/imdario/mergo"
"k8s.io/klog"
"k8s.io/klog/v2"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -304,6 +304,10 @@ func (rules *ClientConfigLoadingRules) Migrate() error {

// GetLoadingPrecedence implements ConfigAccess
func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string {
if len(rules.ExplicitPath) > 0 {
return []string{rules.ExplicitPath}
}

return rules.Precedence
}
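
A short sketch of how the newly added explicit-path branch in GetLoadingPrecedence behaves; the /tmp/kubeconfig path is hypothetical:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	fmt.Println(rules.GetLoadingPrecedence()) // e.g. [~/.kube/config] or the $KUBECONFIG entries

	// An explicit path short-circuits the precedence list entirely.
	rules.ExplicitPath = "/tmp/kubeconfig"
	fmt.Println(rules.GetLoadingPrecedence()) // [/tmp/kubeconfig]
}
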
37
vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
generated
vendored
37
vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
generated
vendored
@ -20,7 +20,7 @@ import (
"io"
"sync"

"k8s.io/klog"
"k8s.io/klog/v2"

restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -60,27 +60,26 @@ func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overri
}

func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
if config.clientConfig == nil {
config.loadingLock.Lock()
defer config.loadingLock.Unlock()
config.loadingLock.Lock()
defer config.loadingLock.Unlock()

if config.clientConfig == nil {
mergedConfig, err := config.loader.Load()
if err != nil {
return nil, err
}

var mergedClientConfig ClientConfig
if config.fallbackReader != nil {
mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader)
} else {
mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader)
}

config.clientConfig = mergedClientConfig
}
if config.clientConfig != nil {
return config.clientConfig, nil
}
mergedConfig, err := config.loader.Load()
if err != nil {
return nil, err
}

var currentContext string
if config.overrides != nil {
currentContext = config.overrides.CurrentContext
}
if config.fallbackReader != nil {
config.clientConfig = NewInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.fallbackReader, config.loader)
} else {
config.clientConfig = NewNonInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.loader)
}
return config.clientConfig, nil
}
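
Given the nil-overrides guard added above, a caller can now pass nil overrides to the deferred loader without tripping a nil dereference. A minimal sketch, assuming a loadable kubeconfig:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()

	// nil overrides now fall back to an empty current context inside
	// createClientConfig instead of dereferencing nil.
	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil)

	restConfig, err := kubeConfig.ClientConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println(restConfig.Host)
}
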
29
vendor/k8s.io/client-go/tools/clientcmd/validation.go
generated
vendored
29
vendor/k8s.io/client-go/tools/clientcmd/validation.go
generated
vendored
@ -30,11 +30,24 @@ import (

var (
ErrNoContext = errors.New("no context chosen")
ErrEmptyConfig = errors.New("no configuration has been provided, try setting KUBERNETES_MASTER environment variable")
ErrEmptyConfig = NewEmptyConfigError("no configuration has been provided, try setting KUBERNETES_MASTER environment variable")
// message is for consistency with old behavior
ErrEmptyCluster = errors.New("cluster has no server defined")
)

// NewEmptyConfigError returns an error wrapping the given message which IsEmptyConfig() will recognize as an empty config error
func NewEmptyConfigError(message string) error {
return &errEmptyConfig{message}
}

type errEmptyConfig struct {
message string
}

func (e *errEmptyConfig) Error() string {
return e.message
}

type errContextNotFound struct {
ContextName string
}
@ -60,9 +73,14 @@ func IsContextNotFound(err error) bool {
func IsEmptyConfig(err error) bool {
switch t := err.(type) {
case errConfigurationInvalid:
return len(t) == 1 && t[0] == ErrEmptyConfig
if len(t) != 1 {
return false
}
_, ok := t[0].(*errEmptyConfig)
return ok
}
return err == ErrEmptyConfig
_, ok := err.(*errEmptyConfig)
return ok
}

// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
@ -209,6 +227,11 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [
validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
}
}
if proxyURL := clusterInfo.ProxyURL; proxyURL != "" {
if _, err := parseProxyURL(proxyURL); err != nil {
validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %v", proxyURL, clusterName, err))
}
}
// Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName))
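
A sketch of how the reworked IsEmptyConfig behaves now that it matches the unexported errEmptyConfig type rather than a sentinel value; validating an empty config should still be recognized:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// An empty config fails validation with an error that IsEmptyConfig
	// recognizes, whether it is ErrEmptyConfig itself or a caller-built
	// error from NewEmptyConfigError.
	err := clientcmd.Validate(*clientcmdapi.NewConfig())
	fmt.Println(clientcmd.IsEmptyConfig(err)) // true
}
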
1
vendor/k8s.io/client-go/tools/leaderelection/OWNERS
generated
vendored
1
vendor/k8s.io/client-go/tools/leaderelection/OWNERS
generated
vendored
@ -8,7 +8,6 @@ reviewers:
- deads2k
- mikedanese
- gmarek
- eparis
- timothysc
- ingvagabund
- resouer
23
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
generated
vendored
23
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
generated
vendored
@ -65,7 +65,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"

"k8s.io/klog"
"k8s.io/klog/v2"
)

const (
@ -188,17 +188,17 @@ type LeaderElector struct {
clock clock.Clock

metrics leaderMetricsAdapter

// name is the name of the resource lock for debugging
name string
}

// Run starts the leader election loop
// Run starts the leader election loop. Run will not return
// before leader election loop is stopped by ctx or it has
// stopped holding the leader lease
func (le *LeaderElector) Run(ctx context.Context) {
defer runtime.HandleCrash()
defer func() {
runtime.HandleCrash()
le.config.Callbacks.OnStoppedLeading()
}()

if !le.acquire(ctx) {
return // ctx signalled done
}
@ -209,7 +209,8 @@ func (le *LeaderElector) Run(ctx context.Context) {
}

// RunOrDie starts a client with the provided config or panics if the config
// fails to validate.
// fails to validate. RunOrDie blocks until leader election loop is
// stopped by ctx or it has stopped holding the leader lease
func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
le, err := NewLeaderElector(lec)
if err != nil {
@ -239,7 +240,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
klog.Infof("attempting to acquire leader lease %v...", desc)
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew(ctx)
le.maybeReportTransition()
@ -289,8 +290,12 @@ func (le *LeaderElector) release() bool {
if !le.IsLeader() {
return true
}
now := metav1.Now()
leaderElectionRecord := rl.LeaderElectionRecord{
LeaderTransitions: le.observedRecord.LeaderTransitions,
LeaderTransitions: le.observedRecord.LeaderTransitions,
LeaseDurationSeconds: 1,
RenewTime: now,
AcquireTime: now,
}
if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
klog.Errorf("Failed to release lock: %v", err)
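
A usage sketch for the clarified Run/RunOrDie blocking contract, using a LeaseLock; names like example-lock and worker-1 are placeholders, and in-cluster config is assumed:

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "example-lock", Namespace: "default"},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: "worker-1"},
	}

	// Per the updated doc comment, RunOrDie blocks until ctx is cancelled
	// or leadership is lost.
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { klog.Info("started leading") },
			OnStoppedLeading: func() { klog.Info("stopped leading") },
		},
	})
}
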
15
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
generated
vendored
15
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
generated
vendored
@ -52,13 +52,14 @@ func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byt
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
recordBytes := []byte(recordStr)
if found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
if err := json.Unmarshal(recordBytes, &record); err != nil {
return nil, nil, err
}
}
return &record, []byte(recordBytes), nil
return &record, recordBytes, nil
}

// Create attempts to create a LeaderElectionRecord annotation
@ -92,8 +93,12 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
cml.cm.Annotations = make(map[string]string)
}
cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
return err
cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
if err != nil {
return err
}
cml.cm = cm
return nil
}

// RecordEvent in leader election while adding meta-data
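
The Update change above (repeated for the endpoints and lease locks below) follows a general pattern: stage the API result in a local variable and only overwrite the cached object once the call is known to have succeeded, so a failed update cannot clobber a valid cache. A toy, self-contained illustration with hypothetical record/update names:

package main

import (
	"errors"
	"fmt"
)

type record struct{ version int }

// update simulates an API call that can fail.
func update(r record, fail bool) (record, error) {
	if fail {
		return record{}, errors.New("conflict")
	}
	r.version++
	return r, nil
}

func main() {
	cached := record{version: 1}

	// Keep the result in a temporary and assign to the cache only on success.
	if next, err := update(cached, true); err == nil {
		cached = next
	}
	fmt.Println(cached.version) // still 1: the cache survived the failed update
}
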
15
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
generated
vendored
15
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
generated
vendored
@ -47,13 +47,14 @@ func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
recordBytes := []byte(recordStr)
if found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
if err := json.Unmarshal(recordBytes, &record); err != nil {
return nil, nil, err
}
}
return &record, []byte(recordBytes), nil
return &record, recordBytes, nil
}

// Create attempts to create a LeaderElectionRecord annotation
@ -87,8 +88,12 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) e
el.e.Annotations = make(map[string]string)
}
el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
return err
e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
if err != nil {
return err
}
el.e = e
return nil
}

// RecordEvent in leader election while adding meta-data
16
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
generated
vendored
16
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
generated
vendored
@ -19,6 +19,9 @@ package resourcelock

import (
"context"
"fmt"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -140,3 +143,16 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
return nil, fmt.Errorf("Invalid lock-type %s", lockType)
}
}

// NewFromKubeconfig will create a lock of a given type according to the input parameters.
func NewFromKubeconfig(lockType string, ns string, name string, rlc ResourceLockConfig, kubeconfig *restclient.Config, renewDeadline time.Duration) (Interface, error) {
// shallow copy, do not modify the kubeconfig
config := *kubeconfig
timeout := ((renewDeadline / time.Millisecond) / 2) * time.Millisecond
if timeout < time.Second {
timeout = time.Second
}
config.Timeout = timeout
leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "leader-election"))
return New(lockType, ns, name, leaderElectionClient.CoreV1(), leaderElectionClient.CoordinationV1(), rlc)
}
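
The timeout derivation in NewFromKubeconfig is easy to check by hand: half the renew deadline, truncated to whole milliseconds, with a one-second floor. This sketch replays the same arithmetic for two sample deadlines:

package main

import (
	"fmt"
	"time"
)

func main() {
	for _, renewDeadline := range []time.Duration{10 * time.Second, 1500 * time.Millisecond} {
		// Identical expression to the vendored code above.
		timeout := ((renewDeadline / time.Millisecond) / 2) * time.Millisecond
		if timeout < time.Second {
			timeout = time.Second
		}
		fmt.Println(renewDeadline, "->", timeout) // 10s -> 5s, then 1.5s -> 1s (floored)
	}
}
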
11
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
generated
vendored
11
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
generated
vendored
@ -71,9 +71,14 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error
return errors.New("lease not initialized, call get or create first")
}
ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
return err

lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
if err != nil {
return err
}

ll.lease = lease
return nil
}

// RecordEvent in leader election while adding meta-data
1
vendor/k8s.io/client-go/tools/metrics/OWNERS
generated
vendored
1
vendor/k8s.io/client-go/tools/metrics/OWNERS
generated
vendored
@ -2,6 +2,5 @@

reviewers:
- wojtek-t
- eparis
- krousey
- jayunit100
1
vendor/k8s.io/client-go/tools/record/OWNERS
generated
vendored
1
vendor/k8s.io/client-go/tools/record/OWNERS
generated
vendored
@ -17,7 +17,6 @@ reviewers:
- saad-ali
- luxas
- yifan-gu
- eparis
- mwielgus
- timothysc
- jsafrane
17
vendor/k8s.io/client-go/tools/record/event.go
generated
vendored
17
vendor/k8s.io/client-go/tools/record/event.go
generated
vendored
@ -31,7 +31,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record/util"
ref "k8s.io/client-go/tools/reference"
"k8s.io/klog"
"k8s.io/klog/v2"
)

const maxTriesPerEvent = 12
@ -121,6 +121,10 @@ type EventBroadcaster interface {
// function. The return value can be ignored or used to stop recording, if desired.
StartLogging(logf func(format string, args ...interface{})) watch.Interface

// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured
// logging function. The return value can be ignored or used to stop recording, if desired.
StartStructuredLogging(verbosity klog.Level) watch.Interface

// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
// with the event source set to the given event source.
NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
@ -266,7 +270,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
default:
// This case includes actual http transport errors. Go ahead and retry.
}
klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err)
return false
}

@ -279,6 +283,15 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...int
})
}

// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
// The return value can be ignored or used to stop recording, if desired.
func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface {
return e.StartEventWatcher(
func(e *v1.Event) {
klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
})
}

// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value can be ignored or used to stop recording, if desired.
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
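
A usage sketch for the new StartStructuredLogging method; the component name and pod are illustrative:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

func main() {
	broadcaster := record.NewBroadcaster()
	defer broadcaster.Shutdown()

	// Structured counterpart to StartLogging: events come out as key/value
	// pairs via klog.InfoS at the chosen verbosity.
	watcher := broadcaster.StartStructuredLogging(3)
	defer watcher.Stop()

	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "demo"})
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}}
	recorder.Event(pod, v1.EventTypeNormal, "Started", "container started")
}
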
5
vendor/k8s.io/client-go/tools/record/events_cache.go
generated
vendored
5
vendor/k8s.io/client-go/tools/record/events_cache.go
generated
vendored
@ -153,7 +153,8 @@ func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
// localKey - key that makes this event in the local group
type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)

// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type,
// event.Reason, event.ReportingController and event.ReportingInstance
func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
return strings.Join([]string{
event.Source.Component,
@ -165,6 +166,8 @@ func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
event.InvolvedObject.APIVersion,
event.Type,
event.Reason,
event.ReportingController,
event.ReportingInstance,
},
""), event.Message
}
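
A small sketch showing that the aggregation key now reflects ReportingController and ReportingInstance while the local key is still the message; the event fields are made up:

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	event := &v1.Event{
		Source:              v1.EventSource{Component: "kubelet", Host: "node-1"},
		Type:                v1.EventTypeWarning,
		Reason:              "BackOff",
		Message:             "back-off restarting container",
		ReportingController: "kubelet",
		ReportingInstance:   "node-1",
	}
	aggregateKey, localKey := record.EventAggregatorByReasonFunc(event)
	// Two events differing only in ReportingInstance now land in
	// different aggregation groups.
	fmt.Println(strings.Contains(aggregateKey, "node-1"), localKey)
}
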
16
vendor/k8s.io/client-go/tools/record/fake.go
generated
vendored
16
vendor/k8s.io/client-go/tools/record/fake.go
generated
vendored
@ -27,17 +27,29 @@ import (
// thrown away in this case.
type FakeRecorder struct {
Events chan string

IncludeObject bool
}

func objectString(object runtime.Object, includeObject bool) string {
if !includeObject {
return ""
}
return fmt.Sprintf(" involvedObject{kind=%s,apiVersion=%s}",
object.GetObjectKind().GroupVersionKind().Kind,
object.GetObjectKind().GroupVersionKind().GroupVersion(),
)
}

func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
if f.Events != nil {
f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject))
}
}

func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if f.Events != nil {
f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject)
}
}
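
A test-style sketch of the new IncludeObject knob on FakeRecorder; TypeMeta is set explicitly so the GroupVersionKind lookup has something to report:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	recorder := record.NewFakeRecorder(10)
	recorder.IncludeObject = true // new knob from this diff

	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	}
	recorder.Event(pod, v1.EventTypeNormal, "Created", "created container")

	fmt.Println(<-recorder.Events)
	// Normal Created created container involvedObject{kind=Pod,apiVersion=v1}
}
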
2
vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
generated
vendored
2
vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
generated
vendored
@ -22,7 +22,7 @@ import (
"net/http"
"net/url"

"k8s.io/klog"
"k8s.io/klog/v2"

"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
2
vendor/k8s.io/client-go/tools/remotecommand/v1.go
generated
vendored
2
vendor/k8s.io/client-go/tools/remotecommand/v1.go
generated
vendored
@ -24,7 +24,7 @@ import (

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
"k8s.io/klog/v2"
)

// streamProtocolV1 implements the first version of the streaming exec & attach
15
vendor/k8s.io/client-go/tools/watch/retrywatcher.go
generated
vendored
15
vendor/k8s.io/client-go/tools/watch/retrywatcher.go
generated
vendored
@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"k8s.io/klog/v2"
)

// resourceVersionGetter is an interface used to get resource version from events.
@ -101,7 +101,8 @@ func (rw *RetryWatcher) send(event watch.Event) bool {
// If it is not done the second return value holds the time to wait before calling it again.
func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
watcher, err := rw.watcherClient.Watch(metav1.ListOptions{
ResourceVersion: rw.lastResourceVersion,
ResourceVersion: rw.lastResourceVersion,
AllowWatchBookmarks: true,
})
// We are very unlikely to hit EOF here since we are just establishing the call,
// but it may happen that the apiserver is just shutting down (e.g. being restarted)
@ -174,10 +175,12 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
return true, 0
}

// All is fine; send the event and update lastResourceVersion
ok = rw.send(event)
if !ok {
return true, 0
// All is fine; send the non-bookmark events and update resource version.
if event.Type != watch.Bookmark {
ok = rw.send(event)
if !ok {
return true, 0
}
}
rw.lastResourceVersion = resourceVersion
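
With bookmarks requested and filtered as above, a RetryWatcher consumer sees only real events while the watcher keeps its resource version fresh internally. A sketch, assuming in-cluster config and a hypothetical starting resourceVersion of "10":

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods("default").Watch(context.TODO(), options)
		},
	}

	rw, err := watchtools.NewRetryWatcher("10", lw)
	if err != nil {
		panic(err)
	}
	defer rw.Stop()

	// Bookmark events never surface here; the channel carries real changes only.
	for event := range rw.ResultChan() {
		fmt.Println(event.Type)
	}
}
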
71
vendor/k8s.io/client-go/tools/watch/until.go
generated
vendored
71
vendor/k8s.io/client-go/tools/watch/until.go
generated
vendored
@ -22,13 +22,11 @@ import (
"fmt"
"time"

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"k8s.io/klog/v2"
)

// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
@ -167,70 +165,3 @@ func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (

return context.WithTimeout(parent, timeout)
}

// ListWatchUntil first lists objects, converts them into synthetic ADDED events
// and checks conditions for those synthetic events. If the conditions have not been reached so far
// it continues by calling Until which establishes a watch from resourceVersion of the list call
// to evaluate those conditions based on new events.
// ListWatchUntil provides the same guarantees as Until and replaces the old WATCH from RV "" (or "0")
// which was mixing list and watch calls internally and having severe design issues. (see #74022)
// There is no resourceVersion order guarantee for the initial list and those synthetic events.
func ListWatchUntil(ctx context.Context, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) {
if len(conditions) == 0 {
return nil, nil
}

list, err := lw.List(metav1.ListOptions{})
if err != nil {
return nil, err
}
initialItems, err := meta.ExtractList(list)
if err != nil {
return nil, err
}

// use the initial items as simulated "adds"
var lastEvent *watch.Event
currIndex := 0
passedConditions := 0
for _, condition := range conditions {
// check the next condition against the previous event and short circuit waiting for the next watch
if lastEvent != nil {
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
passedConditions = passedConditions + 1
continue
}
}

ConditionSucceeded:
for currIndex < len(initialItems) {
lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
currIndex++

done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
passedConditions = passedConditions + 1
break ConditionSucceeded
}
}
}
if passedConditions == len(conditions) {
return lastEvent, nil
}
remainingConditions := conditions[passedConditions:]

metaObj, err := meta.ListAccessor(list)
if err != nil {
return nil, err
}
currResourceVersion := metaObj.GetResourceVersion()

return Until(ctx, currResourceVersion, lw, remainingConditions...)
}
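
The hunk above removes the list-then-watch helper; among what remains in until.go is ContextWithOptionalTimeout, whose zero-timeout behavior is easy to demonstrate. A small sketch (the durations are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	watchtools "k8s.io/client-go/tools/watch"
)

func main() {
	// A zero timeout yields a plain cancelable context with no deadline...
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), 0)
	defer cancel()
	_, hasDeadline := ctx.Deadline()
	fmt.Println(hasDeadline) // false

	// ...while a positive timeout behaves like context.WithTimeout.
	ctx2, cancel2 := watchtools.ContextWithOptionalTimeout(context.Background(), 30*time.Second)
	defer cancel2()
	_, hasDeadline = ctx2.Deadline()
	fmt.Println(hasDeadline) // true
}
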