Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00

commit 9c8de9471e, parent e9e33fb851, committed by mergify[bot]

rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
vendor/k8s.io/client-go/tools/auth/clientauth.go (generated, vendored): 5 changes

@@ -32,7 +32,7 @@ Having a defined format allows:

 The file format is json, marshalled from a struct authcfg.Info.

-Clinet libraries in other languages should use the same format.
+Client libraries in other languages should use the same format.

 It is not intended to store general preferences, such as default
 namespace, output options, etc. CLIs (such as kubectl) and UIs should
@@ -65,7 +65,6 @@ package auth
 // TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated.
 import (
 	"encoding/json"
-	"io/ioutil"
 	"os"

 	restclient "k8s.io/client-go/rest"
@@ -90,7 +89,7 @@ func LoadFromFile(path string) (*Info, error) {
 	if _, err := os.Stat(path); os.IsNotExist(err) {
 		return nil, err
 	}
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return nil, err
 	}
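
Many hunks in this commit are the mechanical io/ioutil deprecation cleanup that landed across client-go: ioutil.ReadFile, ioutil.WriteFile, ioutil.ReadAll and ioutil.Discard become os.ReadFile, os.WriteFile, io.ReadAll and io.Discard. A minimal, self-contained sketch of the drop-in replacements (file path here is a placeholder, not from the diff):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// os.WriteFile replaces ioutil.WriteFile; same signature and semantics.
	if err := os.WriteFile("/tmp/example.txt", []byte("hello\n"), 0o600); err != nil {
		panic(err)
	}

	// os.ReadFile replaces ioutil.ReadFile.
	data, err := os.ReadFile("/tmp/example.txt")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))

	// io.ReadAll replaces ioutil.ReadAll; io.Discard replaces ioutil.Discard.
	msg, _ := io.ReadAll(strings.NewReader("from any io.Reader"))
	_, _ = io.Copy(io.Discard, strings.NewReader(string(msg)))
}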
vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored): 2 changes

@@ -20,7 +20,6 @@ import (
 	"sync"
 	"time"

-	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
 )
@@ -100,7 +99,6 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
 		return nil, false
 	}
 	if c.expirationPolicy.IsExpired(timestampedItem) {
-		klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
 		c.cacheStorage.Delete(key)
 		return nil, false
 	}
vendor/k8s.io/client-go/tools/cache/listers.go (generated, vendored): 20 changes

@@ -53,24 +53,8 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {

 // ListAllByNamespace used to list items belongs to namespace from Indexer.
 func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
-	selectAll := selector.Empty()
 	if namespace == metav1.NamespaceAll {
-		for _, m := range indexer.List() {
-			if selectAll {
-				// Avoid computing labels of the objects to speed up common flows
-				// of listing all objects.
-				appendFn(m)
-				continue
-			}
-			metadata, err := meta.Accessor(m)
-			if err != nil {
-				return err
-			}
-			if selector.Matches(labels.Set(metadata.GetLabels())) {
-				appendFn(m)
-			}
-		}
-		return nil
+		return ListAll(indexer, selector, appendFn)
 	}

 	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
@@ -89,6 +73,8 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
 		}
 		return nil
 	}

+	selectAll := selector.Empty()
 	for _, m := range items {
 		if selectAll {
 			// Avoid computing labels of the objects to speed up common flows
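
For context, ListAllByNamespace with metav1.NamespaceAll now simply delegates to ListAll, which preserves the fast path that skips label extraction whenever the selector is empty. A hedged usage sketch (the indexer, package name, and pod type are illustrative assumptions, not part of this diff):

package listerexample

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// listDefaultPods prints every pod the indexer has cached for "default".
// labels.Everything() reports Empty() == true, so the fast path appends
// objects without ever computing their label sets.
func listDefaultPods(indexer cache.Indexer) error {
	return cache.ListAllByNamespace(indexer, "default", labels.Everything(),
		func(obj interface{}) {
			if pod, ok := obj.(*corev1.Pod); ok {
				fmt.Println(pod.Name)
			}
		})
}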
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored): 163 changes

@@ -135,7 +135,9 @@ type SharedInformer interface {
 	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
 	// period. Events to a single handler are delivered sequentially, but there is no coordination
 	// between different handlers.
-	AddEventHandler(handler ResourceEventHandler)
+	// It returns a registration handle for the handler that can be used to remove
+	// the handler again.
+	AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error)
 	// AddEventHandlerWithResyncPeriod adds an event handler to the
 	// shared informer with the requested resync period; zero means
 	// this handler does not care about resyncs. The resync operation
@@ -150,7 +152,13 @@ type SharedInformer interface {
 	// between any two resyncs may be longer than the nominal period
 	// because the implementation takes time to do work and there may
 	// be competing load and scheduling noise.
-	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
+	// It returns a registration handle for the handler that can be used to remove
+	// the handler again and an error if the handler cannot be added.
+	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error)
+	// RemoveEventHandler removes a formerly added event handler given by
+	// its registration handle.
+	// This function is guaranteed to be idempotent, and thread-safe.
+	RemoveEventHandler(handle ResourceEventHandlerRegistration) error
 	// GetStore returns the informer's local cache as a Store.
 	GetStore() Store
 	// GetController is deprecated, it does nothing useful
@@ -195,8 +203,18 @@ type SharedInformer interface {
 	// transform before mutating it at all and returning the copy to prevent
 	// data races.
 	SetTransform(handler TransformFunc) error
+
+	// IsStopped reports whether the informer has already been stopped.
+	// Adding event handlers to already stopped informers is not possible.
+	// An informer already stopped will never be started again.
+	IsStopped() bool
 }

+// Opaque interface representing the registration of ResourceEventHandler for
+// a SharedInformer. Must be supplied back to the same SharedInformer's
+// `RemoveEventHandler` to unregister the handlers.
+type ResourceEventHandlerRegistration interface{}
+
 // SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
 type SharedIndexInformer interface {
 	SharedInformer
@@ -492,8 +510,8 @@ func (s *sharedIndexInformer) GetController() Controller {
 	return &dummyController{informer: s}
 }

-func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
-	s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
+func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) {
+	return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
 }

 func determineResyncPeriod(desired, check time.Duration) time.Duration {
@@ -513,13 +531,12 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration {

 const minimumResyncPeriod = 1 * time.Second

-func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
+func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) {
 	s.startedLock.Lock()
 	defer s.startedLock.Unlock()

 	if s.stopped {
-		klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
-		return
+		return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler)
 	}

 	if resyncPeriod > 0 {
@@ -545,8 +562,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
 	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)

 	if !s.started {
-		s.processor.addListener(listener)
-		return
+		return s.processor.addListener(listener), nil
 	}

 	// in order to safely join, we have to
@@ -557,10 +573,11 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
 	s.blockDeltas.Lock()
 	defer s.blockDeltas.Unlock()

-	s.processor.addListener(listener)
+	handle := s.processor.addListener(listener)
 	for _, item := range s.indexer.List() {
 		listener.add(addNotification{newObj: item})
 	}
+	return handle, nil
 }

 func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
@@ -610,6 +627,26 @@ func (s *sharedIndexInformer) OnDelete(old interface{}) {
 	s.processor.distribute(deleteNotification{oldObj: old}, false)
 }

+// IsStopped reports whether the informer has already been stopped
+func (s *sharedIndexInformer) IsStopped() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+	return s.stopped
+}
+
+func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	// in order to safely remove, we have to
+	// 1. stop sending add/update/delete notifications
+	// 2. remove and stop listener
+	// 3. unblock
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+	return s.processor.removeListener(handle)
+}
+
 // sharedProcessor has a collection of processorListener and can
 // distribute a notification object to its listeners. There are two
 // kinds of distribute operations. The sync distributions go to a
@@ -619,39 +656,85 @@ func (s *sharedIndexInformer) OnDelete(old interface{}) {
 type sharedProcessor struct {
 	listenersStarted bool
 	listenersLock    sync.RWMutex
-	listeners        []*processorListener
-	syncingListeners []*processorListener
-	clock            clock.Clock
-	wg               wait.Group
+	// Map from listeners to whether or not they are currently syncing
+	listeners map[*processorListener]bool
+	clock     clock.Clock
+	wg        wait.Group
 }

-func (p *sharedProcessor) addListener(listener *processorListener) {
+func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	if p.listeners == nil {
+		return nil
+	}
+
+	if result, ok := registration.(*processorListener); ok {
+		if _, exists := p.listeners[result]; exists {
+			return result
+		}
+	}
+
+	return nil
+}
+
+func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration {
 	p.listenersLock.Lock()
 	defer p.listenersLock.Unlock()

-	p.addListenerLocked(listener)
+	if p.listeners == nil {
+		p.listeners = make(map[*processorListener]bool)
+	}
+
+	p.listeners[listener] = true
+
 	if p.listenersStarted {
 		p.wg.Start(listener.run)
 		p.wg.Start(listener.pop)
 	}
+
+	return listener
 }

-func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
-	p.listeners = append(p.listeners, listener)
-	p.syncingListeners = append(p.syncingListeners, listener)
+func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	listener, ok := handle.(*processorListener)
+	if !ok {
+		return fmt.Errorf("invalid key type %t", handle)
+	} else if p.listeners == nil {
+		// No listeners are registered, do nothing
+		return nil
+	} else if _, exists := p.listeners[listener]; !exists {
+		// Listener is not registered, just do nothing
+		return nil
+	}
+
+	delete(p.listeners, listener)
+
+	if p.listenersStarted {
+		close(listener.addCh)
+	}
+
+	return nil
 }

 func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
 	p.listenersLock.RLock()
 	defer p.listenersLock.RUnlock()

-	if sync {
-		for _, listener := range p.syncingListeners {
-			listener.add(obj)
-		}
-	} else {
-		for _, listener := range p.listeners {
-			listener.add(obj)
-		}
+	for listener, isSyncing := range p.listeners {
+		switch {
+		case !sync:
+			// non-sync messages are delivered to every listener
+			listener.add(obj)
+		case isSyncing:
+			// sync messages are delivered to every syncing listener
+			listener.add(obj)
+		default:
+			// skipping a sync obj for a non-syncing listener
+		}
 	}
 }
@@ -660,18 +743,27 @@ func (p *sharedProcessor) run(stopCh <-chan struct{}) {
 	func() {
 		p.listenersLock.RLock()
 		defer p.listenersLock.RUnlock()
-		for _, listener := range p.listeners {
+		for listener := range p.listeners {
 			p.wg.Start(listener.run)
 			p.wg.Start(listener.pop)
 		}
 		p.listenersStarted = true
 	}()
 	<-stopCh
-	p.listenersLock.RLock()
-	defer p.listenersLock.RUnlock()
-	for _, listener := range p.listeners {
+
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+	for listener := range p.listeners {
 		close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
 	}
+
+	// Wipe out list of listeners since they are now closed
+	// (processorListener cannot be re-used)
+	p.listeners = nil
+
+	// Reset to false since no listeners are running
+	p.listenersStarted = false
+
 	p.wg.Wait() // Wait for all .pop() and .run() to stop
 }
@@ -681,16 +773,16 @@ func (p *sharedProcessor) shouldResync() bool {
 	p.listenersLock.Lock()
 	defer p.listenersLock.Unlock()

-	p.syncingListeners = []*processorListener{}
-
 	resyncNeeded := false
 	now := p.clock.Now()
-	for _, listener := range p.listeners {
+	for listener := range p.listeners {
 		// need to loop through all the listeners to see if they need to resync so we can prepare any
 		// listeners that are going to be resyncing.
-		if listener.shouldResync(now) {
+		shouldResync := listener.shouldResync(now)
+		p.listeners[listener] = shouldResync
+
+		if shouldResync {
 			resyncNeeded = true
-			p.syncingListeners = append(p.syncingListeners, listener)
 			listener.determineNextResync(now)
 		}
 	}
@@ -701,8 +793,9 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati
 	p.listenersLock.RLock()
 	defer p.listenersLock.RUnlock()

-	for _, listener := range p.listeners {
-		resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
+	for listener := range p.listeners {
+		resyncPeriod := determineResyncPeriod(
+			listener.requestedResyncPeriod, resyncCheckPeriod)
 		listener.setResyncPeriod(resyncPeriod)
 	}
 }
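
This is the user-visible headline of the client-go 0.26 bump: AddEventHandler and AddEventHandlerWithResyncPeriod now return a ResourceEventHandlerRegistration handle plus an error, and handlers can be detached again with RemoveEventHandler. A minimal sketch of the new call pattern (the informer factory setup and 10-minute resync are assumptions, not part of this diff):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func watchPods(client kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	informer := factory.Core().V1().Pods().Informer()

	// Since client-go v0.26 the handle and error must be accepted;
	// earlier releases returned nothing here.
	reg, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("pod added") },
	})
	if err != nil {
		return err // e.g. the informer was already stopped
	}
	// Detach the handler when done; idempotent and thread-safe.
	defer func() { _ = informer.RemoveEventHandler(reg) }()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	<-stopCh
	return nil
}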
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored): 303 changes

@@ -59,15 +59,159 @@ type ThreadSafeStore interface {
 	Resync() error
 }

+// storeIndex implements the indexing functionality for Store interface
+type storeIndex struct {
+	// indexers maps a name to an IndexFunc
+	indexers Indexers
+	// indices maps a name to an Index
+	indices Indices
+}
+
+func (i *storeIndex) reset() {
+	i.indices = Indices{}
+}
+
+func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) {
+	indexFunc := i.indexers[indexName]
+	if indexFunc == nil {
+		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	}
+
+	indexedValues, err := indexFunc(obj)
+	if err != nil {
+		return nil, err
+	}
+	index := i.indices[indexName]
+
+	var storeKeySet sets.String
+	if len(indexedValues) == 1 {
+		// In majority of cases, there is exactly one value matching.
+		// Optimize the most common path - deduping is not needed here.
+		storeKeySet = index[indexedValues[0]]
+	} else {
+		// Need to de-dupe the return list.
+		// Since multiple keys are allowed, this can happen.
+		storeKeySet = sets.String{}
+		for _, indexedValue := range indexedValues {
+			for key := range index[indexedValue] {
+				storeKeySet.Insert(key)
+			}
+		}
+	}
+
+	return storeKeySet, nil
+}
+
+func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) {
+	indexFunc := i.indexers[indexName]
+	if indexFunc == nil {
+		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	}
+
+	index := i.indices[indexName]
+	return index[indexedValue], nil
+}
+
+func (i *storeIndex) getIndexValues(indexName string) []string {
+	index := i.indices[indexName]
+	names := make([]string, 0, len(index))
+	for key := range index {
+		names = append(names, key)
+	}
+	return names
+}
+
+func (i *storeIndex) addIndexers(newIndexers Indexers) error {
+	oldKeys := sets.StringKeySet(i.indexers)
+	newKeys := sets.StringKeySet(newIndexers)
+
+	if oldKeys.HasAny(newKeys.List()...) {
+		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
+	}
+
+	for k, v := range newIndexers {
+		i.indexers[k] = v
+	}
+	return nil
+}
+
+// updateIndices modifies the objects location in the managed indexes:
+// - for create you must provide only the newObj
+// - for update you must provide both the oldObj and the newObj
+// - for delete you must provide only the oldObj
+// updateIndices must be called from a function that already has a lock on the cache
+func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) {
+	var oldIndexValues, indexValues []string
+	var err error
+	for name, indexFunc := range i.indexers {
+		if oldObj != nil {
+			oldIndexValues, err = indexFunc(oldObj)
+		} else {
+			oldIndexValues = oldIndexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		if newObj != nil {
+			indexValues, err = indexFunc(newObj)
+		} else {
+			indexValues = indexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		index := i.indices[name]
+		if index == nil {
+			index = Index{}
+			i.indices[name] = index
+		}
+
+		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
+			// We optimize for the most common case where indexFunc returns a single value which has not been changed
+			continue
+		}
+
+		for _, value := range oldIndexValues {
+			i.deleteKeyFromIndex(key, value, index)
+		}
+		for _, value := range indexValues {
+			i.addKeyToIndex(key, value, index)
+		}
+	}
+}
+
+func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		set = sets.String{}
+		index[indexValue] = set
+	}
+	set.Insert(key)
+}
+
+func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		return
+	}
+	set.Delete(key)
+	// If we don't delete the set when zero, indices with high cardinality
+	// short lived resources can cause memory to increase over time from
+	// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
+	if len(set) == 0 {
+		delete(index, indexValue)
+	}
+}
+
 // threadSafeMap implements ThreadSafeStore
 type threadSafeMap struct {
 	lock  sync.RWMutex
 	items map[string]interface{}

-	// indexers maps a name to an IndexFunc
-	indexers Indexers
-	// indices maps a name to an Index
-	indices Indices
+	// index implements the indexing functionality
+	index *storeIndex
 }

 func (c *threadSafeMap) Add(key string, obj interface{}) {
@@ -79,14 +223,14 @@ func (c *threadSafeMap) Update(key string, obj interface{}) {
 	defer c.lock.Unlock()
 	oldObject := c.items[key]
 	c.items[key] = obj
-	c.updateIndices(oldObject, obj, key)
+	c.index.updateIndices(oldObject, obj, key)
 }

 func (c *threadSafeMap) Delete(key string) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 	if obj, exists := c.items[key]; exists {
-		c.updateIndices(obj, nil, key)
+		c.index.updateIndices(obj, nil, key)
 		delete(c.items, key)
 	}
 }
@@ -126,9 +270,9 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st
 	c.items = items

 	// rebuild any index
-	c.indices = Indices{}
+	c.index.reset()
 	for key, item := range c.items {
-		c.updateIndices(nil, item, key)
+		c.index.updateIndices(nil, item, key)
 	}
 }
@@ -138,32 +282,10 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
 	c.lock.RLock()
 	defer c.lock.RUnlock()

-	indexFunc := c.indexers[indexName]
-	if indexFunc == nil {
-		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
-	}
-
-	indexedValues, err := indexFunc(obj)
+	storeKeySet, err := c.index.getKeysFromIndex(indexName, obj)
 	if err != nil {
 		return nil, err
 	}
-	index := c.indices[indexName]
-
-	var storeKeySet sets.String
-	if len(indexedValues) == 1 {
-		// In majority of cases, there is exactly one value matching.
-		// Optimize the most common path - deduping is not needed here.
-		storeKeySet = index[indexedValues[0]]
-	} else {
-		// Need to de-dupe the return list.
-		// Since multiple keys are allowed, this can happen.
-		storeKeySet = sets.String{}
-		for _, indexedValue := range indexedValues {
-			for key := range index[indexedValue] {
-				storeKeySet.Insert(key)
-			}
-		}
-	}

 	list := make([]interface{}, 0, storeKeySet.Len())
 	for storeKey := range storeKeySet {
@@ -177,14 +299,10 @@ func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{},
 	c.lock.RLock()
 	defer c.lock.RUnlock()

-	indexFunc := c.indexers[indexName]
-	if indexFunc == nil {
-		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	set, err := c.index.getKeysByIndex(indexName, indexedValue)
+	if err != nil {
+		return nil, err
 	}
-
-	index := c.indices[indexName]
-
-	set := index[indexedValue]
 	list := make([]interface{}, 0, set.Len())
 	for key := range set {
 		list = append(list, c.items[key])
@@ -199,14 +317,10 @@ func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, err
 	c.lock.RLock()
 	defer c.lock.RUnlock()

-	indexFunc := c.indexers[indexName]
-	if indexFunc == nil {
-		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	set, err := c.index.getKeysByIndex(indexName, indexedValue)
+	if err != nil {
+		return nil, err
 	}
-
-	index := c.indices[indexName]
-
-	set := index[indexedValue]
 	return set.List(), nil
 }
@@ -214,16 +328,11 @@ func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
 	c.lock.RLock()
 	defer c.lock.RUnlock()

-	index := c.indices[indexName]
-	names := make([]string, 0, len(index))
-	for key := range index {
-		names = append(names, key)
-	}
-	return names
+	return c.index.getIndexValues(indexName)
 }

 func (c *threadSafeMap) GetIndexers() Indexers {
-	return c.indexers
+	return c.index.indexers
 }

 func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
@@ -234,87 +343,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
 		return fmt.Errorf("cannot add indexers to running index")
 	}

-	oldKeys := sets.StringKeySet(c.indexers)
-	newKeys := sets.StringKeySet(newIndexers)
-
-	if oldKeys.HasAny(newKeys.List()...) {
-		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
-	}
-
-	for k, v := range newIndexers {
-		c.indexers[k] = v
-	}
-	return nil
-}
-
-// updateIndices modifies the objects location in the managed indexes:
-// - for create you must provide only the newObj
-// - for update you must provide both the oldObj and the newObj
-// - for delete you must provide only the oldObj
-// updateIndices must be called from a function that already has a lock on the cache
-func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
-	var oldIndexValues, indexValues []string
-	var err error
-	for name, indexFunc := range c.indexers {
-		if oldObj != nil {
-			oldIndexValues, err = indexFunc(oldObj)
-		} else {
-			oldIndexValues = oldIndexValues[:0]
-		}
-		if err != nil {
-			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
-		}
-
-		if newObj != nil {
-			indexValues, err = indexFunc(newObj)
-		} else {
-			indexValues = indexValues[:0]
-		}
-		if err != nil {
-			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
-		}
-
-		index := c.indices[name]
-		if index == nil {
-			index = Index{}
-			c.indices[name] = index
-		}
-
-		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
-			// We optimize for the most common case where indexFunc returns a single value which has not been changed
-			continue
-		}
-
-		for _, value := range oldIndexValues {
-			c.deleteKeyFromIndex(key, value, index)
-		}
-		for _, value := range indexValues {
-			c.addKeyToIndex(key, value, index)
-		}
-	}
-}
-
-func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) {
-	set := index[indexValue]
-	if set == nil {
-		set = sets.String{}
-		index[indexValue] = set
-	}
-	set.Insert(key)
-}
-
-func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) {
-	set := index[indexValue]
-	if set == nil {
-		return
-	}
-	set.Delete(key)
-	// If we don't delete the set when zero, indices with high cardinality
-	// short lived resources can cause memory to increase over time from
-	// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
-	if len(set) == 0 {
-		delete(index, indexValue)
-	}
+	return c.index.addIndexers(newIndexers)
 }

 func (c *threadSafeMap) Resync() error {
@@ -325,8 +354,10 @@ func (c *threadSafeMap) Resync() error {
 // NewThreadSafeStore creates a new instance of ThreadSafeStore.
 func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
 	return &threadSafeMap{
-		items:    map[string]interface{}{},
-		indexers: indexers,
-		indices:  indices,
+		items: map[string]interface{}{},
+		index: &storeIndex{
+			indexers: indexers,
+			indices:  indices,
+		},
 	}
 }
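
The refactor above moves all index bookkeeping from threadSafeMap into the new storeIndex type without changing observable behavior. For readers unfamiliar with the API these methods back, here is a hedged sketch of a custom index on a cache.Indexer (the "byNode" index name and the pod values are illustrative assumptions):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// An indexer keyed by namespace/name, with one extra index by node name.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"byNode": func(obj interface{}) ([]string, error) {
			pod := obj.(*corev1.Pod)
			return []string{pod.Spec.NodeName}, nil
		},
	})

	pod := &corev1.Pod{}
	pod.Name, pod.Namespace, pod.Spec.NodeName = "web-0", "default", "worker-1"
	_ = indexer.Add(pod)

	// ByIndex goes through threadSafeMap.ByIndex -> storeIndex.getKeysByIndex.
	objs, err := indexer.ByIndex("byNode", "worker-1")
	fmt.Println(len(objs), err) // 1 <nil>
}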
vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go (generated, vendored): 91 changes

@@ -20,10 +20,11 @@ import (
 	"encoding/base64"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path"
 	"path/filepath"
+	"reflect"
 	"strings"
 )

 func init() {
@@ -82,21 +83,21 @@ func MinifyConfig(config *Config) error {
 }

 var (
-	redactedBytes []byte
+	dataOmittedBytes []byte
+	redactedBytes    []byte
 )

-// Flatten redacts raw data entries from the config object for a human-readable view.
+// ShortenConfig redacts raw data entries from the config object for a human-readable view.
 func ShortenConfig(config *Config) {
-	// trick json encoder into printing a human readable string in the raw data
+	// trick json encoder into printing a human-readable string in the raw data
 	// by base64 decoding what we want to print. Relies on implementation of
 	// http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
 	for key, authInfo := range config.AuthInfos {
 		if len(authInfo.ClientKeyData) > 0 {
-			authInfo.ClientKeyData = redactedBytes
+			authInfo.ClientKeyData = dataOmittedBytes
 		}
 		if len(authInfo.ClientCertificateData) > 0 {
-			authInfo.ClientCertificateData = redactedBytes
+			authInfo.ClientCertificateData = dataOmittedBytes
 		}
 		if len(authInfo.Token) > 0 {
 			authInfo.Token = "REDACTED"
@@ -111,7 +112,7 @@ func ShortenConfig(config *Config) {
 	}
 }

-// Flatten changes the config object into a self contained config (useful for making secrets)
+// FlattenConfig changes the config object into a self-contained config (useful for making secrets)
 func FlattenConfig(config *Config) error {
 	for key, authInfo := range config.AuthInfos {
 		baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
@@ -152,7 +153,7 @@ func FlattenContent(path *string, contents *[]byte, baseDir string) error {

 	var err error
 	absPath := ResolvePath(*path, baseDir)
-	*contents, err = ioutil.ReadFile(absPath)
+	*contents, err = os.ReadFile(absPath)
 	if err != nil {
 		return err
 	}
@@ -189,3 +190,77 @@ func MakeAbs(path, base string) (string, error) {
 	}
 	return filepath.Join(base, path), nil
 }
+
+// RedactSecrets replaces any sensitive values with REDACTED
+func RedactSecrets(config *Config) error {
+	return redactSecrets(reflect.ValueOf(config), false)
+}
+
+func redactSecrets(curr reflect.Value, redact bool) error {
+	redactedBytes = []byte("REDACTED")
+	if !curr.IsValid() {
+		return nil
+	}
+
+	actualCurrValue := curr
+	if curr.Kind() == reflect.Ptr {
+		actualCurrValue = curr.Elem()
+	}
+
+	switch actualCurrValue.Kind() {
+	case reflect.Map:
+		for _, v := range actualCurrValue.MapKeys() {
+			err := redactSecrets(actualCurrValue.MapIndex(v), false)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case reflect.String:
+		if redact {
+			if !actualCurrValue.IsZero() {
+				actualCurrValue.SetString("REDACTED")
+			}
+		}
+		return nil
+
+	case reflect.Slice:
+		if actualCurrValue.Type() == reflect.TypeOf([]byte{}) && redact {
+			if !actualCurrValue.IsNil() {
+				actualCurrValue.SetBytes(redactedBytes)
+			}
+			return nil
+		}
+		for i := 0; i < actualCurrValue.Len(); i++ {
+			err := redactSecrets(actualCurrValue.Index(i), false)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case reflect.Struct:
+		for fieldIndex := 0; fieldIndex < actualCurrValue.NumField(); fieldIndex++ {
+			currFieldValue := actualCurrValue.Field(fieldIndex)
+			currFieldType := actualCurrValue.Type().Field(fieldIndex)
+			currYamlTag := currFieldType.Tag.Get("datapolicy")
+			currFieldTypeYamlName := strings.Split(currYamlTag, ",")[0]
+			if currFieldTypeYamlName != "" {
+				err := redactSecrets(currFieldValue, true)
+				if err != nil {
+					return err
+				}
+			} else {
+				err := redactSecrets(currFieldValue, false)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+
+	default:
+		return nil
+	}
+}
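
RedactSecrets is new API in k8s.io/client-go/tools/clientcmd/api: it walks a Config via reflection and overwrites every field carrying a datapolicy struct tag (tokens, passwords, key material) with REDACTED, which is useful before logging a kubeconfig. A hedged usage sketch (the kubeconfig path is a placeholder):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg, err := clientcmd.LoadFromFile("/tmp/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	// Overwrite datapolicy-tagged fields in place before printing.
	if err := api.RedactSecrets(cfg); err != nil {
		panic(err)
	}
	out, _ := clientcmd.Write(*cfg)
	fmt.Println(string(out))
}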
vendor/k8s.io/client-go/tools/clientcmd/api/types.go (generated, vendored): 5 changes

@@ -93,6 +93,11 @@ type Cluster struct {
 	// attach, port forward).
 	// +optional
 	ProxyURL string `json:"proxy-url,omitempty"`
+	// DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool `json:"disable-compression,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
 	// +optional
 	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
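
The same DisableCompression knob also exists on rest.Config, so it can be set programmatically as well as via the new kubeconfig field. A hedged sketch (the kubeconfig path is a placeholder):

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func newUncompressedClient() (*kubernetes.Clientset, error) {
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // placeholder
	if err != nil {
		return nil, err
	}
	// Skip gzip on responses; large LIST calls get faster when
	// bandwidth between client and apiserver is plentiful.
	config.DisableCompression = true
	return kubernetes.NewForConfig(config)
}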
vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go (generated, vendored): 5 changes

@@ -86,6 +86,11 @@ type Cluster struct {
 	// attach, port forward).
 	// +optional
 	ProxyURL string `json:"proxy-url,omitempty"`
+	// DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool `json:"disable-compression,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
 	// +optional
 	Extensions []NamedExtension `json:"extensions,omitempty"`
vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go (generated, vendored): 2 changes

@@ -257,6 +257,7 @@ func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conv
 	out.CertificateAuthority = in.CertificateAuthority
 	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
 	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
 	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
 		return err
 	}
@@ -276,6 +277,7 @@ func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conv
 	out.CertificateAuthority = in.CertificateAuthority
 	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
 	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
 	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
 		return err
 	}
vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go (generated, vendored): 3 changes

@@ -20,7 +20,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"

 	"golang.org/x/term"
@@ -59,7 +58,7 @@ func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
 		if err != nil {
 			return &auth, err
 		}
-		err = ioutil.WriteFile(path, data, 0600)
+		err = os.WriteFile(path, data, 0600)
 		return &auth, err
 	}
 	authPtr, err := clientauth.LoadFromFile(path)
vendor/k8s.io/client-go/tools/clientcmd/client_config.go (generated, vendored): 7 changes

@@ -19,7 +19,6 @@ package clientcmd
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -165,6 +164,8 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
 		clientConfig.Proxy = http.ProxyURL(u)
 	}

+	clientConfig.DisableCompression = configClusterInfo.DisableCompression
+
 	if config.overrides != nil && len(config.overrides.Timeout) > 0 {
 		timeout, err := ParseTimeout(config.overrides.Timeout)
 		if err != nil {
@@ -246,7 +247,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
 		mergedConfig.BearerToken = configAuthInfo.Token
 		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
 	} else if len(configAuthInfo.TokenFile) > 0 {
-		tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
+		tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile)
 		if err != nil {
 			return nil, err
 		}
@@ -586,7 +587,7 @@ func (config *inClusterClientConfig) Namespace() (string, bool, error) {
 	}

 	// Fall back to the namespace associated with the service account token, if available
-	if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
+	if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
 		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
 			return ns, false, nil
 		}
vendor/k8s.io/client-go/tools/clientcmd/loader.go (generated, vendored): 9 changes

@@ -18,7 +18,6 @@ package clientcmd

 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -283,12 +282,12 @@ func (rules *ClientConfigLoadingRules) Migrate() error {
 			return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination)
 		}

-		data, err := ioutil.ReadFile(source)
+		data, err := os.ReadFile(source)
 		if err != nil {
 			return err
 		}
 		// destination is created with mode 0666 before umask
-		err = ioutil.WriteFile(destination, data, 0666)
+		err = os.WriteFile(destination, data, 0666)
 		if err != nil {
 			return err
 		}
@@ -363,7 +362,7 @@ func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config

 // LoadFromFile takes a filename and deserializes the contents into Config object
 func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
-	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	kubeconfigBytes, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
@@ -429,7 +428,7 @@ func WriteToFile(config clientcmdapi.Config, filename string) error {
 		}
 	}

-	if err := ioutil.WriteFile(filename, content, 0600); err != nil {
+	if err := os.WriteFile(filename, content, 0600); err != nil {
 		return err
 	}
 	return nil
vendor/k8s.io/client-go/tools/clientcmd/overrides.go (generated, vendored): 42 changes

@@ -74,6 +74,7 @@ type ClusterOverrideFlags struct {
 	InsecureSkipTLSVerify FlagInfo
 	TLSServerName         FlagInfo
 	ProxyURL              FlagInfo
+	DisableCompression    FlagInfo
 }

 // FlagInfo contains information about how to register a flag. This struct is useful if you want to provide a way for an extender to
@@ -143,25 +144,26 @@ func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo {
 }

 const (
-	FlagClusterName      = "cluster"
-	FlagAuthInfoName     = "user"
-	FlagContext          = "context"
-	FlagNamespace        = "namespace"
-	FlagAPIServer        = "server"
-	FlagTLSServerName    = "tls-server-name"
-	FlagInsecure         = "insecure-skip-tls-verify"
-	FlagCertFile         = "client-certificate"
-	FlagKeyFile          = "client-key"
-	FlagCAFile           = "certificate-authority"
-	FlagEmbedCerts       = "embed-certs"
-	FlagBearerToken      = "token"
-	FlagImpersonate      = "as"
-	FlagImpersonateUID   = "as-uid"
-	FlagImpersonateGroup = "as-group"
-	FlagUsername         = "username"
-	FlagPassword         = "password"
-	FlagTimeout          = "request-timeout"
-	FlagProxyURL         = "proxy-url"
+	FlagClusterName        = "cluster"
+	FlagAuthInfoName       = "user"
+	FlagContext            = "context"
+	FlagNamespace          = "namespace"
+	FlagAPIServer          = "server"
+	FlagTLSServerName      = "tls-server-name"
+	FlagInsecure           = "insecure-skip-tls-verify"
+	FlagCertFile           = "client-certificate"
+	FlagKeyFile            = "client-key"
+	FlagCAFile             = "certificate-authority"
+	FlagEmbedCerts         = "embed-certs"
+	FlagBearerToken        = "token"
+	FlagImpersonate        = "as"
+	FlagImpersonateUID     = "as-uid"
+	FlagImpersonateGroup   = "as-group"
+	FlagUsername           = "username"
+	FlagPassword           = "password"
+	FlagTimeout            = "request-timeout"
+	FlagProxyURL           = "proxy-url"
+	FlagDisableCompression = "disable-compression"
 )

 // RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
@@ -198,6 +200,7 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
 		InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
 		TLSServerName:         FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used."},
 		ProxyURL:              FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"},
+		DisableCompression:    FlagInfo{prefix + FlagDisableCompression, "", "", "If true, opt-out of response compression for all requests to the server"},
 	}
 }
@@ -238,6 +241,7 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f
 	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
 	flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName)
 	flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL)
+	flagNames.DisableCompression.BindBoolFlag(flags, &clusterInfo.DisableCompression)
 }

 // BindFlags is a convenience method to bind the specified flags to their associated variables
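
These flag names feed clientcmd's override machinery; a CLI that wants the new --disable-compression flag (and the rest of the cluster overrides) can bind them to a ConfigOverrides, as in this hedged sketch:

package main

import (
	"github.com/spf13/pflag"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	overrides := clientcmd.ConfigOverrides{}
	// An empty prefix yields --cluster, --server, --disable-compression, etc.
	flagNames := clientcmd.RecommendedClusterOverrideFlags("")
	clientcmd.BindClusterFlags(&overrides.ClusterInfo, pflag.CommandLine, flagNames)
	pflag.Parse()
	_ = overrides // feed into clientcmd.NewDefaultClientConfig(...) as usual
}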
vendor/k8s.io/client-go/tools/events/event_broadcaster.go (generated, vendored): 32 changes

@@ -292,8 +292,9 @@ func getKey(event *eventsv1.Event) eventKey {

 // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
 // The return value can be ignored or used to stop recording, if desired.
+// TODO: this function should also return an error.
 func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() {
-	return e.StartEventWatcher(
+	stopWatcher, err := e.StartEventWatcher(
 		func(obj runtime.Object) {
 			event, ok := obj.(*eventsv1.Event)
 			if !ok {
@@ -302,19 +303,20 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func
 			}
 			klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note)
 		})
+	if err != nil {
+		klog.Errorf("failed to start event watcher: '%v'", err)
+		return func() {}
+	}
+	return stopWatcher
 }

 // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
 // The return value is used to stop recording
-func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() {
+func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) {
 	watcher, err := e.Watch()
 	if err != nil {
 		klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
-		// TODO: Rewrite the function signature to return an error, for
-		// now just return a no-op function
-		return func() {
-			klog.Error("The event watcher failed to start")
-		}
+		return nil, err
 	}
 	go func() {
 		defer utilruntime.HandleCrash()
@@ -326,10 +328,10 @@ func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime
 			eventHandler(watchEvent.Object)
 		}
 	}()
-	return watcher.Stop
+	return watcher.Stop, nil
 }

-func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) {
+func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) error {
 	eventHandler := func(obj runtime.Object) {
 		event, ok := obj.(*eventsv1.Event)
 		if !ok {
@@ -338,18 +340,26 @@ func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) {
 		}
 		e.recordToSink(event, clock.RealClock{})
 	}
-	stopWatcher := e.StartEventWatcher(eventHandler)
+	stopWatcher, err := e.StartEventWatcher(eventHandler)
+	if err != nil {
+		return err
+	}
 	go func() {
 		<-stopCh
 		stopWatcher()
 	}()
+	return nil
 }

 // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
 func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) {
 	go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh)
 	go wait.Until(e.finishSeries, finishTime, stopCh)
-	e.startRecordingEvents(stopCh)
+	err := e.startRecordingEvents(stopCh)
+	if err != nil {
+		klog.Errorf("unexpected type, expected eventsv1.Event")
+		return
+	}
 }

 type eventBroadcasterAdapterImpl struct {
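
StartEventWatcher on the events/v1 broadcaster now surfaces its startup failure instead of handing back a no-op stop function, so callers must check the error. A hedged sketch of the new pattern (broadcaster construction is assumed, not shown in this diff):

package main

import (
	eventsv1 "k8s.io/api/events/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
)

func logEvents(broadcaster events.EventBroadcaster) (func(), error) {
	// Since client-go v0.26 this returns (func(), error); before, just func().
	return broadcaster.StartEventWatcher(func(obj runtime.Object) {
		if ev, ok := obj.(*eventsv1.Event); ok {
			klog.InfoS("event", "reason", ev.Reason, "note", ev.Note)
		}
	})
}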
vendor/k8s.io/client-go/tools/events/interfaces.go (generated, vendored): 2 changes

@@ -55,7 +55,7 @@ type EventBroadcaster interface {
 	// of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests).
 	// NOTE: events received on your eventHandler should be copied before being used.
 	// TODO: figure out if this can be removed.
-	StartEventWatcher(eventHandler func(event runtime.Object)) func()
+	StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error)

 	// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured
 	// logging function. The return value can be ignored or used to stop recording, if desired.
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (generated, vendored): 4 changes

@@ -44,11 +44,11 @@ type configMapLock struct {
 // Get returns the election record from a ConfigMap Annotation
 func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 	var record LeaderElectionRecord
-	var err error
-	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
+	cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
 	if err != nil {
 		return nil, nil, err
 	}
+	cml.cm = cm
 	if cml.cm.Annotations == nil {
 		cml.cm.Annotations = make(map[string]string)
 	}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (generated, vendored): 4 changes

@@ -39,11 +39,11 @@ type endpointsLock struct {
 // Get returns the election record from a Endpoints Annotation
 func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 	var record LeaderElectionRecord
-	var err error
-	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
+	ep, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
 	if err != nil {
 		return nil, nil, err
 	}
+	el.e = ep
 	if el.e.Annotations == nil {
 		el.e.Annotations = make(map[string]string)
 	}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go (generated, vendored): 4 changes

@@ -39,11 +39,11 @@ type LeaseLock struct {

 // Get returns the election record from a Lease spec
 func (ll *LeaseLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
-	var err error
-	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ctx, ll.LeaseMeta.Name, metav1.GetOptions{})
+	lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ctx, ll.LeaseMeta.Name, metav1.GetOptions{})
 	if err != nil {
 		return nil, nil, err
 	}
+	ll.lease = lease
 	record := LeaseSpecToLeaderElectionRecord(&ll.lease.Spec)
 	recordByte, err := json.Marshal(*record)
 	if err != nil {
vendor/k8s.io/client-go/tools/pager/pager.go (generated, vendored): 5 changes

@@ -203,6 +203,11 @@ func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.Li
 	}()

 	for o := range chunkC {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
 		err := fn(o)
 		if err != nil {
 			return err // any fn error should be returned immediately
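
The pager fix illustrates a general Go pattern worth naming: a for-range over a channel does not notice context cancellation by itself, so a non-blocking select at the top of each iteration lets the loop bail out promptly. A self-contained sketch of the same idiom (everything here is illustrative, not from the diff):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// drain processes items until the channel closes or ctx is cancelled.
func drain(ctx context.Context, items <-chan int) error {
	for item := range items {
		// Non-blocking poll: range alone would keep consuming after cancel.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		fmt.Println("processing", item)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	ch := make(chan int)
	go func() { // slow producer that never closes within the timeout
		for i := 0; ; i++ {
			time.Sleep(5 * time.Millisecond)
			ch <- i
		}
	}()
	if err := drain(ctx, ch); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("stopped:", err)
	}
}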
vendor/k8s.io/client-go/tools/portforward/portforward.go (generated, vendored): 5 changes

@@ -20,7 +20,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"sort"
@@ -348,10 +347,11 @@ func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
 	}
 	// we're not writing to this stream
 	errorStream.Close()
+	defer pf.streamConn.RemoveStreams(errorStream)

 	errorChan := make(chan error)
 	go func() {
-		message, err := ioutil.ReadAll(errorStream)
+		message, err := io.ReadAll(errorStream)
 		switch {
 		case err != nil:
 			errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
@@ -368,6 +368,7 @@ func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
 		runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
 		return
 	}
+	defer pf.streamConn.RemoveStreams(dataStream)

 	localError := make(chan struct{})
 	remoteDone := make(chan struct{})
vendor/k8s.io/client-go/tools/reference/ref.go (generated, vendored): 2 changes

@@ -34,7 +34,7 @@ var (
 // GetReference returns an ObjectReference which refers to the given
 // object, or an error if the object doesn't follow the conventions
 // that would allow this.
-// TODO: should take a meta.Interface see http://issue.k8s.io/7127
+// TODO: should take a meta.Interface see https://issue.k8s.io/7127
 func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) {
 	if obj == nil {
 		return nil, ErrNilObject
vendor/k8s.io/client-go/tools/remotecommand/errorstream.go (generated, vendored): 3 changes

@@ -19,7 +19,6 @@ package remotecommand
 import (
 	"fmt"
 	"io"
-	"io/ioutil"

 	"k8s.io/apimachinery/pkg/util/runtime"
 )
@@ -39,7 +38,7 @@ func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
 	go func() {
 		defer runtime.HandleCrash()

-		message, err := ioutil.ReadAll(errorStream)
+		message, err := io.ReadAll(errorStream)
 		switch {
 		case err != nil && err != io.EOF:
 			errorChan <- fmt.Errorf("error reading from error stream: %s", err)
vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go (generated, vendored): 60 changes

@@ -17,6 +17,7 @@ limitations under the License.
 package remotecommand

 import (
+	"context"
 	"fmt"
 	"io"
 	"net/http"
@@ -27,7 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/httpstream"
 	"k8s.io/apimachinery/pkg/util/remotecommand"
 	restclient "k8s.io/client-go/rest"
-	spdy "k8s.io/client-go/transport/spdy"
+	"k8s.io/client-go/transport/spdy"
 )

 // StreamOptions holds information pertaining to the current streaming session:
@@ -43,11 +44,16 @@ type StreamOptions struct {

 // Executor is an interface for transporting shell-style streams.
 type Executor interface {
 	// Stream initiates the transport of the standard shell streams. It will transport any
 	// non-nil stream to a remote system, and return an error if a problem occurs. If tty
 	// is set, the stderr stream is not used (raw TTY manages stdout and stderr over the
 	// stdout stream).
+	// Deprecated: use StreamWithContext instead to avoid possible resource leaks.
+	// See https://github.com/kubernetes/kubernetes/pull/103177 for details.
 	Stream(options StreamOptions) error
+
+	// StreamWithContext initiates the transport of the standard shell streams. It will
+	// transport any non-nil stream to a remote system, and return an error if a problem
+	// occurs. If tty is set, the stderr stream is not used (raw TTY manages stdout and
+	// stderr over the stdout stream).
+	// The context controls the entire lifetime of stream execution.
+	StreamWithContext(ctx context.Context, options StreamOptions) error
 }

 type streamCreator interface {
@@ -106,9 +112,14 @@ func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgr
 // Stream opens a protocol streamer to the server and streams until a client closes
 // the connection or the server disconnects.
 func (e *streamExecutor) Stream(options StreamOptions) error {
-	req, err := http.NewRequest(e.method, e.url.String(), nil)
+	return e.StreamWithContext(context.Background(), options)
+}
+
+// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it.
+func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) {
+	req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil)
 	if err != nil {
-		return fmt.Errorf("error creating request: %v", err)
+		return nil, nil, fmt.Errorf("error creating request: %v", err)
 	}

 	conn, protocol, err := spdy.Negotiate(
@@ -118,9 +129,8 @@
 		e.protocols...,
 	)
 	if err != nil {
-		return err
+		return nil, nil, err
 	}
-	defer conn.Close()

 	var streamer streamProtocolHandler

@@ -138,5 +148,35 @@
 		streamer = newStreamProtocolV1(options)
 	}

-	return streamer.stream(conn)
+	return conn, streamer, nil
+}
+
+// StreamWithContext opens a protocol streamer to the server and streams until a client closes
+// the connection or the server disconnects or the context is done.
+func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
+	conn, streamer, err := e.newConnectionAndStream(ctx, options)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	panicChan := make(chan any, 1)
+	errorChan := make(chan error, 1)
+	go func() {
+		defer func() {
+			if p := recover(); p != nil {
+				panicChan <- p
+			}
+		}()
+		errorChan <- streamer.stream(conn)
+	}()
+
+	select {
+	case p := <-panicChan:
+		panic(p)
+	case err := <-errorChan:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
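
StreamWithContext is the other notable new API in this bump: Stream is now just StreamWithContext(context.Background(), ...), and a caller-supplied context bounds the whole exec session, addressing the leak tracked in kubernetes/kubernetes#103177. A hedged usage sketch (the rest config, exec URL, and 30-second timeout are assumptions):

package main

import (
	"context"
	"net/url"
	"os"
	"time"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// runExec streams an exec session and gives up after 30 seconds.
func runExec(config *restclient.Config, execURL *url.URL) error {
	exec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// On timeout or cancel this returns ctx.Err() instead of hanging forever.
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
}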
vendor/k8s.io/client-go/tools/remotecommand/v1.go (generated, vendored): 7 changes

@@ -19,7 +19,6 @@ package remotecommand
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"

 	"k8s.io/api/core/v1"
@@ -29,8 +28,8 @@ import (

 // streamProtocolV1 implements the first version of the streaming exec & attach
 // protocol. This version has some bugs, such as not being able to detect when
-// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
-// http://issues.k8s.io/13395 for more details.
+// non-interactive stdin data has ended. See https://issues.k8s.io/13394 and
+// https://issues.k8s.io/13395 for more details.
 type streamProtocolV1 struct {
 	StreamOptions

@@ -111,7 +110,7 @@ func (p *streamProtocolV1) stream(conn streamCreator) error {

 	// always read from errorStream
 	go func() {
-		message, err := ioutil.ReadAll(p.errorStream)
+		message, err := io.ReadAll(p.errorStream)
 		if err != nil && err != io.EOF {
 			errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
 			return
vendor/k8s.io/client-go/tools/remotecommand/v2.go (generated, vendored): 7 changes

@@ -19,7 +19,6 @@ package remotecommand
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"sync"

@@ -126,7 +125,7 @@ func (p *streamProtocolV2) copyStdin() {

 		// this "copy" doesn't actually read anything - it's just here to wait for
 		// the server to close remoteStdin.
-		if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil {
+		if _, err := io.Copy(io.Discard, p.remoteStdin); err != nil {
 			runtime.HandleError(err)
 		}
 	}()
@@ -145,7 +144,7 @@ func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
 		// make sure, packet in queue can be consumed.
 		// block in queue may lead to deadlock in conn.server
 		// issue: https://github.com/kubernetes/kubernetes/issues/96339
-		defer io.Copy(ioutil.Discard, p.remoteStdout)
+		defer io.Copy(io.Discard, p.remoteStdout)

 		if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
 			runtime.HandleError(err)
@@ -162,7 +161,7 @@ func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
 	go func() {
 		defer runtime.HandleCrash()
 		defer wg.Done()
-		defer io.Copy(ioutil.Discard, p.remoteStderr)
+		defer io.Copy(io.Discard, p.remoteStderr)

 		if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
 			runtime.HandleError(err)