Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00

rebase: update K8s packages to v0.32.1

Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
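The go.mod hunk itself is not shown in this view. As a rough, illustrative sketch only (the exact module list is an assumption; only the v0.32.1 target comes from the commit message), the rebase amounts to bumps of this shape in go.mod:

require (
	k8s.io/api v0.32.1
	k8s.io/apimachinery v0.32.1
	k8s.io/client-go v0.32.1
	// ...remaining k8s.io staging modules bumped to the same version
)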
vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored): 3 changes

@@ -7,7 +7,6 @@ approvers:
- deads2k
- caesarxuchao
- liggitt
- ncdc
reviewers:
- thockin
- smarterclayton
@@ -23,6 +22,6 @@ reviewers:
- jsafrane
- dims
- ingvagabund
- ncdc
emeritus_approvers:
- lavalamp
- ncdc
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored): 10 changes

@@ -57,7 +57,7 @@ var (

// Reflector watches a specified resource and causes all changes to be reflected in the given store.
type Reflector struct {
	// name identifies this reflector. By default it will be a file:line if possible.
	// name identifies this reflector. By default, it will be a file:line if possible.
	name string
	// The name of the type we expect to place in the store. The name
	// will be the stringification of expectedGVK if provided, and the
@@ -121,6 +121,14 @@ type Reflector struct {
	UseWatchList *bool
}

func (r *Reflector) Name() string {
	return r.name
}

func (r *Reflector) TypeDescription() string {
	return r.typeDescription
}

// ResourceVersionUpdater is an interface that allows store implementation to
// track the current resource version of the reflector. This is especially
// important if storage bookmarks are enabled.
vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go (generated, vendored): 2 changes

@@ -50,7 +50,7 @@ func init() {
	Scheme = runtime.NewScheme()
	utilruntime.Must(api.AddToScheme(Scheme))
	utilruntime.Must(v1.AddToScheme(Scheme))
	yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme)
	yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true})
	Codec = versioning.NewDefaultingCodecForScheme(
		Scheme,
		yamlSerializer,
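json.NewYAMLSerializer is deprecated in favor of json.NewSerializerWithOptions, which is what this hunk switches init() to. A small, assumed-for-illustration sketch of using the resulting codec, not part of this commit (the sample context name is made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
)

func main() {
	cfg := clientcmdapi.NewConfig()
	cfg.CurrentContext = "example"
	// latest.Codec wraps the YAML serializer constructed in the init() shown above.
	data, err := runtime.Encode(clientcmdlatest.Codec, cfg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data)) // the kubeconfig rendered as YAML
}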
vendor/k8s.io/client-go/tools/clientcmd/client_config.go (generated, vendored): 76 changes

@@ -29,8 +29,6 @@ import (
	clientauth "k8s.io/client-go/tools/auth"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/klog/v2"

	"github.com/imdario/mergo"
)

const (
@@ -241,45 +239,37 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
		if err != nil {
			return nil, err
		}
		mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride)

		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
		if err != nil {
		if err := merge(clientConfig, userAuthPartialConfig); err != nil {
			return nil, err
		}

		serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo)
		if err := merge(clientConfig, serverAuthPartialConfig); err != nil {
			return nil, err
		}
		mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride)
	}

	return clientConfig, nil
}

// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately
// we want this order of precedence for the server identification
// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. load the ~/.kubernetes_auth file as a default
func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
	mergedConfig := &restclient.Config{}
// both, so we have to split the objects and merge them separately.

// configClusterInfo holds the information identify the server provided by .kubeconfig
// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo
// (the final result of command line flags and merged .kubeconfig files).
func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config {
	configClientConfig := &restclient.Config{}
	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
	configClientConfig.ServerName = configClusterInfo.TLSServerName
	mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride)

	return mergedConfig, nil
	return configClientConfig
}

// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
// both, so we have to split the objects and merge them separately
// we want this order of precedence for user identification
// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo
// (the final result of command line flags and merged .kubeconfig files);
// if the information available there is insufficient, it prompts (if possible) for additional information.
func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
	mergedConfig := &restclient.Config{}

@@ -338,8 +328,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
		promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
		previouslyMergedConfig := mergedConfig
		mergedConfig = &restclient.Config{}
		mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride)
		mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride)
		if err := merge(mergedConfig, promptedConfig); err != nil {
			return nil, err
		}
		if err := merge(mergedConfig, previouslyMergedConfig); err != nil {
			return nil, err
		}
		config.promptedCredentials.username = mergedConfig.Username
		config.promptedCredentials.password = mergedConfig.Password
	}
@@ -347,7 +341,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
	return mergedConfig, nil
}

// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information
// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged for only user identification information
func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
	config := &restclient.Config{}
	config.Username = info.User
@@ -507,12 +501,16 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {

	mergedContext := clientcmdapi.NewContext()
	if configContext, exists := contexts[contextName]; exists {
		mergo.Merge(mergedContext, configContext, mergo.WithOverride)
		if err := merge(mergedContext, configContext); err != nil {
			return clientcmdapi.Context{}, err
		}
	} else if required {
		return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
	}
	if config.overrides != nil {
		mergo.Merge(mergedContext, config.overrides.Context, mergo.WithOverride)
		if err := merge(mergedContext, &config.overrides.Context); err != nil {
			return clientcmdapi.Context{}, err
		}
	}

	return *mergedContext, nil
@@ -525,12 +523,16 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {

	mergedAuthInfo := clientcmdapi.NewAuthInfo()
	if configAuthInfo, exists := authInfos[authInfoName]; exists {
		mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride)
		if err := merge(mergedAuthInfo, configAuthInfo); err != nil {
			return clientcmdapi.AuthInfo{}, err
		}
	} else if required {
		return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
	}
	if config.overrides != nil {
		mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride)
		if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil {
			return clientcmdapi.AuthInfo{}, err
		}
	}

	return *mergedAuthInfo, nil
@@ -543,15 +545,21 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {

	mergedClusterInfo := clientcmdapi.NewCluster()
	if config.overrides != nil {
		mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride)
		if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil {
			return clientcmdapi.Cluster{}, err
		}
	}
	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
		mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride)
		if err := merge(mergedClusterInfo, configClusterInfo); err != nil {
			return clientcmdapi.Cluster{}, err
		}
	} else if required {
		return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
	}
	if config.overrides != nil {
		mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride)
		if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil {
			return clientcmdapi.Cluster{}, err
		}
	}

	// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
vendor/k8s.io/client-go/tools/clientcmd/loader.go (generated, vendored): 17 changes

@@ -24,7 +24,6 @@ import (
	goruntime "runtime"
	"strings"

	"github.com/imdario/mergo"
	"k8s.io/klog/v2"

	"k8s.io/apimachinery/pkg/runtime"
@@ -248,7 +247,9 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
	mapConfig := clientcmdapi.NewConfig()

	for _, kubeconfig := range kubeconfigs {
		mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride)
		if err := merge(mapConfig, kubeconfig); err != nil {
			return nil, err
		}
	}

	// merge all of the struct values in the reverse order so that priority is given correctly
@@ -256,14 +257,20 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
	nonMapConfig := clientcmdapi.NewConfig()
	for i := len(kubeconfigs) - 1; i >= 0; i-- {
		kubeconfig := kubeconfigs[i]
		mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride)
		if err := merge(nonMapConfig, kubeconfig); err != nil {
			return nil, err
		}
	}

	// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
	// get the values we expect.
	config := clientcmdapi.NewConfig()
	mergo.Merge(config, mapConfig, mergo.WithOverride)
	mergo.Merge(config, nonMapConfig, mergo.WithOverride)
	if err := merge(config, mapConfig); err != nil {
		return nil, err
	}
	if err := merge(config, nonMapConfig); err != nil {
		return nil, err
	}

	if rules.ResolvePaths() {
		if err := ResolveLocalPaths(config); err != nil {
vendor/k8s.io/client-go/tools/clientcmd/merge.go (generated, vendored, new file): 121 lines

@@ -0,0 +1,121 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clientcmd

import (
	"fmt"
	"reflect"
	"strings"
)

// recursively merges src into dst:
// - non-pointer struct fields with any exported fields are recursively merged
// - non-pointer struct fields with only unexported fields prefer src if the field is non-zero
// - maps are shallow merged with src keys taking priority over dst
// - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops
func merge[T any](dst, src *T) error {
	if dst == nil {
		return fmt.Errorf("cannot merge into nil pointer")
	}
	if src == nil {
		return nil
	}
	return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem())
}

func mergeValues(fieldNames []string, dst, src reflect.Value) error {
	dstType := dst.Type()
	// no-op if we can't read the src
	if !src.IsValid() {
		return nil
	}
	// sanity check types match
	if srcType := src.Type(); dstType != srcType {
		return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, "."))
	}

	switch dstType.Kind() {
	case reflect.Struct:
		if hasExportedField(dstType) {
			// recursively merge
			for i, n := 0, dstType.NumField(); i < n; i++ {
				if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil {
					return err
				}
			}
		} else if dst.CanSet() {
			// If all fields are unexported, overwrite with src.
			// Using src.IsZero() would make more sense but that's not what mergo did.
			dst.Set(src)
		}

	case reflect.Map:
		if dst.CanSet() && !src.IsZero() {
			// initialize dst if needed
			if dst.IsZero() {
				dst.Set(reflect.MakeMap(dstType))
			}
			// shallow-merge overwriting dst keys with src keys
			for _, mapKey := range src.MapKeys() {
				dst.SetMapIndex(mapKey, src.MapIndex(mapKey))
			}
		}

	case reflect.Slice:
		if dst.CanSet() && src.Len() > 0 {
			// overwrite dst with non-empty src slice
			dst.Set(src)
		}

	case reflect.Pointer:
		if dst.CanSet() && !src.IsZero() {
			// overwrite dst with non-zero values for other types
			if dstType.Elem().Kind() == reflect.Struct {
				// use struct pointer as-is
				dst.Set(src)
			} else {
				// shallow-copy non-struct pointer (interfaces, primitives, etc)
				dst.Set(reflect.New(dstType.Elem()))
				dst.Elem().Set(src.Elem())
			}
		}

	default:
		if dst.CanSet() && !src.IsZero() {
			// overwrite dst with non-zero values for other types
			dst.Set(src)
		}
	}

	return nil
}

// hasExportedField returns true if the given type has any exported fields,
// or if it has any anonymous/embedded struct fields with exported fields
func hasExportedField(dstType reflect.Type) bool {
	for i, n := 0, dstType.NumField(); i < n; i++ {
		field := dstType.Field(i)
		if field.Anonymous && field.Type.Kind() == reflect.Struct {
			if hasExportedField(dstType.Field(i).Type) {
				return true
			}
		} else if len(field.PkgPath) == 0 {
			return true
		}
	}
	return false
}
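merge is unexported, so it can only be exercised from inside the clientcmd package. A minimal in-package test sketch of its semantics, not part of this commit (test name and sample values are assumptions): map keys from src win, and non-zero scalars overwrite.

package clientcmd

import (
	"testing"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func TestMergeSemanticsSketch(t *testing.T) {
	dst := clientcmdapi.NewConfig()
	dst.CurrentContext = "old"
	dst.Clusters["a"] = &clientcmdapi.Cluster{Server: "https://a.old"}

	src := clientcmdapi.NewConfig()
	src.CurrentContext = "new"
	src.Clusters["a"] = &clientcmdapi.Cluster{Server: "https://a.new"}
	src.Clusters["b"] = &clientcmdapi.Cluster{Server: "https://b"}

	if err := merge(dst, src); err != nil {
		t.Fatal(err)
	}
	// src wins: CurrentContext is "new", cluster "a" now points at https://a.new, and "b" was added.
	if dst.CurrentContext != "new" || dst.Clusters["a"].Server != "https://a.new" || dst.Clusters["b"] == nil {
		t.Fatalf("unexpected merge result: %+v", dst)
	}
}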
vendor/k8s.io/client-go/tools/leaderelection/OWNERS (generated, vendored): 2 changes

@@ -2,10 +2,12 @@

approvers:
- mikedanese
- jefftree
reviewers:
- wojtek-t
- deads2k
- mikedanese
- ingvagabund
- jefftree
emeritus_approvers:
- timothysc
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (generated, vendored): 18 changes

@@ -173,7 +173,10 @@ type LeaderElectionConfig struct {
type LeaderCallbacks struct {
	// OnStartedLeading is called when a LeaderElector client starts leading
	OnStartedLeading func(context.Context)
	// OnStoppedLeading is called when a LeaderElector client stops leading
	// OnStoppedLeading is called when a LeaderElector client stops leading.
	// This callback is always called when the LeaderElector exits, even if it did not start leading.
	// Users should not assume that OnStoppedLeading is only called after OnStartedLeading.
	// see: https://github.com/kubernetes/kubernetes/pull/127675#discussion_r1780059887
	OnStoppedLeading func()
	// OnNewLeader is called when the client observes a leader that is
	// not the previously observed leader. This includes the first observed
@@ -277,16 +280,13 @@ func (le *LeaderElector) renew(ctx context.Context) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	wait.Until(func() {
		timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
		defer timeoutCancel()
		err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
		err := wait.PollUntilContextTimeout(ctx, le.config.RetryPeriod, le.config.RenewDeadline, true, func(ctx context.Context) (done bool, err error) {
			if !le.config.Coordinated {
				return le.tryAcquireOrRenew(timeoutCtx), nil
				return le.tryAcquireOrRenew(ctx), nil
			} else {
				return le.tryCoordinatedRenew(timeoutCtx), nil
				return le.tryCoordinatedRenew(ctx), nil
			}
		}, timeoutCtx.Done())

		})
		le.maybeReportTransition()
		desc := le.config.Lock.Describe()
		if err == nil {
@@ -426,7 +426,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
		le.setObservedRecord(&leaderElectionRecord)
		return true
	}
	klog.Errorf("Failed to update lock optimitically: %v, falling back to slow path", err)
	klog.Errorf("Failed to update lock optimistically: %v, falling back to slow path", err)
}

// 2. obtain or create the ElectionRecord
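The renew loop now lets wait.PollUntilContextTimeout own the deadline instead of building a separate timeoutCtx for the older wait.PollImmediateUntil. A standalone sketch of the new call shape, not part of this commit (interval, timeout, and condition are made-up values for illustration):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll immediately, then every 200ms, giving up once 2s have elapsed.
	err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 2*time.Second, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // pretend the renewal succeeds on the third try
		})
	fmt.Println("attempts:", attempts, "err:", err)
}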
vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go (generated, vendored): 32 changes

@@ -22,14 +22,14 @@ import (
	"time"

	v1 "k8s.io/api/coordination/v1"
	v1alpha1 "k8s.io/api/coordination/v1alpha1"
	v1alpha2 "k8s.io/api/coordination/v1alpha2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
	coordinationv1alpha2client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
@@ -43,7 +43,7 @@ type CacheSyncWaiter interface {
}

type LeaseCandidate struct {
	leaseClient            coordinationv1alpha1client.LeaseCandidateInterface
	leaseClient            coordinationv1alpha2client.LeaseCandidateInterface
	leaseCandidateInformer cache.SharedIndexInformer
	informerFactory        informers.SharedInformerFactory
	hasSynced              cache.InformerSynced
@@ -60,7 +60,7 @@ type LeaseCandidate struct {
	clock clock.Clock

	binaryVersion, emulationVersion string
	preferredStrategies             []v1.CoordinatedLeaseStrategy
	strategy                        v1.CoordinatedLeaseStrategy
}

// NewCandidate creates new LeaseCandidate controller that creates a
@@ -73,7 +73,7 @@ func NewCandidate(clientset kubernetes.Interface,
	candidateName string,
	targetLease string,
	binaryVersion, emulationVersion string,
	preferredStrategies []v1.CoordinatedLeaseStrategy,
	strategy v1.CoordinatedLeaseStrategy,
) (*LeaseCandidate, CacheSyncWaiter, error) {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String()
	// A separate informer factory is required because this must start before informerFactories
@@ -84,10 +84,10 @@ func NewCandidate(clientset kubernetes.Interface,
			options.FieldSelector = fieldSelector
		}),
	)
	leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer()
	leaseCandidateInformer := informerFactory.Coordination().V1alpha2().LeaseCandidates().Informer()

	lc := &LeaseCandidate{
		leaseClient:            clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace),
		leaseClient:            clientset.CoordinationV1alpha2().LeaseCandidates(candidateNamespace),
		leaseCandidateInformer: leaseCandidateInformer,
		informerFactory:        informerFactory,
		name:                   candidateName,
@@ -96,13 +96,13 @@ func NewCandidate(clientset kubernetes.Interface,
		clock:                  clock.RealClock{},
		binaryVersion:          binaryVersion,
		emulationVersion:       emulationVersion,
		preferredStrategies:    preferredStrategies,
		strategy:               strategy,
	}
	lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"})

	h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) {
			if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok {
			if leasecandidate, ok := newObj.(*v1alpha2.LeaseCandidate); ok {
				if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) {
					lc.enqueueLease()
				}
@@ -184,17 +184,17 @@ func (c *LeaseCandidate) ensureLease(ctx context.Context) error {
	return nil
}

func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate {
	lc := &v1alpha1.LeaseCandidate{
func (c *LeaseCandidate) newLeaseCandidate() *v1alpha2.LeaseCandidate {
	lc := &v1alpha2.LeaseCandidate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      c.name,
			Namespace: c.namespace,
		},
		Spec: v1alpha1.LeaseCandidateSpec{
			LeaseName:           c.leaseName,
			BinaryVersion:       c.binaryVersion,
			EmulationVersion:    c.emulationVersion,
			PreferredStrategies: c.preferredStrategies,
		Spec: v1alpha2.LeaseCandidateSpec{
			LeaseName:        c.leaseName,
			BinaryVersion:    c.binaryVersion,
			EmulationVersion: c.emulationVersion,
			Strategy:         c.strategy,
		},
	}
	lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (generated, vendored): 74 changes

@@ -35,74 +35,8 @@ const (
	endpointsResourceLock = "endpoints"
	configMapsResourceLock = "configmaps"
	LeasesResourceLock = "leases"
	// When using endpointsLeasesResourceLock, you need to ensure that
	// API Priority & Fairness is configured with non-default flow-schema
	// that will catch the necessary operations on leader-election related
	// endpoint objects.
	//
	// The example of such flow scheme could look like this:
	// apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
	// kind: FlowSchema
	// metadata:
	//   name: my-leader-election
	// spec:
	//   distinguisherMethod:
	//     type: ByUser
	//   matchingPrecedence: 200
	//   priorityLevelConfiguration:
	//     name: leader-election # reference the <leader-election> PL
	//   rules:
	//   - resourceRules:
	//     - apiGroups:
	//       - ""
	//       namespaces:
	//       - '*'
	//       resources:
	//       - endpoints
	//       verbs:
	//       - get
	//       - create
	//       - update
	//     subjects:
	//     - kind: ServiceAccount
	//       serviceAccount:
	//         name: '*'
	//         namespace: kube-system
	endpointsLeasesResourceLock = "endpointsleases"
	// When using configMapsLeasesResourceLock, you need to ensure that
	// API Priority & Fairness is configured with non-default flow-schema
	// that will catch the necessary operations on leader-election related
	// configmap objects.
	//
	// The example of such flow scheme could look like this:
	// apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
	// kind: FlowSchema
	// metadata:
	//   name: my-leader-election
	// spec:
	//   distinguisherMethod:
	//     type: ByUser
	//   matchingPrecedence: 200
	//   priorityLevelConfiguration:
	//     name: leader-election # reference the <leader-election> PL
	//   rules:
	//   - resourceRules:
	//     - apiGroups:
	//       - ""
	//       namespaces:
	//       - '*'
	//       resources:
	//       - configmaps
	//       verbs:
	//       - get
	//       - create
	//       - update
	//     subjects:
	//     - kind: ServiceAccount
	//       serviceAccount:
	//         name: '*'
	//         namespace: kube-system
	configMapsLeasesResourceLock = "configmapsleases"
	endpointsLeasesResourceLock = "endpointsleases"
	configMapsLeasesResourceLock = "configmapsleases"
)

// LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -177,9 +111,9 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
	}
	switch lockType {
	case endpointsResourceLock:
		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock)
		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", LeasesResourceLock)
	case configMapsResourceLock:
		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock)
		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", LeasesResourceLock)
	case LeasesResourceLock:
		return leaseLock, nil
	case endpointsLeasesResourceLock:
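Both reworded errors steer callers to LeasesResourceLock. A minimal, assumed-for-illustration sketch of constructing a lease lock, not part of this commit (namespace, lock name, and identity are placeholders):

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// "endpoints" and "configmaps" lock types now return errors; leases are the supported path.
	lock, err := resourcelock.New(resourcelock.LeasesResourceLock, "kube-system", "example-lock",
		client.CoreV1(), client.CoordinationV1(), resourcelock.ResourceLockConfig{Identity: "example-holder"})
	if err != nil {
		panic(err)
	}
	_ = lock
}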
vendor/k8s.io/client-go/tools/portforward/portforward.go (generated, vendored): 19 changes

@@ -37,7 +37,13 @@ import (
// TODO move to API machinery and re-unify with kubelet/server/portfoward
const PortForwardProtocolV1Name = "portforward.k8s.io"

var ErrLostConnectionToPod = errors.New("lost connection to pod")
var (
	// error returned whenever we lost connection to a pod
	ErrLostConnectionToPod = errors.New("lost connection to pod")

	// set of error we're expecting during port-forwarding
	networkClosedError = "use of closed network connection"
)

// PortForwarder knows how to listen for local connections and forward them to
// a remote pod via an upgraded HTTP request.
@@ -312,7 +318,7 @@ func (pf *PortForwarder) waitForConnection(listener net.Listener, port Forwarded
		conn, err := listener.Accept()
		if err != nil {
			// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
			if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
			if !strings.Contains(strings.ToLower(err.Error()), networkClosedError) {
				runtime.HandleError(fmt.Errorf("error accepting connection on port %d: %v", port.Local, err))
			}
			return
@@ -381,7 +387,7 @@ func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {

	go func() {
		// Copy from the remote side to the local port.
		if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
		if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(strings.ToLower(err.Error()), networkClosedError) {
			runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
		}

@@ -394,7 +400,7 @@ func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
		defer dataStream.Close()

		// Copy from the local port to the remote side.
		if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
		if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(strings.ToLower(err.Error()), networkClosedError) {
			runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
			// break out of the select below without waiting for the other copy to finish
			close(localError)
@@ -407,6 +413,11 @@ func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
	case <-localError:
	}

	// reset dataStream to discard any unsent data, preventing port forwarding from being blocked.
	// we must reset dataStream before waiting on errorChan, otherwise,
	// the blocking data will affect errorStream and cause <-errorChan to block indefinitely.
	_ = dataStream.Reset()

	// always expect something on errorChan (it may be nil)
	err = <-errorChan
	if err != nil {
vendor/k8s.io/client-go/tools/record/events_cache.go (generated, vendored): 8 changes

@@ -23,14 +23,13 @@ import (
	"sync"
	"time"

	"github.com/golang/groupcache/lru"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/utils/clock"
	"k8s.io/utils/lru"
)

const (
@@ -77,6 +76,7 @@ func getSpamKey(event *v1.Event) string {
		event.InvolvedObject.Name,
		string(event.InvolvedObject.UID),
		event.InvolvedObject.APIVersion,
		event.Type,
	},
		"")
}
@@ -90,8 +90,6 @@ type EventFilterFunc func(event *v1.Event) bool
// EventSourceObjectSpamFilter is responsible for throttling
// the amount of events a source and object can produce.
type EventSourceObjectSpamFilter struct {
	sync.RWMutex

	// the cache that manages last synced state
	cache *lru.Cache

@@ -133,8 +131,6 @@ func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
	eventKey := f.spamKeyFunc(event)

	// do we have a record of similar events in our cache?
	f.Lock()
	defer f.Unlock()
	value, found := f.cache.Get(eventKey)
	if found {
		record = value.(spamRecord)
vendor/k8s.io/client-go/tools/remotecommand/v4.go (generated, vendored): 2 changes

@@ -115,5 +115,5 @@ func (d *errorDecoderV4) decode(message []byte) error {
		return errors.New("error stream protocol error: unknown error")
	}

	return fmt.Errorf(status.Message)
	return errors.New(status.Message)
}
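Passing a server-supplied message to fmt.Errorf as the format string mangles any stray % verbs (and trips go vet's printf check for non-constant format strings); errors.New keeps it verbatim. A tiny illustrative sketch, not part of this commit (the sample message is made up):

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "volume resize 100% done" // a message that happens to contain a % sequence
	fmt.Println(fmt.Errorf(msg))     // the "% d" sequence is interpreted as a format verb and mangled
	fmt.Println(errors.New(msg))     // preserved verbatim: volume resize 100% done
}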
vendor/k8s.io/client-go/tools/watch/retrywatcher.go (generated, vendored): 29 changes

@@ -128,6 +128,35 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
		return false, 0
	}

	// Check if the watch failed due to the client not having permission to watch the resource or the credentials
	// being invalid (e.g. expired token).
	if apierrors.IsForbidden(err) || apierrors.IsUnauthorized(err) {
		// Add more detail since the forbidden message returned by the Kubernetes API is just "unknown".
		klog.ErrorS(err, msg+": ensure the client has valid credentials and watch permissions on the resource")

		if apiStatus, ok := err.(apierrors.APIStatus); ok {
			statusErr := apiStatus.Status()

			sent := rw.send(watch.Event{
				Type:   watch.Error,
				Object: &statusErr,
			})
			if !sent {
				// This likely means the RetryWatcher is stopping but return false so the caller to doReceive can
				// verify this and potentially retry.
				klog.Error("Failed to send the Unauthorized or Forbidden watch event")

				return false, 0
			}
		} else {
			// This should never happen since apierrors only handles apierrors.APIStatus. Still, this is an
			// unrecoverable error, so still allow it to return true below.
			klog.ErrorS(err, msg+": encountered an unexpected Unauthorized or Forbidden error type")
		}

		return true, 0
	}

	klog.ErrorS(err, msg)
	// Retry
	return false, 0
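A small standalone sketch of the error classification this new block performs, not part of this commit, using a synthetic Forbidden error (the resource and message are placeholders):

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	err := apierrors.NewForbidden(schema.GroupResource{Resource: "pods"}, "example", fmt.Errorf("RBAC denied"))
	if apierrors.IsForbidden(err) || apierrors.IsUnauthorized(err) {
		if apiStatus, ok := err.(apierrors.APIStatus); ok {
			status := apiStatus.Status()
			// status can then be wrapped in a watch.Event{Type: watch.Error, Object: &status}
			fmt.Println(status.Code, status.Reason)
		}
	}
}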