Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
rebase: update K8s packages to v0.32.1

Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
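The bump itself lands in go.mod, with the vendored sources below refreshed afterwards (typically via go mod vendor). A representative, non-exhaustive sketch of what the updated require entries might look like; the exact module list is illustrative, not copied from this commit:

require (
	k8s.io/api v0.32.1
	k8s.io/apimachinery v0.32.1
	k8s.io/client-go v0.32.1
)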
vendor/k8s.io/client-go/tools/leaderelection/OWNERS (generated, vendored): 2 lines changed
@@ -2,10 +2,12 @@
 
 approvers:
 - mikedanese
+- jefftree
 reviewers:
 - wojtek-t
 - deads2k
 - mikedanese
 - ingvagabund
+- jefftree
 emeritus_approvers:
 - timothysc
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (generated, vendored): 18 lines changed
@@ -173,7 +173,10 @@ type LeaderElectionConfig struct {
 type LeaderCallbacks struct {
 	// OnStartedLeading is called when a LeaderElector client starts leading
 	OnStartedLeading func(context.Context)
-	// OnStoppedLeading is called when a LeaderElector client stops leading
+	// OnStoppedLeading is called when a LeaderElector client stops leading.
+	// This callback is always called when the LeaderElector exits, even if it did not start leading.
+	// Users should not assume that OnStoppedLeading is only called after OnStartedLeading.
+	// see: https://github.com/kubernetes/kubernetes/pull/127675#discussion_r1780059887
 	OnStoppedLeading func()
 	// OnNewLeader is called when the client observes a leader that is
 	// not the previously observed leader. This includes the first observed
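The clarified contract matters on the caller side: OnStoppedLeading now explicitly fires whenever the elector exits, even if leadership was never acquired. A minimal sketch of callbacks written against that contract; the newCallbacks helper, the id value, and the run function are illustrative and not part of this change:

package main

import (
	"context"
	"log"

	"k8s.io/client-go/tools/leaderelection"
)

// newCallbacks builds callbacks that respect the documented contract:
// OnStoppedLeading runs on every exit, even if OnStartedLeading never did.
func newCallbacks(id string, run func(context.Context)) leaderelection.LeaderCallbacks {
	return leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			run(ctx) // ctx is cancelled when leadership is lost
		},
		OnStoppedLeading: func() {
			// Do not assume leadership was ever held before this point.
			log.Printf("%s: leader election loop exited", id)
		},
		OnNewLeader: func(identity string) {
			if identity != id {
				log.Printf("observed new leader: %s", identity)
			}
		},
	}
}

func main() {
	cb := newCallbacks("node-a", func(ctx context.Context) { <-ctx.Done() })
	_ = cb // pass cb as LeaderElectionConfig.Callbacks when building the elector
}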
@@ -277,16 +280,13 @@ func (le *LeaderElector) renew(ctx context.Context) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	wait.Until(func() {
-		timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
-		defer timeoutCancel()
-		err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
+		err := wait.PollUntilContextTimeout(ctx, le.config.RetryPeriod, le.config.RenewDeadline, true, func(ctx context.Context) (done bool, err error) {
 			if !le.config.Coordinated {
-				return le.tryAcquireOrRenew(timeoutCtx), nil
+				return le.tryAcquireOrRenew(ctx), nil
 			} else {
-				return le.tryCoordinatedRenew(timeoutCtx), nil
+				return le.tryCoordinatedRenew(ctx), nil
 			}
-		}, timeoutCtx.Done())
-
+		})
 		le.maybeReportTransition()
 		desc := le.config.Lock.Describe()
 		if err == nil {
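The renew loop replaces the deprecated wait.PollImmediateUntil plus a hand-rolled timeout context with wait.PollUntilContextTimeout, which owns both the retry interval and the overall deadline. A standalone sketch of the same polling pattern; renewOnce merely stands in for tryAcquireOrRenew/tryCoordinatedRenew and is illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// renewOnce is a hypothetical stand-in for a single renewal attempt.
func renewOnce(ctx context.Context) bool { return true }

func main() {
	// Retry every 2s, give up after 10s; the `true` argument runs the
	// condition immediately instead of waiting one interval first.
	err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 10*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Return true to stop polling, false to retry, or an error to abort.
			return renewOnce(ctx), nil
		})
	if err != nil {
		fmt.Println("failed to renew within the deadline:", err)
	}
}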
@@ -426,7 +426,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
 			le.setObservedRecord(&leaderElectionRecord)
 			return true
 		}
-		klog.Errorf("Failed to update lock optimitically: %v, falling back to slow path", err)
+		klog.Errorf("Failed to update lock optimistically: %v, falling back to slow path", err)
 	}
 
 	// 2. obtain or create the ElectionRecord
vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go (generated, vendored): 32 lines changed
@@ -22,14 +22,14 @@ import (
 	"time"
 
 	v1 "k8s.io/api/coordination/v1"
-	v1alpha1 "k8s.io/api/coordination/v1alpha1"
+	v1alpha2 "k8s.io/api/coordination/v1alpha2"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
-	coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
+	coordinationv1alpha2client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
@@ -43,7 +43,7 @@ type CacheSyncWaiter interface {
 }
 
 type LeaseCandidate struct {
-	leaseClient            coordinationv1alpha1client.LeaseCandidateInterface
+	leaseClient            coordinationv1alpha2client.LeaseCandidateInterface
 	leaseCandidateInformer cache.SharedIndexInformer
 	informerFactory        informers.SharedInformerFactory
 	hasSynced              cache.InformerSynced
@@ -60,7 +60,7 @@ type LeaseCandidate struct {
 	clock clock.Clock
 
 	binaryVersion, emulationVersion string
-	preferredStrategies             []v1.CoordinatedLeaseStrategy
+	strategy                        v1.CoordinatedLeaseStrategy
 }
 
 // NewCandidate creates new LeaseCandidate controller that creates a
@@ -73,7 +73,7 @@ func NewCandidate(clientset kubernetes.Interface,
 	candidateName string,
 	targetLease string,
 	binaryVersion, emulationVersion string,
-	preferredStrategies []v1.CoordinatedLeaseStrategy,
+	strategy v1.CoordinatedLeaseStrategy,
 ) (*LeaseCandidate, CacheSyncWaiter, error) {
 	fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String()
 	// A separate informer factory is required because this must start before informerFactories
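With the move to coordination v1alpha2, NewCandidate takes a single CoordinatedLeaseStrategy instead of a preferredStrategies slice. A hedged sketch of a caller against that signature; the namespace, names, versions, and the choice of OLEStrategy are illustrative, and coordinated leader election is an alpha feature that must be enabled on the cluster:

package main

import (
	"log"

	coordinationv1 "k8s.io/api/coordination/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	// Argument values are illustrative; targetLease names the Lease this
	// candidate competes for.
	candidate, waiter, err := leaderelection.NewCandidate(
		clientset,
		"kube-system",            // candidateNamespace
		"my-controller-instance", // candidateName
		"my-controller",          // targetLease
		"1.32.1",                 // binaryVersion
		"1.32.1",                 // emulationVersion
		coordinationv1.OLEStrategy,
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = candidate // typically run alongside an elector configured with Coordinated: true
	_ = waiter
}

The returned LeaseCandidate is typically run alongside the LeaderElector; the elector's Coordinated flag, visible in the renew hunk above, is what routes renewals through tryCoordinatedRenew.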
@@ -84,10 +84,10 @@ func NewCandidate(clientset kubernetes.Interface,
 			options.FieldSelector = fieldSelector
 		}),
 	)
-	leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer()
+	leaseCandidateInformer := informerFactory.Coordination().V1alpha2().LeaseCandidates().Informer()
 
 	lc := &LeaseCandidate{
-		leaseClient:            clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace),
+		leaseClient:            clientset.CoordinationV1alpha2().LeaseCandidates(candidateNamespace),
 		leaseCandidateInformer: leaseCandidateInformer,
 		informerFactory:        informerFactory,
 		name:                   candidateName,
@@ -96,13 +96,13 @@ func NewCandidate(clientset kubernetes.Interface,
 		clock:                  clock.RealClock{},
 		binaryVersion:          binaryVersion,
 		emulationVersion:       emulationVersion,
-		preferredStrategies:    preferredStrategies,
+		strategy:               strategy,
 	}
 	lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"})
 
 	h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		UpdateFunc: func(oldObj, newObj interface{}) {
-			if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok {
+			if leasecandidate, ok := newObj.(*v1alpha2.LeaseCandidate); ok {
 				if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) {
 					lc.enqueueLease()
 				}
@@ -184,17 +184,17 @@ func (c *LeaseCandidate) ensureLease(ctx context.Context) error {
 	return nil
 }
 
-func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate {
-	lc := &v1alpha1.LeaseCandidate{
+func (c *LeaseCandidate) newLeaseCandidate() *v1alpha2.LeaseCandidate {
+	lc := &v1alpha2.LeaseCandidate{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      c.name,
 			Namespace: c.namespace,
 		},
-		Spec: v1alpha1.LeaseCandidateSpec{
-			LeaseName:           c.leaseName,
-			BinaryVersion:       c.binaryVersion,
-			EmulationVersion:    c.emulationVersion,
-			PreferredStrategies: c.preferredStrategies,
+		Spec: v1alpha2.LeaseCandidateSpec{
+			LeaseName:        c.leaseName,
+			BinaryVersion:    c.binaryVersion,
+			EmulationVersion: c.emulationVersion,
+			Strategy:         c.strategy,
 		},
 	}
 	lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (generated, vendored): 74 lines changed
@@ -35,74 +35,8 @@ const (
 	endpointsResourceLock  = "endpoints"
 	configMapsResourceLock = "configmaps"
 	LeasesResourceLock     = "leases"
-	// When using endpointsLeasesResourceLock, you need to ensure that
-	// API Priority & Fairness is configured with non-default flow-schema
-	// that will catch the necessary operations on leader-election related
-	// endpoint objects.
-	//
-	// The example of such flow scheme could look like this:
-	//   apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
-	//   kind: FlowSchema
-	//   metadata:
-	//     name: my-leader-election
-	//   spec:
-	//     distinguisherMethod:
-	//       type: ByUser
-	//     matchingPrecedence: 200
-	//     priorityLevelConfiguration:
-	//       name: leader-election # reference the <leader-election> PL
-	//     rules:
-	//     - resourceRules:
-	//       - apiGroups:
-	//         - ""
-	//         namespaces:
-	//         - '*'
-	//         resources:
-	//         - endpoints
-	//         verbs:
-	//         - get
-	//         - create
-	//         - update
-	//       subjects:
-	//       - kind: ServiceAccount
-	//         serviceAccount:
-	//           name: '*'
-	//           namespace: kube-system
-	endpointsLeasesResourceLock = "endpointsleases"
-	// When using configMapsLeasesResourceLock, you need to ensure that
-	// API Priority & Fairness is configured with non-default flow-schema
-	// that will catch the necessary operations on leader-election related
-	// configmap objects.
-	//
-	// The example of such flow scheme could look like this:
-	//   apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
-	//   kind: FlowSchema
-	//   metadata:
-	//     name: my-leader-election
-	//   spec:
-	//     distinguisherMethod:
-	//       type: ByUser
-	//     matchingPrecedence: 200
-	//     priorityLevelConfiguration:
-	//       name: leader-election # reference the <leader-election> PL
-	//     rules:
-	//     - resourceRules:
-	//       - apiGroups:
-	//         - ""
-	//         namespaces:
-	//         - '*'
-	//         resources:
-	//         - configmaps
-	//         verbs:
-	//         - get
-	//         - create
-	//         - update
-	//       subjects:
-	//       - kind: ServiceAccount
-	//         serviceAccount:
-	//           name: '*'
-	//           namespace: kube-system
-	configMapsLeasesResourceLock = "configmapsleases"
+	endpointsLeasesResourceLock  = "endpointsleases"
+	configMapsLeasesResourceLock = "configmapsleases"
 )
 
 // LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -177,9 +111,9 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
 	}
 	switch lockType {
 	case endpointsResourceLock:
-		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock)
+		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", LeasesResourceLock)
 	case configMapsResourceLock:
-		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock)
+		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", LeasesResourceLock)
 	case LeasesResourceLock:
 		return leaseLock, nil
 	case endpointsLeasesResourceLock:
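Since the endpoints and configmaps lock types now fail fast with an error pointing at leases, callers should construct the Lease-based lock directly. A hedged usage sketch; the namespace, lock name, and identity are illustrative:

package main

import (
	"log"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	hostname, _ := os.Hostname()
	// "leases" is the supported lock type here; "endpoints" and "configmaps"
	// return the migration errors shown in the hunk above.
	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system",   // namespace (illustrative)
		"my-controller", // lock name (illustrative)
		clientset.CoreV1(),
		clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: hostname},
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = lock // pass as LeaderElectionConfig.Lock
}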