rebase: update kubernetes to latest

updating the kubernetes release to the
latest in the main go.mod

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2024-08-19 10:01:33 +02:00
Committed by: mergify[bot]
Parent: 63c4c05b35
Commit: 5a66991bb3
2173 changed files with 98906 additions and 61334 deletions


@@ -159,6 +159,10 @@ type LeaderElectionConfig struct {
// Name is the name of the resource lock for debugging
Name string
// Coordinated will use the Coordinated Leader Election feature
// WARNING: Coordinated leader election is ALPHA.
Coordinated bool
}
// LeaderCallbacks are callbacks that are triggered during certain
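
Not part of the diff: a minimal sketch of how a component might opt in to the new alpha field above. The namespace, lease name, identity and timings are assumed illustrative values, and the cluster must also have the CoordinatedLeaderElection feature gate enabled.

package main

import (
	"context"
	"os"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)
	id, _ := os.Hostname()

	lock, err := resourcelock.New(resourcelock.LeasesResourceLock,
		"kube-system", "example-controller", // assumed namespace and lease name
		clientset.CoreV1(), clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id})
	if err != nil {
		klog.Fatal(err)
	}

	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		Name:          "example-controller",
		Coordinated:   true, // opt in to the coordinated (alpha) election path
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// leader-only work starts here
			},
			OnStoppedLeading: func() {
				klog.Fatal("leader election lost")
			},
		},
	})
}
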
@@ -249,7 +253,11 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
desc := le.config.Lock.Describe()
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew(ctx)
if !le.config.Coordinated {
succeeded = le.tryAcquireOrRenew(ctx)
} else {
succeeded = le.tryCoordinatedRenew(ctx)
}
le.maybeReportTransition()
if !succeeded {
klog.V(4).Infof("failed to acquire lease %v", desc)
@@ -272,7 +280,11 @@ func (le *LeaderElector) renew(ctx context.Context) {
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
defer timeoutCancel()
err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
return le.tryAcquireOrRenew(timeoutCtx), nil
if !le.config.Coordinated {
return le.tryAcquireOrRenew(timeoutCtx), nil
} else {
return le.tryCoordinatedRenew(timeoutCtx), nil
}
}, timeoutCtx.Done())
le.maybeReportTransition()
@@ -304,7 +316,9 @@ func (le *LeaderElector) release() bool {
RenewTime: now,
AcquireTime: now,
}
if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), le.config.RenewDeadline)
defer timeoutCancel()
if err := le.config.Lock.Update(timeoutCtx, leaderElectionRecord); err != nil {
klog.Errorf("Failed to release lock: %v", err)
return false
}
@@ -313,6 +327,81 @@ func (le *LeaderElector) release() bool {
return true
}
// tryCoordinatedRenew checks whether this elector already holds the lease and,
// if so, tries to renew it. Returns true on success, else false.
func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
RenewTime: now,
AcquireTime: now,
}
// 1. obtain the electionRecord
oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx)
if err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
return false
}
klog.Infof("lease lock not found: %v", le.config.Lock.Describe())
return false
}
// 2. Record obtained, check the Identity & Time
if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) {
le.setObservedRecord(oldLeaderElectionRecord)
le.observedRawRecord = oldLeaderElectionRawRecord
}
hasExpired := le.observedTime.Add(time.Second * time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).Before(now.Time)
if hasExpired {
klog.Infof("lock has expired: %v", le.config.Lock.Describe())
return false
}
if !le.IsLeader() {
klog.V(6).Infof("lock is held by %v and has not yet expired: %v", oldLeaderElectionRecord.HolderIdentity, le.config.Lock.Describe())
return false
}
// 2b. If the lease has been marked as "end of term", don't renew it
if le.IsLeader() && oldLeaderElectionRecord.PreferredHolder != "" {
klog.V(4).Infof("lock is marked as 'end of term': %v", le.config.Lock.Describe())
// TODO: Instead of letting the lease expire, the holder could delete it directly.
// This will not be compatible with all controllers, so it needs to be opt-in behavior.
// We must ensure all code guarded by this lease has successfully completed
// prior to releasing or there may be two processes
// simultaneously acting on the critical path.
// Usually once this returns false, the process is terminated.
// xref: OnStoppedLeading
return false
}
// 3. We're going to try to update. The leaderElectionRecord is set to its default
// here. Let's correct it before updating.
if le.IsLeader() {
leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
leaderElectionRecord.Strategy = oldLeaderElectionRecord.Strategy
le.metrics.slowpathExercised(le.config.Name)
} else {
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
}
// update the lock itself
if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil {
klog.Errorf("Failed to update lock: %v", err)
return false
}
le.setObservedRecord(&leaderElectionRecord)
return true
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
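
The "end of term" comments above describe the hand-off: when preferredHolder is set on the lease, the current leader simply stops renewing, lets the lease expire, and normally exits from OnStoppedLeading. A minimal sketch of such a callback, assuming the same imports as the earlier example and that cancel guards all leader-only work (the helper name is illustrative):

// onStoppedLeading returns an OnStoppedLeading callback that stops all work
// guarded by the lease before exiting, so the preferred holder can take over
// once the lease expires. cancel is assumed to cancel every leader-only goroutine.
func onStoppedLeading(cancel context.CancelFunc) func() {
	return func() {
		cancel() // make sure nothing guarded by the lease keeps running
		klog.Info("leadership ended; exiting so the preferred holder can take over")
		os.Exit(0)
	}
}
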


@@ -0,0 +1,202 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"context"
"reflect"
"time"
v1 "k8s.io/api/coordination/v1"
v1alpha1 "k8s.io/api/coordination/v1alpha1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
const requeueInterval = 5 * time.Minute
type CacheSyncWaiter interface {
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
}
type LeaseCandidate struct {
leaseClient coordinationv1alpha1client.LeaseCandidateInterface
leaseCandidateInformer cache.SharedIndexInformer
informerFactory informers.SharedInformerFactory
hasSynced cache.InformerSynced
// At most there will be one item in this Queue (since we only watch one item)
queue workqueue.TypedRateLimitingInterface[int]
name string
namespace string
// controller lease
leaseName string
clock clock.Clock
binaryVersion, emulationVersion string
preferredStrategies []v1.CoordinatedLeaseStrategy
}
// NewCandidate creates a new LeaseCandidate controller that creates a
// LeaseCandidate object if it does not exist and watches changes
// to the corresponding object and renews if PingTime is set.
// WARNING: This is an ALPHA feature. Ensure that the CoordinatedLeaderElection
// feature gate is on.
func NewCandidate(clientset kubernetes.Interface,
candidateNamespace string,
candidateName string,
targetLease string,
binaryVersion, emulationVersion string,
preferredStrategies []v1.CoordinatedLeaseStrategy,
) (*LeaseCandidate, CacheSyncWaiter, error) {
fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String()
// A separate informer factory is required because this must start before informerFactories
// are started for leader-elected components.
informerFactory := informers.NewSharedInformerFactoryWithOptions(
clientset, 5*time.Minute,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fieldSelector
}),
)
leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer()
lc := &LeaseCandidate{
leaseClient: clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace),
leaseCandidateInformer: leaseCandidateInformer,
informerFactory: informerFactory,
name: candidateName,
namespace: candidateNamespace,
leaseName: targetLease,
clock: clock.RealClock{},
binaryVersion: binaryVersion,
emulationVersion: emulationVersion,
preferredStrategies: preferredStrategies,
}
lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"})
h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok {
if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) {
lc.enqueueLease()
}
}
},
})
if err != nil {
return nil, nil, err
}
lc.hasSynced = h.HasSynced
return lc, informerFactory, nil
}
func (c *LeaseCandidate) Run(ctx context.Context) {
defer c.queue.ShutDown()
c.informerFactory.Start(ctx.Done())
if !cache.WaitForNamedCacheSync("leasecandidateclient", ctx.Done(), c.hasSynced) {
return
}
c.enqueueLease()
go c.runWorker(ctx)
<-ctx.Done()
}
func (c *LeaseCandidate) runWorker(ctx context.Context) {
for c.processNextWorkItem(ctx) {
}
}
func (c *LeaseCandidate) processNextWorkItem(ctx context.Context) bool {
key, shutdown := c.queue.Get()
if shutdown {
return false
}
defer c.queue.Done(key)
err := c.ensureLease(ctx)
if err == nil {
c.queue.AddAfter(key, requeueInterval)
return true
}
utilruntime.HandleError(err)
c.queue.AddRateLimited(key)
return true
}
func (c *LeaseCandidate) enqueueLease() {
c.queue.Add(0)
}
// ensureLease creates the lease candidate if it does not exist and renews it
// if it already exists. Returns any error that occurs.
func (c *LeaseCandidate) ensureLease(ctx context.Context) error {
lease, err := c.leaseClient.Get(ctx, c.name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Creating lease candidate")
// lease does not exist, create it.
leaseToCreate := c.newLeaseCandidate()
if _, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{}); err != nil {
return err
}
klog.V(2).Infof("Created lease candidate")
return nil
} else if err != nil {
return err
}
klog.V(2).Infof("lease candidate exists. Renewing.")
clone := lease.DeepCopy()
clone.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
_, err = c.leaseClient.Update(ctx, clone, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
}
func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate {
lc := &v1alpha1.LeaseCandidate{
ObjectMeta: metav1.ObjectMeta{
Name: c.name,
Namespace: c.namespace,
},
Spec: v1alpha1.LeaseCandidateSpec{
LeaseName: c.leaseName,
BinaryVersion: c.binaryVersion,
EmulationVersion: c.emulationVersion,
PreferredStrategies: c.preferredStrategies,
},
}
lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
return lc
}
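
Not part of the diff: a rough sketch of how this candidate controller might be wired next to the elector from the first file. ctx, clientset and id are as in the earlier sketch, v1 is "k8s.io/api/coordination/v1", and the namespace, names, versions and strategy string are assumed illustrative values.

// Each replica publishes a LeaseCandidate for the target lease, then runs the
// elector with Coordinated: true so the election controller can pick a leader.
candidate, waiter, err := leaderelection.NewCandidate(
	clientset,
	"kube-system",               // namespace for the LeaseCandidate object
	"example-controller-"+id,    // unique candidate name, one per replica
	"example-controller",        // name of the Lease being competed for
	"1.31.0", "1.31.0",          // binary and emulation versions
	[]v1.CoordinatedLeaseStrategy{"OldestEmulationVersion"}, // assumed strategy value
)
if err != nil {
	klog.Fatal(err)
}
go candidate.Run(ctx)
// Optionally block until the candidate informer has synced.
_ = waiter.WaitForCacheSync(ctx.Done())
// ...then call leaderelection.RunOrDie with Coordinated: true as sketched earlier.
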


@@ -19,14 +19,15 @@ package resourcelock
import (
"context"
"fmt"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"time"
v1 "k8s.io/api/coordination/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
)
const (
@@ -114,11 +115,13 @@ type LeaderElectionRecord struct {
// attempt to acquire leases with empty identities and will wait for the full lease
// interval to expire before attempting to reacquire. This value is set to empty when
// a client voluntarily steps down.
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
RenewTime metav1.Time `json:"renewTime"`
LeaderTransitions int `json:"leaderTransitions"`
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
RenewTime metav1.Time `json:"renewTime"`
LeaderTransitions int `json:"leaderTransitions"`
Strategy v1.CoordinatedLeaseStrategy `json:"strategy"`
PreferredHolder string `json:"preferredHolder"`
}
// EventRecorder records a change in the ResourceLock.


@@ -122,6 +122,12 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
if spec.RenewTime != nil {
r.RenewTime = metav1.Time{Time: spec.RenewTime.Time}
}
if spec.PreferredHolder != nil {
r.PreferredHolder = *spec.PreferredHolder
}
if spec.Strategy != nil {
r.Strategy = *spec.Strategy
}
return &r
}
@@ -129,11 +135,18 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
leaseDurationSeconds := int32(ler.LeaseDurationSeconds)
leaseTransitions := int32(ler.LeaderTransitions)
return coordinationv1.LeaseSpec{
spec := coordinationv1.LeaseSpec{
HolderIdentity: &ler.HolderIdentity,
LeaseDurationSeconds: &leaseDurationSeconds,
AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time},
LeaseTransitions: &leaseTransitions,
}
if ler.PreferredHolder != "" {
spec.PreferredHolder = &ler.PreferredHolder
}
if ler.Strategy != "" {
spec.Strategy = &ler.Strategy
}
return spec
}
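
For illustration, a small sketch of how the two new fields round-trip through these helpers; the identity and strategy strings are assumed values, not taken from this diff.

// Strategy and PreferredHolder only materialize in the LeaseSpec when non-empty.
rec := resourcelock.LeaderElectionRecord{
	HolderIdentity:       "instance-a",             // assumed identity
	LeaseDurationSeconds: 15,
	Strategy:             "OldestEmulationVersion", // assumed strategy value
	// PreferredHolder left empty, so spec.PreferredHolder stays nil
}
spec := resourcelock.LeaderElectionRecordToLeaseSpec(&rec)
// spec.Strategy != nil, spec.PreferredHolder == nil
back := resourcelock.LeaseSpecToLeaderElectionRecord(&spec)
// back.Strategy == rec.Strategy and back.PreferredHolder == ""
_ = back
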