Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 10:53:34 +00:00)

Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/BUILD — 51 lines added (generated, vendored, normal file)
@@ -0,0 +1,51 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["controller.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/nodelease",
    visibility = ["//visibility:public"],
    deps = [
        "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["controller_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go — 206 lines added (generated, vendored, normal file)
@@ -0,0 +1,206 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodelease

import (
    "time"

    coordv1beta1 "k8s.io/api/coordination/v1beta1"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
    "k8s.io/utils/pointer"

    "k8s.io/klog"
)

const (
    // renewInterval is the interval at which the lease is renewed
    // TODO(mtaufen): 10s was the decision in the KEP, to keep the behavior as close to the
    // current default behavior as possible. In the future, we should determine a reasonable
    // fraction of the lease duration at which to renew, and use that instead.
    renewInterval = 10 * time.Second
    // maxUpdateRetries is the number of immediate, successive retries the Kubelet will attempt
    // when renewing the lease before it waits for the renewal interval before trying again,
    // similar to what we do for node status retries
    maxUpdateRetries = 5
    // maxBackoff is the maximum sleep time during backoff (e.g. in backoffEnsureLease)
    maxBackoff = 7 * time.Second
)

// Controller manages creating and renewing the lease for this Kubelet
type Controller interface {
    Run(stopCh <-chan struct{})
}

type controller struct {
    client                     clientset.Interface
    leaseClient                coordclientset.LeaseInterface
    holderIdentity             string
    leaseDurationSeconds       int32
    renewInterval              time.Duration
    clock                      clock.Clock
    onRepeatedHeartbeatFailure func()
}

// NewController constructs and returns a controller
func NewController(clock clock.Clock, client clientset.Interface, holderIdentity string, leaseDurationSeconds int32, onRepeatedHeartbeatFailure func()) Controller {
    var leaseClient coordclientset.LeaseInterface
    if client != nil {
        leaseClient = client.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
    }
    return &controller{
        client:                     client,
        leaseClient:                leaseClient,
        holderIdentity:             holderIdentity,
        leaseDurationSeconds:       leaseDurationSeconds,
        renewInterval:              renewInterval,
        clock:                      clock,
        onRepeatedHeartbeatFailure: onRepeatedHeartbeatFailure,
    }
}

// Run runs the controller
func (c *controller) Run(stopCh <-chan struct{}) {
    if c.leaseClient == nil {
        klog.Infof("node lease controller has nil lease client, will not claim or renew leases")
        return
    }
    wait.Until(c.sync, c.renewInterval, stopCh)
}

func (c *controller) sync() {
    lease, created := c.backoffEnsureLease()
    // we don't need to update the lease if we just created it
    if !created {
        c.retryUpdateLease(lease)
    }
}

// backoffEnsureLease attempts to create the lease if it does not exist,
// and uses exponentially increasing waits to prevent overloading the API server
// with retries. Returns the lease, and true if this call created the lease,
// false otherwise.
func (c *controller) backoffEnsureLease() (*coordv1beta1.Lease, bool) {
    var (
        lease   *coordv1beta1.Lease
        created bool
        err     error
    )
    sleep := 100 * time.Millisecond
    for {
        lease, created, err = c.ensureLease()
        if err == nil {
            break
        }
        sleep = minDuration(2*sleep, maxBackoff)
        klog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err)
        // backoff wait
        c.clock.Sleep(sleep)
    }
    return lease, created
}

// ensureLease creates the lease if it does not exist. Returns the lease and
// a bool (true if this call created the lease), or any error that occurs.
func (c *controller) ensureLease() (*coordv1beta1.Lease, bool, error) {
    lease, err := c.leaseClient.Get(c.holderIdentity, metav1.GetOptions{})
    if apierrors.IsNotFound(err) {
        // lease does not exist, create it
        lease, err := c.leaseClient.Create(c.newLease(nil))
        if err != nil {
            return nil, false, err
        }
        return lease, true, nil
    } else if err != nil {
        // unexpected error getting lease
        return nil, false, err
    }
    // lease already existed
    return lease, false, nil
}

// retryUpdateLease attempts to update the lease for maxUpdateRetries,
// call this once you're sure the lease has been created
func (c *controller) retryUpdateLease(base *coordv1beta1.Lease) {
    for i := 0; i < maxUpdateRetries; i++ {
        _, err := c.leaseClient.Update(c.newLease(base))
        if err == nil {
            return
        }
        klog.Errorf("failed to update node lease, error: %v", err)
        if i > 0 && c.onRepeatedHeartbeatFailure != nil {
            c.onRepeatedHeartbeatFailure()
        }
    }
    klog.Errorf("failed %d attempts to update node lease, will retry after %v", maxUpdateRetries, c.renewInterval)
}

// newLease constructs a new lease if base is nil, or returns a copy of base
// with desired state asserted on the copy.
func (c *controller) newLease(base *coordv1beta1.Lease) *coordv1beta1.Lease {
    // Use the bare minimum set of fields; other fields exist for debugging/legacy,
    // but we don't need to make node heartbeats more complicated by using them.
    var lease *coordv1beta1.Lease
    if base == nil {
        lease = &coordv1beta1.Lease{
            ObjectMeta: metav1.ObjectMeta{
                Name:      c.holderIdentity,
                Namespace: corev1.NamespaceNodeLease,
            },
            Spec: coordv1beta1.LeaseSpec{
                HolderIdentity:       pointer.StringPtr(c.holderIdentity),
                LeaseDurationSeconds: pointer.Int32Ptr(c.leaseDurationSeconds),
            },
        }
    } else {
        lease = base.DeepCopy()
    }
    lease.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}

    // Setting owner reference needs node's UID. Note that it is different from
    // kubelet.nodeRef.UID. When lease is initially created, it is possible that
    // the connection between master and node is not ready yet. So try to set
    // owner reference every time when renewing the lease, until successful.
    if lease.OwnerReferences == nil || len(lease.OwnerReferences) == 0 {
        if node, err := c.client.CoreV1().Nodes().Get(c.holderIdentity, metav1.GetOptions{}); err == nil {
            lease.OwnerReferences = []metav1.OwnerReference{
                {
                    APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
                    Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
                    Name:       c.holderIdentity,
                    UID:        node.UID,
                },
            }
        } else {
            klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", c.holderIdentity, err)
        }
    }

    return lease
}

func minDuration(a, b time.Duration) time.Duration {
    if a < b {
        return a
    }
    return b
}
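The vendored controller above is driven entirely through NewController and Run. As a rough, non-authoritative sketch of how a caller could wire it up (the in-cluster config, node name, 40-second lease duration, and empty failure callback below are assumptions for illustration, not values taken from this package or from the kubelet):

package main

import (
    "time"

    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"

    "k8s.io/kubernetes/pkg/kubelet/nodelease"
)

func main() {
    // Assumption: running inside a cluster; any rest.Config would work here.
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Assumptions for the sketch: the holder identity is the node name, and
    // 40 seconds is only an example lease duration.
    nodeName := "example-node"
    leaseDurationSeconds := int32(40)

    ctrl := nodelease.NewController(
        clock.RealClock{},    // real wall clock; the test file uses clock.NewFakeClock
        client,               // used for leases and for the Node owner-reference lookup
        nodeName,             // holderIdentity: also becomes the Lease object name
        leaseDurationSeconds,
        func() {
            // invoked after repeated lease-update (heartbeat) failures;
            // intentionally left empty in this sketch
        },
    )

    stopCh := make(chan struct{})
    go ctrl.Run(stopCh) // renews the lease every renewInterval (10s) until stopCh closes

    time.Sleep(30 * time.Second) // keep the sketch alive briefly
    close(stopCh)
}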
vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller_test.go — 197 lines added (generated, vendored, normal file)
@@ -0,0 +1,197 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodelease

import (
    "testing"
    "time"

    coordv1beta1 "k8s.io/api/coordination/v1beta1"
    corev1 "k8s.io/api/core/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/utils/pointer"
)

func TestNewLease(t *testing.T) {
    fakeClock := clock.NewFakeClock(time.Now())
    node := &corev1.Node{
        ObjectMeta: metav1.ObjectMeta{
            Name: "foo",
            UID:  types.UID("foo-uid"),
        },
    }
    cases := []struct {
        desc       string
        controller *controller
        base       *coordv1beta1.Lease
        expect     *coordv1beta1.Lease
    }{
        {
            desc: "nil base without node",
            controller: &controller{
                client:               fake.NewSimpleClientset(),
                holderIdentity:       node.Name,
                leaseDurationSeconds: 10,
                clock:                fakeClock,
            },
            base: nil,
            expect: &coordv1beta1.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      node.Name,
                    Namespace: corev1.NamespaceNodeLease,
                },
                Spec: coordv1beta1.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr(node.Name),
                    LeaseDurationSeconds: pointer.Int32Ptr(10),
                    RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
                },
            },
        },
        {
            desc: "nil base with node",
            controller: &controller{
                client:               fake.NewSimpleClientset(node),
                holderIdentity:       node.Name,
                leaseDurationSeconds: 10,
                clock:                fakeClock,
            },
            base: nil,
            expect: &coordv1beta1.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      node.Name,
                    Namespace: corev1.NamespaceNodeLease,
                    OwnerReferences: []metav1.OwnerReference{
                        {
                            APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
                            Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
                            Name:       node.Name,
                            UID:        node.UID,
                        },
                    },
                },
                Spec: coordv1beta1.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr(node.Name),
                    LeaseDurationSeconds: pointer.Int32Ptr(10),
                    RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
                },
            },
        },
        {
            desc: "non-nil base without owner ref, renew time is updated",
            controller: &controller{
                client:               fake.NewSimpleClientset(node),
                holderIdentity:       node.Name,
                leaseDurationSeconds: 10,
                clock:                fakeClock,
            },
            base: &coordv1beta1.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      node.Name,
                    Namespace: corev1.NamespaceNodeLease,
                },
                Spec: coordv1beta1.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr(node.Name),
                    LeaseDurationSeconds: pointer.Int32Ptr(10),
                    RenewTime:            &metav1.MicroTime{Time: fakeClock.Now().Add(-10 * time.Second)},
                },
            },
            expect: &coordv1beta1.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      node.Name,
                    Namespace: corev1.NamespaceNodeLease,
                    OwnerReferences: []metav1.OwnerReference{
                        {
                            APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
                            Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
                            Name:       node.Name,
                            UID:        node.UID,
                        },
                    },
                },
                Spec: coordv1beta1.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr(node.Name),
                    LeaseDurationSeconds: pointer.Int32Ptr(10),
                    RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
                },
            },
        },
        {
            desc: "non-nil base with owner ref, renew time is updated",
            controller: &controller{
                client:               fake.NewSimpleClientset(node),
                holderIdentity:       node.Name,
                leaseDurationSeconds: 10,
                clock:                fakeClock,
            },
            base: &coordv1beta1.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      node.Name,
                    Namespace: corev1.NamespaceNodeLease,
                    OwnerReferences: []metav1.OwnerReference{
                        {
                            APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
                            Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
                            Name:       node.Name,
                            UID:        node.UID,
                        },
                    },
                },
                Spec: coordv1beta1.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr(node.Name),
                    LeaseDurationSeconds: pointer.Int32Ptr(10),
                    RenewTime:            &metav1.MicroTime{Time: fakeClock.Now().Add(-10 * time.Second)},
                },
            },
            expect: &coordv1beta1.Lease{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      node.Name,
                    Namespace: corev1.NamespaceNodeLease,
                    OwnerReferences: []metav1.OwnerReference{
                        {
                            APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
                            Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
                            Name:       node.Name,
                            UID:        node.UID,
                        },
                    },
                },
                Spec: coordv1beta1.LeaseSpec{
                    HolderIdentity:       pointer.StringPtr(node.Name),
                    LeaseDurationSeconds: pointer.Int32Ptr(10),
                    RenewTime:            &metav1.MicroTime{Time: fakeClock.Now()},
                },
            },
        },
    }

    for _, tc := range cases {
        t.Run(tc.desc, func(t *testing.T) {
            newLease := tc.controller.newLease(tc.base)
            if newLease == tc.base {
                t.Fatalf("the new lease must be newly allocated, but got same address as base")
            }
            if !apiequality.Semantic.DeepEqual(tc.expect, newLease) {
                t.Errorf("unexpected result from newLease: %s", diff.ObjectDiff(tc.expect, newLease))
            }
        })
    }
}
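The vendored test above only exercises newLease. As a hypothetical companion (not part of this commit), the sketch below shows how ensureLease could also be covered in the same package using the fake clientset; the test name and assertions are assumptions:

package nodelease

import (
    "testing"
    "time"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/client-go/kubernetes/fake"
)

func TestEnsureLeaseCreatesMissingLease(t *testing.T) {
    cl := fake.NewSimpleClientset()
    c := &controller{
        client:               cl,
        leaseClient:          cl.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease),
        holderIdentity:       "foo",
        leaseDurationSeconds: 10,
        clock:                clock.NewFakeClock(time.Now()),
    }

    // First call: no lease exists yet, so one should be created.
    lease, created, err := c.ensureLease()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if !created {
        t.Errorf("expected the lease to be created on the first call")
    }
    if lease == nil || lease.Name != "foo" {
        t.Errorf("unexpected lease returned: %#v", lease)
    }

    // Second call: the existing lease should be found and reused, not recreated.
    _, created, err = c.ensureLease()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if created {
        t.Errorf("expected the existing lease to be reused on the second call")
    }
}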