vendor updates
6	vendor/k8s.io/kubernetes/test/integration/deployment/BUILD (generated, vendored)
@@ -13,17 +13,19 @@ go_test(
        "deployment_test.go",
        "main_test.go",
    ],
    importpath = "k8s.io/kubernetes/test/integration/deployment",
-   library = ":go_default_library",
+   embed = [":go_default_library"],
    tags = ["integration"],
    deps = [
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
    ],
)
432	vendor/k8s.io/kubernetes/test/integration/deployment/deployment_test.go (generated, vendored)
@@ -26,8 +26,11 @@ import (
	"k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/retry"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/pkg/util/pointer"
	"k8s.io/kubernetes/test/integration/framework"
)
@@ -775,7 +778,7 @@ func TestFailedDeployment(t *testing.T) {
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

-	if err = tester.waitForDeploymentUpdatedReplicasLTE(replicas); err != nil {
+	if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
		t.Fatal(err)
	}
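The rename above tracks a helper change in test/utils (see the util.go hunk later in this diff). As a rough sketch of what a GTE-style wait plausibly does, assuming the test file's imports shown above and the package's pollInterval/pollTimeout constants (the real logic lives in testutil.WaitForDeploymentUpdatedReplicasGTE, so details may differ):

// Sketch: poll until the controller has observed the given generation and
// .status.updatedReplicas has reached at least minUpdatedReplicas.
func waitForUpdatedReplicasGTE(c clientset.Interface, ns, name string, minUpdatedReplicas int32, generation int64) error {
	return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		d, err := c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Ignore stale status written before this generation was observed.
		if d.Status.ObservedGeneration < generation {
			return false, nil
		}
		return d.Status.UpdatedReplicas >= minUpdatedReplicas, nil
	})
}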
@@ -1068,3 +1071,430 @@ func TestScaledRolloutDeployment(t *testing.T) {
		}
	}
}
func TestSpecReplicasChange(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
	defer closeFn()
	name := "test-spec-replicas-change"
	ns := framework.CreateTestingNamespace(name, s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	deploymentName := "deployment"
	replicas := int32(1)
	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
	tester.deployment.Spec.Strategy.Type = v1beta1.RecreateDeploymentStrategyType
	tester.deployment.Spec.Strategy.RollingUpdate = nil
	var err error
	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
	if err != nil {
		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
	}

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

	// Scale the deployment up/down and verify its replicaset has a matching .spec.replicas
	if err = tester.scaleDeployment(2); err != nil {
		t.Fatal(err)
	}
	if err = tester.scaleDeployment(0); err != nil {
		t.Fatal(err)
	}
	if err = tester.scaleDeployment(1); err != nil {
		t.Fatal(err)
	}

	// Update the deployment spec (without touching .spec.replicas) to verify that
	// the deployment's status still updates when only other fields change
	var oldGeneration int64
	tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
		oldGeneration = update.Generation
		update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4)
	})
	if err != nil {
		t.Fatalf("failed updating deployment %q: %v", tester.deployment.Name, err)
	}

	savedGeneration := tester.deployment.Generation
	if savedGeneration == oldGeneration {
		t.Fatalf("failed to verify that .Generation incremented for deployment %q", deploymentName)
	}
	if err = tester.waitForObservedDeployment(savedGeneration); err != nil {
		t.Fatal(err)
	}
}
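TestSpecReplicasChange leans on tester.updateDeployment to apply a mutation with conflict retries; the vendored helper delegates to test/utils. A minimal sketch of that pattern, assuming it wraps client-go's retry package from this diff's imports (the helper name here is illustrative, not the vendored API):

// Sketch: re-fetch, mutate, and update until the write no longer conflicts.
func updateDeploymentWithRetries(c clientset.Interface, ns, name string, applyUpdate func(*v1beta1.Deployment)) (*v1beta1.Deployment, error) {
	var d *v1beta1.Deployment
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		var err error
		if d, err = c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{}); err != nil {
			return err
		}
		applyUpdate(d) // mutate the freshly fetched object
		d, err = c.ExtensionsV1beta1().Deployments(ns).Update(d)
		return err
	})
	return d, err
}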
func TestDeploymentAvailableCondition(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
	defer closeFn()
	name := "test-deployment-available-condition"
	ns := framework.CreateTestingNamespace(name, s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	deploymentName := "deployment"
	replicas := int32(10)
	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
	// Assign a high value to the deployment's minReadySeconds
	tester.deployment.Spec.MinReadySeconds = 3600
	var err error
	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
	if err != nil {
		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
	}

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

	// Wait for the deployment to be observed by the controller and to have at least the specified number of updated replicas
	if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
		t.Fatal(err)
	}

	// Wait for the deployment to have the MinimumReplicasUnavailable reason because the pods are not marked as ready
	if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
		t.Fatal(err)
	}

	// Verify that all replicas fields of DeploymentStatus have the desired counts
	if err = tester.checkDeploymentStatusReplicasFields(10, 10, 0, 0, 10); err != nil {
		t.Fatal(err)
	}

	// Mark the pods as ready without waiting for the deployment to complete
	if err = tester.markUpdatedPodsReadyWithoutComplete(); err != nil {
		t.Fatal(err)
	}

	// Wait for the number of ready replicas to equal the number of replicas.
	if err = tester.waitForReadyReplicas(); err != nil {
		t.Fatal(err)
	}

	// Verify that the deployment still has the MinimumReplicasUnavailable reason within the minReadySeconds period
	if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil {
		t.Fatal(err)
	}

	// Verify that all replicas fields of DeploymentStatus have the desired counts
	if err = tester.checkDeploymentStatusReplicasFields(10, 10, 10, 0, 10); err != nil {
		t.Fatal(err)
	}

	// Update the deployment's minReadySeconds to a small value
	tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
		update.Spec.MinReadySeconds = 1
	})
	if err != nil {
		t.Fatalf("failed updating deployment %q: %v", deploymentName, err)
	}

	// Wait for the deployment to notice that minReadySeconds has changed
	if err := tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
		t.Fatal(err)
	}

	// Wait for the deployment to have the MinimumReplicasAvailable reason after the minReadySeconds period
	if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, v1beta1.DeploymentAvailable); err != nil {
		t.Fatal(err)
	}

	// Verify that all replicas fields of DeploymentStatus have the desired counts
	if err = tester.checkDeploymentStatusReplicasFields(10, 10, 10, 10, 0); err != nil {
		t.Fatal(err)
	}
}
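The condition flips above are driven by minReadySeconds: a ready pod only counts as available once it has stayed ready for that long, so with minReadySeconds=3600 the deployment keeps the MinimumReplicasUnavailable reason even after every pod is ready. A simplified sketch of the availability rule, assuming the test file's imports plus the standard time package (the authoritative check is the pod utilities in the Kubernetes tree; this is an approximation):

// Approximation: a pod is available if it is ready and has been ready for at
// least minReadySeconds (zero means ready implies available immediately).
func isPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool {
	_, cond := podutil.GetPodCondition(&pod.Status, v1.PodReady)
	if cond == nil || cond.Status != v1.ConditionTrue {
		return false
	}
	if minReadySeconds == 0 {
		return true
	}
	minReady := time.Duration(minReadySeconds) * time.Second
	// Availability is measured from the last ready transition.
	return !cond.LastTransitionTime.IsZero() && cond.LastTransitionTime.Add(minReady).Before(now.Time)
}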
// testRSControllerRefPatch waits for the deployment to automatically patch an incorrect ControllerRef on the RS
func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *v1beta1.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) {
	ns := rs.Namespace
	rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns)
	rs, err := tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
		update.OwnerReferences = []metav1.OwnerReference{*ownerReference}
	})
	if err != nil {
		t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
	}

	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return metav1.GetControllerOf(newRS) != nil, nil
	}); err != nil {
		t.Fatalf("failed to wait for controllerRef of the replicaset %q to be set: %v", rs.Name, err)
	}

	newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("failed to obtain replicaset %q: %v", rs.Name, err)
	}
	controllerRef := metav1.GetControllerOf(newRS)
	if controllerRef.UID != tester.deployment.UID {
		t.Fatalf("controllerRef of replicaset %q has a different UID: Expected %v, got %v", newRS.Name, tester.deployment.UID, controllerRef.UID)
	}
	ownerReferenceNum := len(newRS.GetOwnerReferences())
	if ownerReferenceNum != expectedOwnerReferenceNum {
		t.Fatalf("unexpected number of owner references for replicaset %q: Expected %d, got %d", newRS.Name, expectedOwnerReferenceNum, ownerReferenceNum)
	}
}
func TestGeneralReplicaSetAdoption(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
	defer closeFn()
	name := "test-general-replicaset-adoption"
	ns := framework.CreateTestingNamespace(name, s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	deploymentName := "deployment"
	replicas := int32(1)
	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
	var err error
	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
	if err != nil {
		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
	}

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

	// Wait for the Deployment to be updated to revision 1
	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
		t.Fatal(err)
	}

	// Ensure the deployment completes while marking its pods as ready simultaneously
	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
		t.Fatal(err)
	}

	// Get the replicaset of the deployment
	rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
	if err != nil {
		t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
	}
	if rs == nil {
		t.Fatalf("unable to find replicaset of deployment %q", deploymentName)
	}

	// When the only OwnerReference of the RS points to another type of API object such as a statefulset
	// with Controller=false, the deployment should add a second OwnerReference (ControllerRef) pointing to itself
	// with Controller=true
	var falseVar = false
	ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: deploymentName, Controller: &falseVar}
	testRSControllerRefPatch(t, tester, rs, &ownerReference, 2)

	// When the only OwnerReference of the RS points to the deployment with Controller=false,
	// the deployment should set Controller=true for the only OwnerReference
	ownerReference = metav1.OwnerReference{UID: tester.deployment.UID, APIVersion: "extensions/v1beta1", Kind: "Deployment", Name: deploymentName, Controller: &falseVar}
	testRSControllerRefPatch(t, tester, rs, &ownerReference, 1)
}
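For reference, the controllerRef that adoption is expected to leave on the ReplicaSet is an OwnerReference with Controller=true. The field values below are illustrative, reusing the deployment from the test above; BlockOwnerDeletion is typically set by controllers but is an assumption here:

controller := true
blockOwnerDeletion := true
expectedRef := metav1.OwnerReference{
	APIVersion:         "extensions/v1beta1",
	Kind:               "Deployment",
	Name:               tester.deployment.Name,
	UID:                tester.deployment.UID,
	Controller:         &controller,         // marks this owner as the managing controller
	BlockOwnerDeletion: &blockOwnerDeletion, // lets GC wait on dependents before deleting the owner
}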
func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, replicas int32) {
	ns := tester.deployment.Namespace
	deploymentName := tester.deployment.Name
	deploymentClient := tester.c.ExtensionsV1beta1().Deployments(ns)
	deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
	}
	kind := "Deployment"
	scaleClient := tester.c.ExtensionsV1beta1().Scales(ns)
	scale, err := scaleClient.Get(kind, deploymentName)
	if err != nil {
		t.Fatalf("Failed to obtain scale subresource for deployment %q: %v", deploymentName, err)
	}
	if scale.Spec.Replicas != *deployment.Spec.Replicas {
		t.Fatalf("Scale subresource for deployment %q does not match .Spec.Replicas: expected %d, got %d", deploymentName, *deployment.Spec.Replicas, scale.Spec.Replicas)
	}

	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scale, err := scaleClient.Get(kind, deploymentName)
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas
		_, err = scaleClient.Update(kind, scale)
		return err
	}); err != nil {
		t.Fatalf("Failed to set .Spec.Replicas of scale subresource for deployment %q: %v", deploymentName, err)
	}

	deployment, err = deploymentClient.Get(deploymentName, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
	}
	if *deployment.Spec.Replicas != replicas {
		t.Fatalf(".Spec.Replicas of deployment %q does not match its scale subresource: expected %d, got %d", deploymentName, replicas, *deployment.Spec.Replicas)
	}
}
func TestDeploymentScaleSubresource(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
	defer closeFn()
	name := "test-deployment-scale-subresource"
	ns := framework.CreateTestingNamespace(name, s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	deploymentName := "deployment"
	replicas := int32(2)
	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
	var err error
	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
	if err != nil {
		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
	}

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

	// Wait for the Deployment to be updated to revision 1
	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
		t.Fatal(err)
	}

	// Ensure the deployment completes while marking its pods as ready simultaneously
	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
		t.Fatal(err)
	}

	// Use the scale subresource to scale the deployment up to 3
	testScalingUsingScaleSubresource(t, tester, 3)
	// Use the scale subresource to scale the deployment down to 0
	testScalingUsingScaleSubresource(t, tester, 0)
}
// This test verifies that the Deployment orphans a ReplicaSet when the ReplicaSet's
// .Labels field is changed so that it no longer matches the Deployment's selector. It also partially
// verifies that the collision avoidance mechanism is triggered when a Deployment's new ReplicaSet
// is orphaned, even without a PodTemplateSpec change. See the comment below for more info:
// https://github.com/kubernetes/kubernetes/pull/59212#discussion_r166465113
func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
	s, closeFn, rm, dc, informers, c := dcSetup(t)
	defer closeFn()
	name := "test-replicaset-orphaning-and-adoption-when-labels-change"
	ns := framework.CreateTestingNamespace(name, s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	deploymentName := "deployment"
	replicas := int32(1)
	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
	var err error
	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
	if err != nil {
		t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
	}

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go rm.Run(5, stopCh)
	go dc.Run(5, stopCh)

	// Wait for the Deployment to be updated to revision 1
	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
		t.Fatal(err)
	}

	// Ensure the deployment completes while marking its pods as ready simultaneously
	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
		t.Fatal(err)
	}

	// Orphaning: the deployment should remove the OwnerReference from a RS when the RS's labels change to no longer match its selector

	// Get the replicaset of the deployment
	rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
	if err != nil {
		t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
	}
	if rs == nil {
		t.Fatalf("unable to find replicaset of deployment %q", deploymentName)
	}

	// Verify that the controllerRef of the replicaset is non-nil and points to the deployment
	controllerRef := metav1.GetControllerOf(rs)
	if controllerRef == nil {
		t.Fatalf("controllerRef of replicaset %q is nil", rs.Name)
	}
	if controllerRef.UID != tester.deployment.UID {
		t.Fatalf("controllerRef of replicaset %q has a different UID: Expected %v, got %v", rs.Name, tester.deployment.UID, controllerRef.UID)
	}

	// Change the replicaset's labels so that they no longer match the deployment's labels
	labelMap := map[string]string{"new-name": "new-test"}
	rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
		update.Labels = labelMap
	})
	if err != nil {
		t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
	}

	// Wait for the controllerRef of the replicaset to become nil
	rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns.Name)
	if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		rs, err = rsClient.Get(rs.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return metav1.GetControllerOf(rs) == nil, nil
	}); err != nil {
		t.Fatalf("failed to wait for controllerRef of replicaset %q to become nil: %v", rs.Name, err)
	}

	// Wait for the deployment to create a new replicaset.
	// This triggers collision avoidance because replicaset names are deterministic:
	// the new replicaset gets a name with a different hash to preserve name uniqueness.
	var newRS *v1beta1.ReplicaSet
	if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
		if err != nil {
			return false, fmt.Errorf("failed to get new replicaset of deployment %q after orphaning: %v", deploymentName, err)
		}
		return newRS != nil, nil
	}); err != nil {
		t.Fatalf("failed to wait for deployment %q to create a new replicaset after orphaning: %v", deploymentName, err)
	}
	if newRS.UID == rs.UID {
		t.Fatalf("expected deployment %q to create a new replicaset different from the orphaned one, but it did not", deploymentName)
	}

	// Adoption: the deployment should add a controllerRef to a RS when the RS's labels change to match its selector

	// Change the old replicaset's labels to match the deployment's labels
	rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) {
		update.Labels = testLabels()
	})
	if err != nil {
		t.Fatalf("failed to update replicaset %q: %v", rs.Name, err)
	}

	// Wait for the deployment to adopt the old replicaset
	if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		rs, err := rsClient.Get(rs.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		controllerRef = metav1.GetControllerOf(rs)
		return controllerRef != nil && controllerRef.UID == tester.deployment.UID, nil
	}); err != nil {
		t.Fatalf("failed waiting for replicaset adoption by deployment %q to complete: %v", deploymentName, err)
	}
}
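The collision avoidance mentioned above comes from the hash embedded in the ReplicaSet name: the deployment derives the name from a hash of the pod template together with a per-deployment collisionCount, so an orphaned name forces a bumped count and a fresh hash. A loose sketch of that scheme (the real controller hashes the full PodTemplateSpec with FNV via its ComputeHash helper; everything here is simplified and hypothetical):

import (
	"fmt"
	"hash/fnv"
)

// Simplified stand-in for the controller's naming: hash the serialized pod
// template together with the collision count, then suffix the deployment name.
func replicaSetName(deploymentName, serializedPodTemplate string, collisionCount *int32) string {
	h := fnv.New32a()
	h.Write([]byte(serializedPodTemplate))
	if collisionCount != nil {
		// Bumping collisionCount is what yields a different name after a clash.
		fmt.Fprintf(h, "%d", *collisionCount)
	}
	return fmt.Sprintf("%s-%d", deploymentName, h.Sum32())
}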
96	vendor/k8s.io/kubernetes/test/integration/deployment/util.go (generated, vendored)
@@ -210,7 +210,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str

func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
	addPodConditionReady(pod, metav1.Now())
-	_, err := c.Core().Pods(ns).UpdateStatus(pod)
+	_, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
	return err
}
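markPodReady depends on addPodConditionReady, which sits outside this hunk. A minimal reconstruction of what such a helper needs to do for these tests (hypothetical; the vendored helper may also set other status fields, such as the pod phase):

// Sketch: append a Ready=True condition so the controllers treat the pod as ready.
func addPodConditionReady(pod *v1.Pod, now metav1.Time) {
	pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
		Type:               v1.PodReady,
		Status:             v1.ConditionTrue,
		LastTransitionTime: now, // minReadySeconds-based availability counts from here
	})
}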
@@ -368,6 +368,10 @@ func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.Up
	return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
}

+func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
+	return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout)
+}
+
// waitForDeploymentRollbackCleared waits until the deployment has either started rolling back or no longer needs to roll back.
func (d *deploymentTester) waitForDeploymentRollbackCleared() error {
	return testutil.WaitForDeploymentRollbackCleared(d.c, d.deployment.Namespace, d.deployment.Name, pollInterval, pollTimeout)
@@ -378,8 +382,8 @@ func (d *deploymentTester) checkDeploymentRevisionAndImage(revision, image strin
	return testutil.CheckDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image)
}

-func (d *deploymentTester) waitForDeploymentUpdatedReplicasLTE(minUpdatedReplicas int32) error {
-	return testutil.WaitForDeploymentUpdatedReplicasLTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
+func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplicas int32) error {
+	return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout)
}

func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error {
@@ -416,3 +420,89 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
	return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
}
func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
	var err error
	d.deployment, err = d.updateDeployment(func(update *v1beta1.Deployment) {
		update.Spec.Replicas = &newReplicas
	})
	if err != nil {
		return fmt.Errorf("failed updating deployment %q: %v", d.deployment.Name, err)
	}

	if err := d.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
		return err
	}

	rs, err := d.expectNewReplicaSet()
	if err != nil {
		return err
	}
	if *rs.Spec.Replicas != newReplicas {
		return fmt.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas)
	}
	return nil
}
// waitForReadyReplicas waits for the number of ready replicas to equal the number of replicas.
func (d *deploymentTester) waitForReadyReplicas() error {
	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
		}
		return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil
	}); err != nil {
		return fmt.Errorf("failed to wait for .readyReplicas to equal .replicas: %v", err)
	}
	return nil
}
// markUpdatedPodsReadyWithoutComplete marks updated Deployment pods as ready without waiting for the deployment to complete.
func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		pods, err := d.listUpdatedPods()
		if err != nil {
			return false, err
		}
		for i := range pods {
			pod := pods[i]
			if podutil.IsPodReady(&pod) {
				continue
			}
			if err = markPodReady(d.c, d.deployment.Namespace, &pod); err != nil {
				d.t.Logf("failed to update Deployment pod %q, will retry later: %v", pod.Name, err)
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		return fmt.Errorf("failed to mark all updated pods as ready: %v", err)
	}
	return nil
}
// checkDeploymentStatusReplicasFields verifies that all replicas fields of DeploymentStatus have the desired counts.
// It returns an error as soon as a non-matching replicas field is found.
func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
	deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
	}
	if deployment.Status.Replicas != replicas {
		return fmt.Errorf("unexpected .replicas: expect %d, got %d", replicas, deployment.Status.Replicas)
	}
	if deployment.Status.UpdatedReplicas != updatedReplicas {
		return fmt.Errorf("unexpected .updatedReplicas: expect %d, got %d", updatedReplicas, deployment.Status.UpdatedReplicas)
	}
	if deployment.Status.ReadyReplicas != readyReplicas {
		return fmt.Errorf("unexpected .readyReplicas: expect %d, got %d", readyReplicas, deployment.Status.ReadyReplicas)
	}
	if deployment.Status.AvailableReplicas != availableReplicas {
		return fmt.Errorf("unexpected .availableReplicas: expect %d, got %d", availableReplicas, deployment.Status.AvailableReplicas)
	}
	if deployment.Status.UnavailableReplicas != unavailableReplicas {
		return fmt.Errorf("unexpected .unavailableReplicas: expect %d, got %d", unavailableReplicas, deployment.Status.UnavailableReplicas)
	}
	return nil
}
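A reading aid for the five checkDeploymentStatusReplicasFields arguments used in TestDeploymentAvailableCondition: in these tests, unavailableReplicas behaves as desired replicas minus availableReplicas, which is why the expectations move from (10, 10, 0, 0, 10) to (10, 10, 10, 0, 10) and finally (10, 10, 10, 10, 0) as pods turn ready and then available. Sketched as a hypothetical extra assertion inside a helper like the one above (this invariant holds for these call sites, not as a general API guarantee):

// Consistency the test expectations follow: unavailable = desired - available.
if deployment.Status.UnavailableReplicas != *deployment.Spec.Replicas-deployment.Status.AvailableReplicas {
	return fmt.Errorf("inconsistent status for %q: unavailable %d != replicas %d - available %d",
		deployment.Name, deployment.Status.UnavailableReplicas,
		*deployment.Spec.Replicas, deployment.Status.AvailableReplicas)
}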