Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor updates
vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD (generated, vendored): 3 lines changed

@@ -56,8 +56,7 @@ go_test(
         "rolling_test.go",
         "sync_test.go",
     ],
-    importpath = "k8s.io/kubernetes/pkg/controller/deployment",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
     deps = [
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/apis/apps/install:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go (generated, vendored): 70 lines changed

@@ -163,13 +163,15 @@ func TestRequeueStuckDeployment(t *testing.T) {
 	dc.enqueueDeployment = dc.enqueue
 
 	for _, test := range tests {
-		if test.nowFn != nil {
-			nowFn = test.nowFn
-		}
-		got := dc.requeueStuckDeployment(test.d, test.status)
-		if got != test.expected {
-			t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			if test.nowFn != nil {
+				nowFn = test.nowFn
+			}
+			got := dc.requeueStuckDeployment(test.d, test.status)
+			if got != test.expected {
+				t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected)
+			}
+		})
 	}
 }
 
@@ -310,32 +312,34 @@ func TestSyncRolloutStatus(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		fake := fake.Clientset{}
-		dc := &DeploymentController{
-			client: &fake,
-		}
-
-		if test.newRS != nil {
-			test.allRSs = append(test.allRSs, test.newRS)
-		}
-
-		err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d)
-		if err != nil {
-			t.Error(err)
-		}
-
-		newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
-		switch {
-		case newCond == nil:
-			if test.d.Spec.ProgressDeadlineSeconds != nil {
-				t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
+		t.Run(test.name, func(t *testing.T) {
+			fake := fake.Clientset{}
+			dc := &DeploymentController{
+				client: &fake,
 			}
-		case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:
-			t.Errorf("%s: DeploymentProgressing has status %s with reason %s. Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason)
-		case !test.lastUpdate.IsZero() && test.lastUpdate != testTime:
-			t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime)
-		case !test.lastTransition.IsZero() && test.lastTransition != testTime:
-			t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime)
-		}
+
+			if test.newRS != nil {
+				test.allRSs = append(test.allRSs, test.newRS)
+			}
+
+			err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d)
+			if err != nil {
+				t.Error(err)
+			}
+
+			newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
+			switch {
+			case newCond == nil:
+				if test.d.Spec.ProgressDeadlineSeconds != nil {
+					t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
+				}
+			case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:
+				t.Errorf("%s: DeploymentProgressing has status %s with reason %s. Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason)
+			case !test.lastUpdate.IsZero() && test.lastUpdate != testTime:
+				t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime)
+			case !test.lastTransition.IsZero() && test.lastTransition != testTime:
+				t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime)
+			}
+		})
 	}
 }
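Note: the hunks above convert plain table-driven loops into named subtests via t.Run. A minimal, self-contained sketch of the pattern (hypothetical add function and test table, not taken from the vendored code):

package adder

import "testing"

// add is a stand-in for the function under test.
func add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	tests := []struct {
		name     string
		a, b     int
		expected int
	}{
		{name: "zero values", a: 0, b: 0, expected: 0},
		{name: "positive values", a: 2, b: 3, expected: 5},
	}

	for _, test := range tests {
		// t.Run names each case in `go test -v` output and lets a single
		// case be selected with `go test -run TestAdd/positive_values`.
		t.Run(test.name, func(t *testing.T) {
			if got := add(test.a, test.b); got != test.expected {
				t.Errorf("got %d, expected %d", got, test.expected)
			}
		})
	}
}

Inside a t.Run closure a failing case bails out with return rather than continue, which is why the later test hunks in this commit swap those statements.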
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go (generated, vendored): 15 lines changed

@@ -104,8 +104,19 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe
 		if newRS != nil && newRS.UID == rsUID {
 			continue
 		}
-		if len(podList.Items) > 0 {
-			return true
+		for _, pod := range podList.Items {
+			switch pod.Status.Phase {
+			case v1.PodFailed, v1.PodSucceeded:
+				// Don't count pods in terminal state.
+				continue
+			case v1.PodUnknown:
+				// This happens in situation like when the node is temporarily disconnected from the cluster.
+				// If we can't be sure that the pod is not running, we have to count it.
+				return true
+			default:
+				// Pod is not in terminal phase.
+				return true
+			}
 		}
 	}
 	return false
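The rewritten check treats only Failed and Succeeded as terminal and deliberately counts Unknown pods as possibly running. A standalone sketch of that phase check (hypothetical helper name, assuming only the k8s.io/api/core/v1 types):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// anyPodPossiblyRunning reports whether any pod in the list might still be
// running: terminal pods (Failed/Succeeded) are ignored, while Pending,
// Running, and Unknown pods are counted because they cannot be proven stopped.
func anyPodPossiblyRunning(pods []v1.Pod) bool {
	for _, pod := range pods {
		switch pod.Status.Phase {
		case v1.PodFailed, v1.PodSucceeded:
			continue // terminal, ignore
		default:
			return true
		}
	}
	return false
}

func main() {
	pods := []v1.Pod{
		{Status: v1.PodStatus{Phase: v1.PodSucceeded}},
		{Status: v1.PodStatus{Phase: v1.PodPending}},
	}
	fmt.Println(anyPodPossiblyRunning(pods)) // true, because of the pending pod
}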
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go (generated, vendored): 136 lines changed

@@ -90,34 +90,138 @@ func TestOldPodsRunning(t *testing.T) {
 		oldRSs []*extensions.ReplicaSet
 		podMap map[types.UID]*v1.PodList
 
-		expected bool
+		hasOldPodsRunning bool
 	}{
 		{
-			name:     "no old RSs",
-			expected: false,
+			name:              "no old RSs",
+			hasOldPodsRunning: false,
 		},
 		{
-			name:     "old RSs with running pods",
-			oldRSs:   []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
-			podMap:   podMapWithUIDs([]string{"some-uid", "other-uid"}),
-			expected: true,
+			name:              "old RSs with running pods",
+			oldRSs:            []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
+			podMap:            podMapWithUIDs([]string{"some-uid", "other-uid"}),
+			hasOldPodsRunning: true,
		},
 		{
-			name:     "old RSs without pods but with non-zero status replicas",
-			oldRSs:   []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 1, nil)},
-			expected: true,
+			name:              "old RSs without pods but with non-zero status replicas",
+			oldRSs:            []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
+			hasOldPodsRunning: true,
 		},
 		{
-			name:     "old RSs without pods or non-zero status replicas",
-			oldRSs:   []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 0, nil)},
-			expected: false,
+			name:              "old RSs without pods or non-zero status replicas",
+			oldRSs:            []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			hasOldPodsRunning: false,
 		},
+		{
+			name:   "old RSs with zero status replicas but pods in terminal state are present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodFailed,
+							},
+						},
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodSucceeded,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: false,
+		},
+		{
+			name:   "old RSs with zero status replicas but pod in unknown phase present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodUnknown,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas with pending pod present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodPending,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas with running pod present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodRunning,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas but pods in terminal state and pending are present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodFailed,
+							},
+						},
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodSucceeded,
+							},
+						},
+					},
+				},
+				"uid-2": {
+					Items: []v1.Pod{},
+				},
+				"uid-3": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodPending,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
 	}
 
 	for _, test := range tests {
-		if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
-			t.Errorf("%s: expected %t, got %t", test.name, expected, got)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			if expected, got := test.hasOldPodsRunning, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
+				t.Errorf("%s: expected %t, got %t", test.name, expected, got)
+			}
+		})
 	}
 }
 
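The new cases build their podMap fixtures by hand. A hedged sketch of a helper producing the same map[types.UID]*v1.PodList shape (the helper name is hypothetical and not part of the vendored tests):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// podListWithPhases builds the fixture shape used by TestOldPodsRunning:
// one PodList keyed by a ReplicaSet UID, with one pod per requested phase.
func podListWithPhases(uid types.UID, phases ...v1.PodPhase) map[types.UID]*v1.PodList {
	list := &v1.PodList{}
	for _, phase := range phases {
		list.Items = append(list.Items, v1.Pod{
			Status: v1.PodStatus{Phase: phase},
		})
	}
	return map[types.UID]*v1.PodList{uid: list}
}

func main() {
	podMap := podListWithPhases("uid-1", v1.PodFailed, v1.PodSucceeded)
	fmt.Println(len(podMap["uid-1"].Items)) // 2
}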
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go (generated, vendored): 45 lines changed

@@ -318,35 +318,40 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
 	createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
 	switch {
 	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
-	// Fetch a copy of the ReplicaSet. If its PodTemplateSpec is semantically deep equal
-	// with the PodTemplateSpec of the Deployment, then that is our new ReplicaSet. Otherwise,
-	// this is a hash collision and we need to increment the collisionCount field in the
-	// status of the Deployment and try the creation again.
 	case errors.IsAlreadyExists(err):
+		alreadyExists = true
+
+		// Fetch a copy of the ReplicaSet.
 		rs, rsErr := dc.rsLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
 		if rsErr != nil {
 			return nil, rsErr
 		}
 
+		// If the Deployment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
+		// deep equal to the PodTemplateSpec of the Deployment, it's the Deployment's new ReplicaSet.
+		// Otherwise, this is a hash collision and we need to increment the collisionCount field in
+		// the status of the Deployment and requeue to try the creation in the next sync.
+		controllerRef := metav1.GetControllerOf(rs)
+		if controllerRef != nil && controllerRef.UID == d.UID && deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
+			createdRS = rs
+			err = nil
+			break
+		}
+
 		// Matching ReplicaSet is not equal - increment the collisionCount in the DeploymentStatus
 		// and requeue the Deployment.
-		if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
-			if d.Status.CollisionCount == nil {
-				d.Status.CollisionCount = new(int32)
-			}
-			preCollisionCount := *d.Status.CollisionCount
-			*d.Status.CollisionCount++
-			// Update the collisionCount for the Deployment and let it requeue by returning the original
-			// error.
-			_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
-			if dErr == nil {
-				glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
-			}
-			return nil, err
+		if d.Status.CollisionCount == nil {
+			d.Status.CollisionCount = new(int32)
 		}
-		// Pass through the matching ReplicaSet as the new ReplicaSet.
-		createdRS = rs
-		err = nil
+		preCollisionCount := *d.Status.CollisionCount
+		*d.Status.CollisionCount++
+		// Update the collisionCount for the Deployment and let it requeue by returning the original
+		// error.
+		_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
+		if dErr == nil {
+			glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
+		}
+		return nil, err
 	case err != nil:
 		msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
 		if d.Spec.ProgressDeadlineSeconds != nil {
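The collision handling above adopts the pre-existing ReplicaSet when it is owned by the Deployment and its pod template matches once the hash label is ignored; otherwise it bumps status.collisionCount and returns the original error so the next sync retries with a different name. A simplified sketch of that decision, using stand-in types rather than the controller's real ones:

package main

import "fmt"

// Simplified stand-ins for the Deployment/ReplicaSet fields that matter here.
type replicaSet struct {
	name     string
	template string // pod template with the pod-template-hash label stripped
	ownedBy  string // UID of the owning Deployment
}

type deployment struct {
	uid            string
	template       string
	collisionCount int32
}

// resolveExistingRS sketches the collision handling: if the pre-existing
// ReplicaSet is owned by this Deployment and has the same (hash-ignored)
// template, adopt it; otherwise bump collisionCount so the next sync computes
// a different hash/name, and report the original conflict error.
func resolveExistingRS(d *deployment, existing *replicaSet, createErr error) (*replicaSet, error) {
	if existing.ownedBy == d.uid && existing.template == d.template {
		return existing, nil // same template: this is our new ReplicaSet
	}
	d.collisionCount++ // recorded in status so the next generated name differs
	return nil, createErr
}

func main() {
	d := &deployment{uid: "dep-uid", template: "nginx:1.25"}
	rs := &replicaSet{name: "web-abc123", template: "nginx:1.24", ownedBy: "dep-uid"}
	_, err := resolveExistingRS(d, rs, fmt.Errorf("replicasets \"web-abc123\" already exists"))
	fmt.Println(err, d.collisionCount) // collisionCount bumped to 1
}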
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go (generated, vendored): 122 lines changed

@@ -267,72 +267,74 @@ func TestScale(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		_ = olderTimestamp
-		t.Log(test.name)
-		fake := fake.Clientset{}
-		dc := &DeploymentController{
-			client:        &fake,
-			eventRecorder: &record.FakeRecorder{},
-		}
-
-		if test.newRS != nil {
-			desiredReplicas := *(test.oldDeployment.Spec.Replicas)
-			if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
-				desiredReplicas = desired
-			}
-			deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
-		}
-		for i := range test.oldRSs {
-			rs := test.oldRSs[i]
-			if rs == nil {
-				continue
-			}
-			desiredReplicas := *(test.oldDeployment.Spec.Replicas)
-			if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
-				desiredReplicas = desired
-			}
-			deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
-		}
-
-		if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
-			t.Errorf("%s: unexpected error: %v", test.name, err)
-			continue
-		}
-
-		// Construct the nameToSize map that will hold all the sizes we got our of tests
-		// Skip updating the map if the replica set wasn't updated since there will be
-		// no update action for it.
-		nameToSize := make(map[string]int32)
-		if test.newRS != nil {
-			nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
-		}
-		for i := range test.oldRSs {
-			rs := test.oldRSs[i]
-			nameToSize[rs.Name] = *(rs.Spec.Replicas)
-		}
-		// Get all the UPDATE actions and update nameToSize with all the updated sizes.
-		for _, action := range fake.Actions() {
-			rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
-			if !test.wasntUpdated[rs.Name] {
-				nameToSize[rs.Name] = *(rs.Spec.Replicas)
-			}
-		}
-
-		if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
-			t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
-			continue
-		}
-		if len(test.expectedOld) != len(test.oldRSs) {
-			t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
-			continue
-		}
-		for n := range test.oldRSs {
-			rs := test.oldRSs[n]
-			expected := test.expectedOld[n]
-			if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
-				t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
-			}
-		}
+		t.Run(test.name, func(t *testing.T) {
+			_ = olderTimestamp
+			t.Log(test.name)
+			fake := fake.Clientset{}
+			dc := &DeploymentController{
+				client:        &fake,
+				eventRecorder: &record.FakeRecorder{},
+			}
+
+			if test.newRS != nil {
+				desiredReplicas := *(test.oldDeployment.Spec.Replicas)
+				if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
+					desiredReplicas = desired
+				}
+				deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
+			}
+			for i := range test.oldRSs {
+				rs := test.oldRSs[i]
+				if rs == nil {
+					continue
+				}
+				desiredReplicas := *(test.oldDeployment.Spec.Replicas)
+				if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
+					desiredReplicas = desired
+				}
+				deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
+			}
+
+			if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
+				t.Errorf("%s: unexpected error: %v", test.name, err)
+				return
+			}
+
+			// Construct the nameToSize map that will hold all the sizes we got our of tests
+			// Skip updating the map if the replica set wasn't updated since there will be
+			// no update action for it.
+			nameToSize := make(map[string]int32)
+			if test.newRS != nil {
+				nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
+			}
+			for i := range test.oldRSs {
+				rs := test.oldRSs[i]
+				nameToSize[rs.Name] = *(rs.Spec.Replicas)
+			}
+			// Get all the UPDATE actions and update nameToSize with all the updated sizes.
+			for _, action := range fake.Actions() {
+				rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
+				if !test.wasntUpdated[rs.Name] {
+					nameToSize[rs.Name] = *(rs.Spec.Replicas)
+				}
+			}
+
+			if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
+				t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
+				return
+			}
+			if len(test.expectedOld) != len(test.oldRSs) {
+				t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
+				return
+			}
+			for n := range test.oldRSs {
+				rs := test.oldRSs[n]
+				expected := test.expectedOld[n]
+				if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
+					t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
+				}
+			}
+		})
 	}
 }
 
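TestScale verifies scaling by replaying the fake clientset's recorded UPDATE actions into a name-to-size map. A hedged sketch of the same verification idea against the public client-go fake (assumes the current, context-aware client-go signatures rather than the vendored internal clientset used in the test itself):

package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "web-1", Namespace: "default"}}
	client := fake.NewSimpleClientset(rs)

	// Mutate the object through the client so the fake records an UPDATE action.
	replicas := int32(3)
	rs.Spec.Replicas = &replicas
	if _, err := client.AppsV1().ReplicaSets("default").Update(context.TODO(), rs, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}

	// Replay the recorded actions into a name -> size map, as the scale test does.
	nameToSize := map[string]int32{}
	for _, action := range client.Actions() {
		if update, ok := action.(k8stesting.UpdateAction); ok {
			if updated, ok := update.GetObject().(*appsv1.ReplicaSet); ok {
				nameToSize[updated.Name] = *updated.Spec.Replicas
			}
		}
	}
	fmt.Println(nameToSize["web-1"]) // 3
}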
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD (generated, vendored): 3 lines changed

@@ -45,8 +45,7 @@ go_test(
         "deployment_util_test.go",
         "hash_test.go",
     ],
-    importpath = "k8s.io/kubernetes/pkg/controller/deployment/util",
-    library = ":go_default_library",
+    embed = [":go_default_library"],
    deps = [
         "//pkg/controller:go_default_library",
         "//pkg/util/hash:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go (generated, vendored): 12 lines changed

@@ -124,18 +124,6 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi
 	return nil
 }
 
-// TODO: remove the duplicate
-// GetDeploymentConditionInternal returns the condition with the provided type.
-func GetDeploymentConditionInternal(status internalextensions.DeploymentStatus, condType internalextensions.DeploymentConditionType) *internalextensions.DeploymentCondition {
-	for i := range status.Conditions {
-		c := status.Conditions[i]
-		if c.Type == condType {
-			return &c
-		}
-	}
-	return nil
-}
-
 // SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
 // we are about to add already exists and has the same status and reason then we are not going to update.
 func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) {
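GetDeploymentCondition simply scans status.Conditions for the first condition of the requested type; the removed GetDeploymentConditionInternal was a duplicate over the internal types. A small usage sketch, written against the stable apps/v1 types rather than the vendored extensions/v1beta1:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// getDeploymentCondition mirrors the utility's shape: scan the conditions
// slice and return a pointer to a copy of the first condition of condType.
func getDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
	for i := range status.Conditions {
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}

func main() {
	status := appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{
			{Type: appsv1.DeploymentProgressing, Status: corev1.ConditionTrue, Reason: "NewReplicaSetAvailable"},
		},
	}
	if cond := getDeploymentCondition(status, appsv1.DeploymentProgressing); cond != nil {
		fmt.Println(cond.Reason) // NewReplicaSetAvailable
	}
}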
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/pod_util.go (generated, vendored): 3 lines changed

@@ -30,8 +30,7 @@ import (
 // see https://github.com/kubernetes/kubernetes/issues/21479
 type updatePodFunc func(pod *v1.Pod) error
 
-// UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored.
-// The returned bool value can be used to tell if the pod is actually updated.
+// UpdatePodWithRetries updates a pod with given applyUpdate function.
 func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
 	var pod *v1.Pod
 
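UpdatePodWithRetries re-reads the pod and reapplies the caller's mutation until the update stops failing with a write conflict. A hedged sketch of the same pattern built on client-go's retry helper (assumes the modern, context-aware client-go API rather than the vendored v1core/corelisters interfaces):

package podutil

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updatePodWithRetries re-reads the pod and reapplies the mutation whenever
// the update fails with a conflict, the same retry-on-conflict loop the
// vendored helper implements by hand.
func updatePodWithRetries(ctx context.Context, client kubernetes.Interface, namespace, name string, mutate func(*v1.Pod) error) (*v1.Pod, error) {
	var updated *v1.Pod
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if err := mutate(pod); err != nil {
			return err
		}
		updated, err = client.CoreV1().Pods(namespace).Update(ctx, pod, metav1.UpdateOptions{})
		return err
	})
	return updated, err
}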