mirror of https://github.com/ceph/ceph-csi.git

Commit: vendor files
vendor/k8s.io/kubernetes/pkg/controller/podgc/BUILD (generated, vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "gc_controller.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/podgc",
    deps = [
        "//pkg/controller:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["gc_controller_test.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/podgc",
    library = ":go_default_library",
    deps = [
        "//pkg/controller:go_default_library",
        "//pkg/controller/testutil:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
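Note: this BUILD file is a Bazel definition generated in the upstream Kubernetes repository via rules_go and is carried along verbatim by vendoring. The go_library target above presumably corresponds to the label //vendor/k8s.io/kubernetes/pkg/controller/podgc:go_default_library when built from a Kubernetes-style workspace root; ceph-csi itself consumes these sources through the regular Go vendor directory rather than through Bazel.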
vendor/k8s.io/kubernetes/pkg/controller/podgc/OWNERS (generated, vendored, new executable file, 6 lines)
@@ -0,0 +1,6 @@
approvers:
- foxish
- gmarek
reviewers:
- foxish
- gmarek
vendor/k8s.io/kubernetes/pkg/controller/podgc/doc.go (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package podgc contains a very simple pod "garbage collector" implementation,
// PodGCController, that runs in the controller manager. If the number of pods
// in terminated phases (right now either Failed or Succeeded) surpasses a
// configurable threshold, the controller will delete pods in terminated state
// until the system reaches the allowed threshold again. The PodGCController
// prioritizes pods to delete by sorting by creation timestamp and deleting the
// oldest objects first. The PodGCController will not delete non-terminated
// pods.
package podgc // import "k8s.io/kubernetes/pkg/controller/podgc"
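A quick worked example of the threshold behavior described above (the numbers are illustrative): with terminatedPodThreshold set to 1 and three pods a, b and c already in a terminated phase, a being the oldest, the controller needs to remove 3 - 1 = 2 pods and force-deletes a and b, the two oldest, bringing the terminated-pod count back to the threshold. A threshold of 0 or below disables this terminated-pod cleanup entirely, while the orphaned-pod and unscheduled-terminating checks in gc_controller.go below still run on every pass.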
vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller.go (generated, vendored, new file, 200 lines)
@@ -0,0 +1,200 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podgc

import (
	"sort"
	"sync"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/util/metrics"

	"github.com/golang/glog"
)

const (
	gcCheckPeriod = 20 * time.Second
)

type PodGCController struct {
	kubeClient clientset.Interface

	podLister       corelisters.PodLister
	podListerSynced cache.InformerSynced

	deletePod              func(namespace, name string) error
	terminatedPodThreshold int
}

func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController {
	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
	}
	gcc := &PodGCController{
		kubeClient:             kubeClient,
		terminatedPodThreshold: terminatedPodThreshold,
		deletePod: func(namespace, name string) error {
			glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
			return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
		},
	}

	gcc.podLister = podInformer.Lister()
	gcc.podListerSynced = podInformer.Informer().HasSynced

	return gcc
}

func (gcc *PodGCController) Run(stop <-chan struct{}) {
	defer utilruntime.HandleCrash()

	glog.Infof("Starting GC controller")
	defer glog.Infof("Shutting down GC controller")

	if !controller.WaitForCacheSync("GC", stop, gcc.podListerSynced) {
		return
	}

	go wait.Until(gcc.gc, gcCheckPeriod, stop)

	<-stop
}

func (gcc *PodGCController) gc() {
	pods, err := gcc.podLister.List(labels.Everything())
	if err != nil {
		glog.Errorf("Error while listing all Pods: %v", err)
		return
	}
	if gcc.terminatedPodThreshold > 0 {
		gcc.gcTerminated(pods)
	}
	gcc.gcOrphaned(pods)
	gcc.gcUnscheduledTerminating(pods)
}

func isPodTerminated(pod *v1.Pod) bool {
	if phase := pod.Status.Phase; phase != v1.PodPending && phase != v1.PodRunning && phase != v1.PodUnknown {
		return true
	}
	return false
}

func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
	terminatedPods := []*v1.Pod{}
	for _, pod := range pods {
		if isPodTerminated(pod) {
			terminatedPods = append(terminatedPods, pod)
		}
	}

	terminatedPodCount := len(terminatedPods)
	sort.Sort(byCreationTimestamp(terminatedPods))

	deleteCount := terminatedPodCount - gcc.terminatedPodThreshold

	if deleteCount > terminatedPodCount {
		deleteCount = terminatedPodCount
	}
	if deleteCount > 0 {
		glog.Infof("garbage collecting %v pods", deleteCount)
	}

	var wait sync.WaitGroup
	for i := 0; i < deleteCount; i++ {
		wait.Add(1)
		go func(namespace string, name string) {
			defer wait.Done()
			if err := gcc.deletePod(namespace, name); err != nil {
				// ignore not founds
				defer utilruntime.HandleError(err)
			}
		}(terminatedPods[i].Namespace, terminatedPods[i].Name)
	}
	wait.Wait()
}

// gcOrphaned deletes pods that are bound to nodes that don't exist.
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
	glog.V(4).Infof("GC'ing orphaned")
	// We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible.
	nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return
	}
	nodeNames := sets.NewString()
	for i := range nodes.Items {
		nodeNames.Insert(nodes.Items[i].Name)
	}

	for _, pod := range pods {
		if pod.Spec.NodeName == "" {
			continue
		}
		if nodeNames.Has(pod.Spec.NodeName) {
			continue
		}
		glog.V(2).Infof("Found orphaned Pod %v assigned to the Node %v. Deleting.", pod.Name, pod.Spec.NodeName)
		if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
			utilruntime.HandleError(err)
		} else {
			glog.V(0).Infof("Forced deletion of orphaned Pod %s succeeded", pod.Name)
		}
	}
}

// gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node.
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
	glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")

	for _, pod := range pods {
		if pod.DeletionTimestamp == nil || len(pod.Spec.NodeName) > 0 {
			continue
		}

		glog.V(2).Infof("Found unscheduled terminating Pod %v not assigned to any Node. Deleting.", pod.Name)
		if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
			utilruntime.HandleError(err)
		} else {
			glog.V(0).Infof("Forced deletion of unscheduled terminating Pod %s succeeded", pod.Name)
		}
	}
}

// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []*v1.Pod

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o byCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
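For reference, a minimal sketch of how this controller could be wired up outside the controller manager, assuming access to a clientset and a shared informer factory; the factory wiring mirrors the NewFromClient helper in the test file below, and names such as kubeClient, stopCh, the 30-second resync period and the threshold of 100 are illustrative assumptions, not part of this package:

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/controller/podgc"
)

func main() {
	// Assumes the process runs in-cluster; any rest.Config would work here.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	kubeClient := kubernetes.NewForConfigOrDie(cfg)

	// The shared informer factory supplies the pod informer that NewPodGC expects.
	factory := informers.NewSharedInformerFactory(kubeClient, 30*time.Second)
	gc := podgc.NewPodGC(kubeClient, factory.Core().V1().Pods(), 100 /* terminatedPodThreshold */)

	stopCh := make(chan struct{})
	factory.Start(stopCh)
	gc.Run(stopCh) // blocks until stopCh is closed
}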
vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller_test.go (generated, vendored, new file, 301 lines)
@@ -0,0 +1,301 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podgc

import (
	"sync"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/testutil"
)

type FakeController struct{}

func (*FakeController) Run(<-chan struct{}) {}

func (*FakeController) HasSynced() bool {
	return true
}

func (*FakeController) LastSyncResourceVersion() string {
	return ""
}

func alwaysReady() bool { return true }

func NewFromClient(kubeClient clientset.Interface, terminatedPodThreshold int) (*PodGCController, coreinformers.PodInformer) {
	informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
	podInformer := informerFactory.Core().V1().Pods()
	controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold)
	controller.podListerSynced = alwaysReady
	return controller, podInformer
}

func TestGCTerminated(t *testing.T) {
	type nameToPhase struct {
		name  string
		phase v1.PodPhase
	}

	testCases := []struct {
		pods            []nameToPhase
		threshold       int
		deletedPodNames sets.String
	}{
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed},
				{name: "b", phase: v1.PodSucceeded},
			},
			threshold: 0,
			// threshold = 0 disables terminated pod deletion
			deletedPodNames: sets.NewString(),
		},
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed},
				{name: "b", phase: v1.PodSucceeded},
				{name: "c", phase: v1.PodFailed},
			},
			threshold:       1,
			deletedPodNames: sets.NewString("a", "b"),
		},
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodRunning},
				{name: "b", phase: v1.PodSucceeded},
				{name: "c", phase: v1.PodFailed},
			},
			threshold:       1,
			deletedPodNames: sets.NewString("b"),
		},
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed},
				{name: "b", phase: v1.PodSucceeded},
			},
			threshold:       1,
			deletedPodNames: sets.NewString("a"),
		},
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed},
				{name: "b", phase: v1.PodSucceeded},
			},
			threshold:       5,
			deletedPodNames: sets.NewString(),
		},
	}

	for i, test := range testCases {
		client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*testutil.NewNode("node")}})
		gcc, podInformer := NewFromClient(client, test.threshold)
		deletedPodNames := make([]string, 0)
		var lock sync.Mutex
		gcc.deletePod = func(_, name string) error {
			lock.Lock()
			defer lock.Unlock()
			deletedPodNames = append(deletedPodNames, name)
			return nil
		}

		creationTime := time.Unix(0, 0)
		for _, pod := range test.pods {
			creationTime = creationTime.Add(1 * time.Hour)
			podInformer.Informer().GetStore().Add(&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
				Status:     v1.PodStatus{Phase: pod.phase},
				Spec:       v1.PodSpec{NodeName: "node"},
			})
		}

		gcc.gc()

		pass := true
		for _, pod := range deletedPodNames {
			if !test.deletedPodNames.Has(pod) {
				pass = false
			}
		}
		if len(deletedPodNames) != len(test.deletedPodNames) {
			pass = false
		}
		if !pass {
			t.Errorf("[%v]pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v", i, test.deletedPodNames, deletedPodNames)
		}
	}
}

func TestGCOrphaned(t *testing.T) {
	type nameToPhase struct {
		name  string
		phase v1.PodPhase
	}

	testCases := []struct {
		pods            []nameToPhase
		threshold       int
		deletedPodNames sets.String
	}{
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed},
				{name: "b", phase: v1.PodSucceeded},
			},
			threshold:       0,
			deletedPodNames: sets.NewString("a", "b"),
		},
		{
			pods: []nameToPhase{
				{name: "a", phase: v1.PodRunning},
			},
			threshold:       1,
			deletedPodNames: sets.NewString("a"),
		},
	}

	for i, test := range testCases {
		client := fake.NewSimpleClientset()
		gcc, podInformer := NewFromClient(client, test.threshold)
		deletedPodNames := make([]string, 0)
		var lock sync.Mutex
		gcc.deletePod = func(_, name string) error {
			lock.Lock()
			defer lock.Unlock()
			deletedPodNames = append(deletedPodNames, name)
			return nil
		}

		creationTime := time.Unix(0, 0)
		for _, pod := range test.pods {
			creationTime = creationTime.Add(1 * time.Hour)
			podInformer.Informer().GetStore().Add(&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
				Status:     v1.PodStatus{Phase: pod.phase},
				Spec:       v1.PodSpec{NodeName: "node"},
			})
		}

		pods, err := podInformer.Lister().List(labels.Everything())
		if err != nil {
			t.Errorf("Error while listing all Pods: %v", err)
			return
		}
		gcc.gcOrphaned(pods)

		pass := true
		for _, pod := range deletedPodNames {
			if !test.deletedPodNames.Has(pod) {
				pass = false
			}
		}
		if len(deletedPodNames) != len(test.deletedPodNames) {
			pass = false
		}
		if !pass {
			t.Errorf("[%v]pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v", i, test.deletedPodNames, deletedPodNames)
		}
	}
}

func TestGCUnscheduledTerminating(t *testing.T) {
	type nameToPhase struct {
		name              string
		phase             v1.PodPhase
		deletionTimeStamp *metav1.Time
		nodeName          string
	}

	testCases := []struct {
		name            string
		pods            []nameToPhase
		deletedPodNames sets.String
	}{
		{
			name: "Unscheduled pod in any phase must be deleted",
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
				{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
				{name: "c", phase: v1.PodRunning, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
			},
			deletedPodNames: sets.NewString("a", "b", "c"),
		},
		{
			name: "Scheduled pod in any phase must not be deleted",
			pods: []nameToPhase{
				{name: "a", phase: v1.PodFailed, deletionTimeStamp: nil, nodeName: ""},
				{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: nil, nodeName: "node"},
				{name: "c", phase: v1.PodRunning, deletionTimeStamp: &metav1.Time{}, nodeName: "node"},
			},
			deletedPodNames: sets.NewString(),
		},
	}

	for i, test := range testCases {
		client := fake.NewSimpleClientset()
		gcc, podInformer := NewFromClient(client, -1)
		deletedPodNames := make([]string, 0)
		var lock sync.Mutex
		gcc.deletePod = func(_, name string) error {
			lock.Lock()
			defer lock.Unlock()
			deletedPodNames = append(deletedPodNames, name)
			return nil
		}

		creationTime := time.Unix(0, 0)
		for _, pod := range test.pods {
			creationTime = creationTime.Add(1 * time.Hour)
			podInformer.Informer().GetStore().Add(&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime},
					DeletionTimestamp: pod.deletionTimeStamp},
				Status: v1.PodStatus{Phase: pod.phase},
				Spec:   v1.PodSpec{NodeName: pod.nodeName},
			})
		}

		pods, err := podInformer.Lister().List(labels.Everything())
		if err != nil {
			t.Errorf("Error while listing all Pods: %v", err)
			return
		}
		gcc.gcUnscheduledTerminating(pods)

		pass := true
		for _, pod := range deletedPodNames {
			if !test.deletedPodNames.Has(pod) {
				pass = false
			}
		}
		if len(deletedPodNames) != len(test.deletedPodNames) {
			pass = false
		}
		if !pass {
			t.Errorf("[%v]pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v, test: %v", i, test.deletedPodNames, deletedPodNames, test.name)
		}
	}
}