mirror of https://github.com/ceph/ceph-csi.git

vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/test/integration/scheduler/util.go (vendored, generated): 321 lines changed
@@ -18,16 +18,25 @@ package scheduler
 import (
 	"fmt"
+	"net/http"
+	"net/http/httptest"
 	"testing"
 	"time"

 	"k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/admission"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
+	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
@@ -35,12 +44,15 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/disruption"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/framework"
-
-	"net/http/httptest"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -54,50 +66,194 @@ type TestContext struct {
 	scheduler              *scheduler.Scheduler
 }

-// initTest initializes a test environment and creates a scheduler with default
-// configuration.
-func initTest(t *testing.T, nsPrefix string) *TestContext {
-	var context TestContext
-	masterConfig := framework.NewIntegrationTestMasterConfig()
-	_, context.httpServer, context.closeFn = framework.RunAMaster(masterConfig)
-
-	context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), context.httpServer, t)
-
-	context.clientSet = clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL})
-	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, 0)
-	podInformer := factory.NewPodInformer(context.clientSet, 12*time.Hour, v1.DefaultSchedulerName)
-	context.schedulerConfigFactory = factory.NewConfigFactory(
-		v1.DefaultSchedulerName,
-		context.clientSet,
-		context.informerFactory.Core().V1().Nodes(),
+// createConfiguratorWithPodInformer creates a configurator for scheduler.
+func createConfiguratorWithPodInformer(
+	schedulerName string,
+	clientSet clientset.Interface,
+	podInformer coreinformers.PodInformer,
+	informerFactory informers.SharedInformerFactory,
+) scheduler.Configurator {
+	return factory.NewConfigFactory(
+		schedulerName,
+		clientSet,
+		informerFactory.Core().V1().Nodes(),
 		podInformer,
-		context.informerFactory.Core().V1().PersistentVolumes(),
-		context.informerFactory.Core().V1().PersistentVolumeClaims(),
-		context.informerFactory.Core().V1().ReplicationControllers(),
-		context.informerFactory.Extensions().V1beta1().ReplicaSets(),
-		context.informerFactory.Apps().V1beta1().StatefulSets(),
-		context.informerFactory.Core().V1().Services(),
-		context.informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
-		context.informerFactory.Storage().V1().StorageClasses(),
+		informerFactory.Core().V1().PersistentVolumes(),
+		informerFactory.Core().V1().PersistentVolumeClaims(),
+		informerFactory.Core().V1().ReplicationControllers(),
+		informerFactory.Extensions().V1beta1().ReplicaSets(),
+		informerFactory.Apps().V1beta1().StatefulSets(),
+		informerFactory.Core().V1().Services(),
+		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
+		informerFactory.Storage().V1().StorageClasses(),
 		v1.DefaultHardPodAffinitySymmetricWeight,
 		true,
 		utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
+		false,
 	)
 }

+// initTestMasterAndScheduler initializes a test environment and creates a master with default
+// configuration.
+func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
+	var context TestContext
+
+	// 1. Create master
+	h := &framework.MasterHolder{Initialized: make(chan struct{})}
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		<-h.Initialized
+		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
+	}))
+
+	masterConfig := framework.NewIntegrationTestMasterConfig()
+
+	if admission != nil {
+		masterConfig.GenericConfig.AdmissionControl = admission
+	}
+
+	_, context.httpServer, context.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)
+
+	if nsPrefix != "default" {
+		context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
+	} else {
+		context.ns = framework.CreateTestingNamespace("default", s, t)
+	}
+
+	// 2. Create kubeclient
+	context.clientSet = clientset.NewForConfigOrDie(
+		&restclient.Config{
+			QPS: -1, Host: s.URL,
+			ContentConfig: restclient.ContentConfig{
+				GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
+			},
+		},
+	)
+	return &context
+}
+
+// initTestScheduler initializes a test environment and creates a scheduler with default
+// configuration.
+func initTestScheduler(
+	t *testing.T,
+	context *TestContext,
+	controllerCh chan struct{},
+	setPodInformer bool,
+	policy *schedulerapi.Policy,
+) *TestContext {
+	// Pod preemption is enabled by default scheduler configuration, but preemption only happens when PodPriority
+	// feature gate is enabled at the same time.
+	return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
+}
+
+// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
+// configuration and other options.
+func initTestSchedulerWithOptions(
+	t *testing.T,
+	context *TestContext,
+	controllerCh chan struct{},
+	setPodInformer bool,
+	policy *schedulerapi.Policy,
+	disablePreemption bool,
+) *TestContext {
+	// Enable EnableEquivalenceClassCache for all integration tests.
+	defer utilfeaturetesting.SetFeatureGateDuringTest(
+		t,
+		utilfeature.DefaultFeatureGate,
+		features.EnableEquivalenceClassCache, true)()
+
+	// 1. Create scheduler
+	context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)
+
+	var podInformer coreinformers.PodInformer
+
+	// create independent pod informer if required
+	if setPodInformer {
+		podInformer = factory.NewPodInformer(context.clientSet, 12*time.Hour)
+	} else {
+		podInformer = context.informerFactory.Core().V1().Pods()
+	}
+
+	context.schedulerConfigFactory = createConfiguratorWithPodInformer(
+		v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory)
+
 	var err error
-	context.schedulerConfig, err = context.schedulerConfigFactory.Create()
+
+	if policy != nil {
+		context.schedulerConfig, err = context.schedulerConfigFactory.CreateFromConfig(*policy)
+	} else {
+		context.schedulerConfig, err = context.schedulerConfigFactory.Create()
+	}
+
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
+
+	// set controllerCh if provided.
+	if controllerCh != nil {
+		context.schedulerConfig.StopEverything = controllerCh
+	}
+
+	// set DisablePreemption option
+	context.schedulerConfig.DisablePreemption = disablePreemption
+
+	// set setPodInformer if provided.
+	if setPodInformer {
+		go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
+		controller.WaitForCacheSync("scheduler", context.schedulerConfig.StopEverything, podInformer.Informer().HasSynced)
+	}
+
 	eventBroadcaster := record.NewBroadcaster()
-	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(context.clientSet.CoreV1().RESTClient()).Events("")})
-	go podInformer.Informer().Run(context.schedulerConfig.StopEverything)
+	context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
+		legacyscheme.Scheme,
+		v1.EventSource{Component: v1.DefaultSchedulerName},
+	)
+	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
+		Interface: context.clientSet.CoreV1().Events(""),
+	})

 	context.informerFactory.Start(context.schedulerConfig.StopEverything)
-	context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)
 	context.informerFactory.WaitForCacheSync(context.schedulerConfig.StopEverything)
+
+	context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{
+		Config: context.schedulerConfig},
+		nil...)
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler: %v", err)
 	}
 	context.scheduler.Run()
-	return &context
+	return context
 }

+// initDisruptionController initializes and runs a Disruption Controller to properly
+// update PodDisuptionBudget objects.
+func initDisruptionController(context *TestContext) *disruption.DisruptionController {
+	informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)
+
+	dc := disruption.NewDisruptionController(
+		informers.Core().V1().Pods(),
+		informers.Policy().V1beta1().PodDisruptionBudgets(),
+		informers.Core().V1().ReplicationControllers(),
+		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Extensions().V1beta1().Deployments(),
+		informers.Apps().V1beta1().StatefulSets(),
+		context.clientSet)
+
+	informers.Start(context.schedulerConfig.StopEverything)
+	informers.WaitForCacheSync(context.schedulerConfig.StopEverything)
+	go dc.Run(context.schedulerConfig.StopEverything)
+	return dc
+}
+
+// initTest initializes a test environment and creates master and scheduler with default
+// configuration.
+func initTest(t *testing.T, nsPrefix string) *TestContext {
+	return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
+}
+
+// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
+// configuration but with pod preemption disabled.
+func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
+	return initTestSchedulerWithOptions(
+		t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
+}
+
 // cleanupTest deletes the scheduler and the test namespace. It should be called
@@ -113,7 +269,8 @@ func cleanupTest(t *testing.T, context *TestContext) {

 // waitForReflection waits till the passFunc confirms that the object it expects
 // to see is in the store. Used to observe reflected events.
-func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
+func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string,
+	passFunc func(n interface{}) bool) error {
 	nodes := []*v1.Node{}
 	err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
 		n, err := nodeLister.Get(key)
@@ -185,6 +342,12 @@ func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
 	return cs.CoreV1().Nodes().Create(n)
 }

+// updateNodeStatus updates the status of node.
+func updateNodeStatus(cs clientset.Interface, node *v1.Node) error {
+	_, err := cs.CoreV1().Nodes().UpdateStatus(node)
+	return err
+}
+
 // createNodes creates `numNodes` nodes. The created node names will be in the
 // form of "`prefix`-X" where X is an ordinal.
 func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
@@ -228,7 +391,7 @@ func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:  conf.Name,
-					Image: framework.GetPauseImageName(cs),
+					Image: imageutils.GetPauseImageName(),
 				},
 			},
 			Tolerations: conf.Tolerations,
@@ -252,7 +415,8 @@ func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) {
 // createPausePodWithResource creates a pod with "Pause" image and the given
 // resources and returns its pointer and error status. The resource list can be
 // nil.
-func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
+func createPausePodWithResource(cs clientset.Interface, podName string,
+	nsName string, res *v1.ResourceList) (*v1.Pod, error) {
 	var conf pausePodConfig
 	if res == nil {
 		conf = pausePodConfig{
@@ -346,7 +510,8 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 			return false, nil
 		}
 		_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-		return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable, nil
+		return cond != nil && cond.Status == v1.ConditionFalse &&
+			cond.Reason == v1.PodReasonUnschedulable, nil
 	}
 }
@@ -374,6 +539,59 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
 	return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
 }

+// waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to
+// the expected values.
+func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
+	return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
+		cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
+		if err != nil {
+			return false, err
+		}
+		if len(cachedPDBs) != len(pdbs) {
+			return false, nil
+		}
+		for i, pdb := range pdbs {
+			found := false
+			for _, cpdb := range cachedPDBs {
+				if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace {
+					found = true
+					if cpdb.Status.CurrentHealthy != pdbPodNum[i] {
+						return false, nil
+					}
+				}
+			}
+			if !found {
+				return false, nil
+			}
+		}
+		return true, nil
+	})
+}
+
+// waitCachedPodsStable waits until scheduler cache has the given pods.
+func waitCachedPodsStable(context *TestContext, pods []*v1.Pod) error {
+	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
+		cachedPods, err := context.scheduler.Config().SchedulerCache.List(labels.Everything())
+		if err != nil {
+			return false, err
+		}
+		if len(pods) != len(cachedPods) {
+			return false, nil
+		}
+		for _, p := range pods {
+			actualPod, err1 := context.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
+			if err1 != nil {
+				return false, err1
+			}
+			cachedPod, err2 := context.scheduler.Config().SchedulerCache.GetPod(actualPod)
+			if err2 != nil || cachedPod == nil {
+				return false, err2
+			}
+		}
+		return true, nil
+	})
+}
+
 // deletePod deletes the given pod in the given namespace.
 func deletePod(cs clientset.Interface, podName string, nsName string) error {
 	return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
@@ -388,8 +606,35 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 		}
 	}
 	for _, p := range pods {
-		if err := wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, p.Namespace, p.Name)); err != nil {
+		if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
+			podDeleted(cs, p.Namespace, p.Name)); err != nil {
 			t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
 		}
 	}
 }
+
+// noPodsInNamespace returns true if no pods in the given namespace.
+func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.ConditionFunc {
+	return func() (bool, error) {
+		pods, err := c.CoreV1().Pods(podNamespace).List(metav1.ListOptions{})
+		if err != nil {
+			return false, err
+		}
+
+		return len(pods.Items) == 0, nil
+	}
+}
+
+// cleanupPodsInNamespace deletes the pods in the given namespace and waits for them to
+// be actually deleted.
+func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
+	if err := cs.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}); err != nil {
+		t.Errorf("error while listing pod in namespace %v: %v", ns, err)
+		return
+	}
+
+	if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
+		noPodsInNamespace(cs, ns)); err != nil {
+		t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
+	}
+}
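
The diff above replaces the old monolithic initTest with a small toolbox of composable helpers. For orientation, here is a minimal sketch (not part of the vendored file) of how a scheduler integration test typically wires them together. It assumes the test lives in the same scheduler package, since the helpers are unexported; the node capacity and object names are illustrative only.

package scheduler

import (
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// TestPausePodScheduling is a hypothetical test: bring up master and
// scheduler, register a node, create a pod, and wait for the scheduler
// cache to reflect it.
func TestPausePodScheduling(t *testing.T) {
	// Master + scheduler with default configuration and a dedicated pod informer.
	context := initTest(t, "sched-example")
	defer cleanupTest(t, context)

	// One schedulable node; the capacity values are made up for the example.
	res := &v1.ResourceList{
		v1.ResourcePods:   resource.MustParse("32"),
		v1.ResourceCPU:    resource.MustParse("2"),
		v1.ResourceMemory: resource.MustParse("2Gi"),
	}
	if _, err := createNode(context.clientSet, "node-1", res); err != nil {
		t.Fatalf("failed to create node: %v", err)
	}

	// A pause pod with no explicit resource requests (a nil resource list is allowed).
	pod, err := createPausePodWithResource(context.clientSet, "pod-1", context.ns.Name, nil)
	if err != nil {
		t.Fatalf("failed to create pod: %v", err)
	}

	// Block until the scheduler cache holds exactly this pod.
	if err := waitCachedPodsStable(context, []*v1.Pod{pod}); err != nil {
		t.Errorf("scheduler cache never stabilized: %v", err)
	}

	cleanupPods(context.clientSet, t, []*v1.Pod{pod})
}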
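The PodDisruptionBudget helpers follow the same pattern. A similarly hedged sketch, again assuming the same package and an illustrative PDB: initDisruptionController keeps PDB status current (it reuses context.schedulerConfig.StopEverything, so the scheduler must already be initialized), and waitCachedPDBsStable then polls the scheduler cache for the expected CurrentHealthy counts.

package scheduler

import (
	"testing"

	policy "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// verifyCachedPDB is a hypothetical helper: run the disruption controller,
// create one PDB, and wait until the scheduler cache reports the expected
// number of healthy pods behind it.
func verifyCachedPDB(t *testing.T, context *TestContext, healthy int32) {
	// Keep PodDisruptionBudget status updated for the duration of the test.
	initDisruptionController(context)

	minAvailable := intstr.FromInt(1)
	pdb := &policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "pdb-example", Namespace: context.ns.Name},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
		},
	}
	pdb, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(pdb)
	if err != nil {
		t.Fatalf("failed to create PDB: %v", err)
	}

	// The cached copy should converge to `healthy` CurrentHealthy pods.
	if err := waitCachedPDBsStable(context, []*policy.PodDisruptionBudget{pdb}, []int32{healthy}); err != nil {
		t.Errorf("cached PDB never stabilized: %v", err)
	}
}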