Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@@ -51,6 +51,7 @@ import (
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory"
+ taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/test/integration/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -61,9 +62,10 @@ type TestContext struct {
ns *v1.Namespace
clientSet *clientset.Clientset
informerFactory informers.SharedInformerFactory
- schedulerConfigFactory scheduler.Configurator
- schedulerConfig *scheduler.Config
+ schedulerConfigFactory factory.Configurator
+ schedulerConfig *factory.Config
scheduler *scheduler.Scheduler
+ stopCh chan struct{}
}
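Aside from the diff: the new stopCh field replaces the per-test controllerCh plumbing that is removed further down. A minimal sketch of its intended lifecycle, with illustrative wiring:

ctx := TestContext{stopCh: make(chan struct{})}
// ... start the scheduler, informers, and any controllers with ctx.stopCh ...
close(ctx.stopCh) // cleanupTest does this once, stopping everything together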
// createConfiguratorWithPodInformer creates a configurator for scheduler.
@@ -72,30 +74,36 @@ func createConfiguratorWithPodInformer(
clientSet clientset.Interface,
podInformer coreinformers.PodInformer,
informerFactory informers.SharedInformerFactory,
- ) scheduler.Configurator {
- return factory.NewConfigFactory(
- schedulerName,
- clientSet,
- informerFactory.Core().V1().Nodes(),
- podInformer,
- informerFactory.Core().V1().PersistentVolumes(),
- informerFactory.Core().V1().PersistentVolumeClaims(),
- informerFactory.Core().V1().ReplicationControllers(),
- informerFactory.Extensions().V1beta1().ReplicaSets(),
- informerFactory.Apps().V1beta1().StatefulSets(),
- informerFactory.Core().V1().Services(),
- informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
- informerFactory.Storage().V1().StorageClasses(),
- v1.DefaultHardPodAffinitySymmetricWeight,
- utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
- false,
- )
+ stopCh <-chan struct{},
+ ) factory.Configurator {
+ return factory.NewConfigFactory(&factory.ConfigFactoryArgs{
+ SchedulerName: schedulerName,
+ Client: clientSet,
+ NodeInformer: informerFactory.Core().V1().Nodes(),
+ PodInformer: podInformer,
+ PvInformer: informerFactory.Core().V1().PersistentVolumes(),
+ PvcInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
+ ReplicationControllerInformer: informerFactory.Core().V1().ReplicationControllers(),
+ ReplicaSetInformer: informerFactory.Apps().V1().ReplicaSets(),
+ StatefulSetInformer: informerFactory.Apps().V1().StatefulSets(),
+ ServiceInformer: informerFactory.Core().V1().Services(),
+ PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
+ StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
+ HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
+ EnableEquivalenceClassCache: utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache),
+ DisablePreemption: false,
+ PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
+ BindTimeoutSeconds: 600,
+ StopCh: stopCh,
+ })
}
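Aside from the diff: with the new signature, a caller threads a stop channel through and then derives the scheduler config from the configurator. A sketch assuming factory.Configurator still exposes Create(), as the error handling later in this file suggests:

stopCh := make(chan struct{})
configurator := createConfiguratorWithPodInformer(
	v1.DefaultSchedulerName, clientSet, podInformer, informerFactory, stopCh)
config, err := configurator.Create() // a *factory.Config under the new types
if err != nil {
	t.Fatalf("Couldn't create scheduler config: %v", err)
}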
// initTestMaster initializes a test environment and creates a master with default
// configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
- var context TestContext
+ context := TestContext{
+ stopCh: make(chan struct{}),
+ }
// 1. Create master
h := &framework.MasterHolder{Initialized: make(chan struct{})}
@@ -135,13 +143,12 @@ func initTestScheduler(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
func initTestScheduler(
t *testing.T,
context *TestContext,
- controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
) *TestContext {
// Pod preemption is enabled by the default scheduler configuration, but preemption only happens when the PodPriority
// feature gate is enabled at the same time.
- return initTestSchedulerWithOptions(t, context, controllerCh, setPodInformer, policy, false)
+ return initTestSchedulerWithOptions(t, context, setPodInformer, policy, false, true, time.Second)
}
// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
@@ -149,19 +156,21 @@ func initTestScheduler(
func initTestSchedulerWithOptions(
t *testing.T,
context *TestContext,
- controllerCh chan struct{},
setPodInformer bool,
policy *schedulerapi.Policy,
disablePreemption bool,
+ disableEquivalenceCache bool,
+ resyncPeriod time.Duration,
) *TestContext {
// Enable EnableEquivalenceClassCache for all integration tests.
- defer utilfeaturetesting.SetFeatureGateDuringTest(
- t,
- utilfeature.DefaultFeatureGate,
- features.EnableEquivalenceClassCache, true)()
+ if !disableEquivalenceCache {
+ defer utilfeaturetesting.SetFeatureGateDuringTest(
+ t,
+ utilfeature.DefaultFeatureGate,
+ features.EnableEquivalenceClassCache, true)()
+ }
// 1. Create scheduler
- context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, time.Second)
+ context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)
var podInformer coreinformers.PodInformer
@@ -173,7 +182,7 @@ func initTestSchedulerWithOptions(
}
context.schedulerConfigFactory = createConfiguratorWithPodInformer(
- v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory)
+ v1.DefaultSchedulerName, context.clientSet, podInformer, context.informerFactory, context.stopCh)
var err error
@@ -187,11 +196,6 @@ func initTestSchedulerWithOptions(
t.Fatalf("Couldn't create scheduler config: %v", err)
}
- // set controllerCh if provided.
- if controllerCh != nil {
- context.schedulerConfig.StopEverything = controllerCh
- }
// set DisablePreemption option
context.schedulerConfig.DisablePreemption = disablePreemption
@@ -246,21 +250,21 @@ func initDisruptionController(context *TestContext) *disruption.DisruptionController {
// initTest initializes a test environment and creates master and scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
- return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), nil, true, nil)
+ return initTestScheduler(t, initTestMaster(t, nsPrefix, nil), true, nil)
}
// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
// configuration but with pod preemption disabled.
func initTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
return initTestSchedulerWithOptions(
- t, initTestMaster(t, nsPrefix, nil), nil, true, nil, true)
+ t, initTestMaster(t, nsPrefix, nil), true, nil, true, true, time.Second)
}
// cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test.
func cleanupTest(t *testing.T, context *TestContext) {
// Kill the scheduler.
- close(context.schedulerConfig.StopEverything)
+ close(context.stopCh)
// Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t)
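Aside from the diff: taken together, a typical integration test built on these helpers now follows this pattern (the test name is illustrative):

func TestExample(t *testing.T) {
	context := initTest(t, "example")
	defer cleanupTest(t, context) // closes context.stopCh, deletes nodes and the namespace
	// ... create nodes and pods, then assert on scheduling behavior ...
}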
@@ -322,24 +326,35 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[string]string) error {
return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}
- // createNode creates a node with the given resource list and
- // returns a pointer and error status. If 'res' is nil, a predefined amount of
+ // initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
// resource will be used.
- func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
+ func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
// if resource is nil, we use a default amount of resources for the node.
if res == nil {
res = &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
}
}
n := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: *res,
+ Images: images,
},
}
- return cs.CoreV1().Nodes().Create(n)
+ return n
}
+ // createNode creates a node with the given resource list.
+ func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
+ return cs.CoreV1().Nodes().Create(initNode(name, res, nil))
+ }
+ // createNodeWithImages creates a node with the given resource list and images.
+ func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
+ return cs.CoreV1().Nodes().Create(initNode(name, res, images))
+ }
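Aside from the diff: a short usage sketch of the new image-aware helper; the image name and size are illustrative:

images := []v1.ContainerImage{
	{Names: []string{"k8s.gcr.io/pause:3.1"}, SizeBytes: 742472},
}
node, err := createNodeWithImages(cs, "node-0", nil, images) // nil res falls back to the default capacity
if err != nil {
	t.Fatalf("Failed to create node: %v", err)
}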
// updateNodeStatus updates the status of node.
@@ -363,6 +378,44 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
return nodes[:], nil
}
+ // nodeTainted returns a condition function that returns true if the given node contains
+ // the taints.
+ func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
+ return func() (bool, error) {
+ node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if len(taints) != len(node.Spec.Taints) {
+ return false, nil
+ }
+ for _, taint := range taints {
+ if !taintutils.TaintExists(node.Spec.Taints, &taint) {
+ return false, nil
+ }
+ }
+ return true, nil
+ }
+ }
+ // waitForNodeTaints waits for a node to have the target taints and returns
+ // an error if it does not have the taints within the given timeout.
+ func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
+ return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
+ }
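Aside from the diff: a test that taints a node and then waits for the taint to become visible might look like this; the taint key is illustrative:

taint := v1.Taint{Key: "example.com/under-test", Effect: v1.TaintEffectNoSchedule}
// ... apply the taint via the API or the controller under test ...
if err := waitForNodeTaints(cs, node, []v1.Taint{taint}); err != nil {
	t.Fatalf("node %v did not get the expected taints: %v", node.Name, err)
}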
+ // cleanupNodes deletes all nodes.
+ func cleanupNodes(cs clientset.Interface, t *testing.T) {
+ err := cs.CoreV1().Nodes().DeleteCollection(
+ metav1.NewDeleteOptions(0), metav1.ListOptions{})
+ if err != nil {
+ t.Errorf("error while deleting all nodes: %v", err)
+ }
+ }
type pausePodConfig struct {
Name string
Namespace string
@@ -442,6 +495,43 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
if err != nil {
return nil, fmt.Errorf("Error creating pause pod: %v", err)
}
+ if err = waitForPodToSchedule(cs, pod); err != nil {
+ return pod, fmt.Errorf("Pod %v/%v didn't schedule successfully. Error: %v", pod.Namespace, pod.Name, err)
+ }
+ if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
+ return pod, fmt.Errorf("Error getting pod %v/%v info: %v", pod.Namespace, pod.Name, err)
+ }
+ return pod, nil
+ }
+ type podWithContainersConfig struct {
+ Name string
+ Namespace string
+ Containers []v1.Container
+ }
+ // initPodWithContainers initializes a pod API object from the given config. This is used primarily for generating
+ // pods with containers each having a specific image.
+ func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: conf.Name,
+ Namespace: conf.Namespace,
+ },
+ Spec: v1.PodSpec{
+ Containers: conf.Containers,
+ },
+ }
+ return pod
+ }
+ // runPodWithContainers creates a pod with the given config and containers and waits
+ // until it is scheduled. It returns the pod's pointer and error status.
+ func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
+ pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
+ if err != nil {
+ return nil, fmt.Errorf("Error creating pod-with-containers: %v", err)
+ }
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
}
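Aside from the diff: combining the two new helpers, a test can run a pod whose containers request specific images; the pod and image names are illustrative:

pod := initPodWithContainers(cs, &podWithContainersConfig{
	Name:      "pod-with-images",
	Namespace: context.ns.Name,
	Containers: []v1.Container{
		{Name: "c0", Image: "k8s.gcr.io/pause:3.1"},
	},
})
pod, err := runPodWithContainers(cs, pod)
if err != nil {
	t.Fatalf("Failed to run pod: %v", err)
}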
@@ -539,20 +629,20 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}
- // waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to
+ // waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
// the expected values.
- func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
+ func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
- cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything())
+ pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{})
if err != nil {
return false, err
}
- if len(cachedPDBs) != len(pdbs) {
+ if len(pdbList.Items) != len(pdbs) {
return false, nil
}
for i, pdb := range pdbs {
found := false
- for _, cpdb := range cachedPDBs {
+ for _, cpdb := range pdbList.Items {
if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace {
found = true
if cpdb.Status.CurrentHealthy != pdbPodNum[i] {
@@ -638,3 +728,15 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
}
}
+ func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
+ schedulerCacheIsEmpty := func() (bool, error) {
+ snapshot := sched.Cache().Snapshot()
+ return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil
+ }
+ if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
+ t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
+ }
+ }
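Aside from the diff: a sketch of how a test drains its state at the end using the new helpers together, assuming the TestContext wiring above:

cleanupNodes(cs, t)                                // delete every node the test created
waitForSchedulerCacheCleanup(context.scheduler, t) // wait until the scheduler cache reflects it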