mirror of https://github.com/ceph/ceph-csi.git
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go (generated, vendored) | 11 changed lines
@@ -26,6 +26,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -55,7 +56,7 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() {
 	It("should ensure that critical pod is scheduled in case there is no resources available", func() {
 		By("reserving all available cpu")
 		err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
-		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "reserve-all-cpu")
+		defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu")
 		framework.ExpectNoError(err)
 
 		By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
@@ -68,8 +69,8 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() {
 		deployment := deployments.Items[0]
 		replicas := uint(*(deployment.Spec.Replicas))
 
-		err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
-		defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
+		err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
+		defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
 		framework.ExpectNoError(err)
 
 	})
@@ -80,7 +81,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
 	replicas := millicores / 100
 
 	reserveCpu(f, id, 1, 100)
-	framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
+	framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
 
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
 		pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
@@ -124,7 +125,7 @@ func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
 		Name:       id,
 		Namespace:  f.Namespace.Name,
 		Timeout:    defaultTimeout,
-		Image:      framework.GetPauseImageName(f.ClientSet),
+		Image:      imageutils.GetPauseImageName(),
 		Replicas:   replicas,
 		CpuRequest: request,
 	}
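
Note (not part of the commit): the functional change in this vendored file is an e2e framework API update. DeleteRCAndPods becomes DeleteRCAndWaitForGC, ScaleDeployment and ScaleRC drop the f.InternalClientset argument in favour of the scale subresource getter (f.ScalesGetter), and the pause image helper moves from the framework package to test/utils/image. A minimal sketch of the new call shapes follows; the wrapper functions, their parameters, and the package name are hypothetical, while the framework/imageutils calls follow the signatures shown in the diff above.

// Sketch only: post-bump call shapes for the helpers touched by this diff.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// bumpAndRestoreDeployment scales a kube-system deployment up by one replica
// and then back down, the way the rescheduler test does after the update.
func bumpAndRestoreDeployment(f *framework.Framework, name string, replicas uint) {
	// ScaleDeployment no longer takes f.InternalClientset; f.ScalesGetter is
	// passed instead.
	err := framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, name, replicas+1, true)
	framework.ExpectNoError(err)
	framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, name, replicas, true))
}

// pauseImageName shows the relocated helper: the pause image is now resolved
// by test/utils/image and no longer needs a clientset argument.
func pauseImageName() string {
	return imageutils.GetPauseImageName()
}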