vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@@ -22,10 +22,10 @@ go_library(
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/scheduling:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
@@ -37,7 +37,7 @@ go_library(
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -28,6 +28,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -91,8 +92,8 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
WaitForSchedulerAfterAction(f, func() error {
err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
return err
}, rcName, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, rcName)
}, ns, rcName, false)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, rcName)
// the first replica pod is scheduled, and the second pod will be rejected.
verifyResult(cs, 1, 1, ns)
})
@@ -139,8 +140,8 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
},
},
}
rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, framework.GetPauseImageName(f.ClientSet))
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, affinityRCName)
rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, imageutils.GetPauseImageName())
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, affinityRCName)
// RC should be running successfully
// TODO: WaitForSchedulerAfterAction() can only be used to wait for failure event,
@@ -155,9 +156,9 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
// use scale to create another equivalent pod and wait for failure event
WaitForSchedulerAfterAction(f, func() error {
err := framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
return err
}, affinityRCName, false)
}, ns, affinityRCName, false)
// and this new pod should be rejected since node label has been updated
verifyReplicasResult(cs, replica, 1, ns, affinityRCName)
})
@@ -166,7 +167,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port")
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
@@ -216,13 +217,13 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
},
}
rc := getRCWithInterPodAffinityNodeSelector(labelRCName, labelsMap, replica, affinity,
framework.GetPauseImageName(f.ClientSet), map[string]string{k: v})
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, labelRCName)
imageutils.GetPauseImageName(), map[string]string{k: v})
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, labelRCName)
WaitForSchedulerAfterAction(f, func() error {
_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
return err
}, labelRCName, false)
}, ns, labelRCName, false)
// these two replicas should both be rejected since podAntiAffinity says they have anti-affinity with pod {"service": "S1"}
verifyReplicasResult(cs, 0, replica, ns, labelRCName)
@@ -273,7 +274,7 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
NodeSelector: nodeSelector,

View File

@@ -23,12 +23,12 @@ import (
"k8s.io/api/core/v1"
)
func scheduleSuccessEvent(podName, nodeName string) func(*v1.Event) bool {
func scheduleSuccessEvent(ns, podName, nodeName string) func(*v1.Event) bool {
return func(e *v1.Event) bool {
return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" &&
strings.HasPrefix(e.Name, podName) &&
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", podName, nodeName))
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v/%v to %v", ns, podName, nodeName))
}
}
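
Note: the predicate change above tracks the scheduler's new event message, which now includes the pod's namespace ("Successfully assigned <ns>/<pod> to <node>"). A minimal usage sketch, assuming the common.ObserveEventAfterAction helper shown later in this commit and ns, podName, action, and f already in scope:

	// Perform the action, then wait for a namespaced "Scheduled" event for the pod.
	predicate := scheduleSuccessEvent(ns, podName, "" /* any node */)
	success, err := common.ObserveEventAfterAction(f, predicate, action)
	Expect(err).NotTo(HaveOccurred())
	Expect(success).To(Equal(true))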

View File

@@ -42,10 +42,10 @@ var _ = SIGDescribe("LimitRange", func() {
It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
By("Creating a LimitRange")
min := getResourceList("50m", "100Mi")
max := getResourceList("500m", "500Mi")
defaultLimit := getResourceList("500m", "500Mi")
defaultRequest := getResourceList("100m", "200Mi")
min := getResourceList("50m", "100Mi", "100Gi")
max := getResourceList("500m", "500Mi", "500Gi")
defaultLimit := getResourceList("500m", "500Mi", "500Gi")
defaultRequest := getResourceList("100m", "200Mi", "200Gi")
maxLimitRequestRatio := v1.ResourceList{}
limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
min, max,
@@ -104,7 +104,7 @@ var _ = SIGDescribe("LimitRange", func() {
}
By("Creating a Pod with partial resource requirements")
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
@@ -114,7 +114,7 @@ var _ = SIGDescribe("LimitRange", func() {
// This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Request will default to the Limit
// This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")}
expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi", "150Gi"), Limits: getResourceList("300m", "500Mi", "500Gi")}
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
@@ -125,28 +125,28 @@ var _ = SIGDescribe("LimitRange", func() {
}
By("Failing to create a Pod with less than min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Updating a LimitRange")
newMin := getResourceList("9m", "49Mi")
newMin := getResourceList("9m", "49Mi", "49Gi")
limitRange.Spec.Limits[0].Min = newMin
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
@@ -183,144 +183,13 @@ var _ = SIGDescribe("LimitRange", func() {
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
By("Creating a Pod with more than former max resources")
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
})
})
var _ = framework.KubeDescribe("LimitRange", func() {
f := framework.NewDefaultFramework("limitrange")
BeforeEach(func() {
// only run the tests when LocalStorageCapacityIsolation feature is enabled
framework.SkipUnlessLocalEphemeralStorageEnabled()
})
It("should create a LimitRange with default ephemeral storage and ensure pod has the default applied.", func() {
By("Creating a LimitRange")
min := getEphemeralStorageResourceList("100Mi")
max := getEphemeralStorageResourceList("500Mi")
defaultLimit := getEphemeralStorageResourceList("500Mi")
defaultRequest := getEphemeralStorageResourceList("200Mi")
maxLimitRequestRatio := v1.ResourceList{}
limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
min, max,
defaultLimit, defaultRequest,
maxLimitRequestRatio)
limitRange, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Removing limitrange")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(limitRange.Name, nil)
Expect(err).NotTo(HaveOccurred())
}()
By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with no resource requirements")
pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Removing pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
}()
By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
}
}
By("Creating a Pod with request")
pod = f.NewTestPod("pod-partial-resources", getEphemeralStorageResourceList("150m"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Removing pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
}()
By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// If you specify a Request, and no Limit, the Limit will be set to the default limit
expected = v1.ResourceRequirements{Requests: getEphemeralStorageResourceList("150Mi"), Limits: defaultLimit}
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
}
}
By("Creating a Pod with limit")
pod = f.NewTestPod("pod-partial-resources", v1.ResourceList{}, getEphemeralStorageResourceList("300m"))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Removing pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
}()
By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// If you specify a Limit, and no Request, the Request will default to the Limit
// This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
expected = v1.ResourceRequirements{Requests: getEphemeralStorageResourceList("300Mi"), Limits: getEphemeralStorageResourceList("300Mi")}
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
framework.Logf("Pod %+v does not have the expected requirements", pod)
Expect(err).NotTo(HaveOccurred())
}
}
By("Failing to create a Pod with less than min resources")
pod = f.NewTestPod(podName, getEphemeralStorageResourceList("50Mi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getEphemeralStorageResourceList("600Mi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
})
})
func getEphemeralStorageResourceList(ephemeralStorage string) v1.ResourceList {
res := v1.ResourceList{}
if ephemeralStorage != "" {
res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
}
return res
}
func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests)
@@ -346,7 +215,7 @@ func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
return nil
}
func getResourceList(cpu, memory string) v1.ResourceList {
func getResourceList(cpu, memory string, ephemeralStorage string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[v1.ResourceCPU] = resource.MustParse(cpu)
@@ -354,6 +223,9 @@ func getResourceList(cpu, memory string) v1.ResourceList {
if memory != "" {
res[v1.ResourceMemory] = resource.MustParse(memory)
}
if ephemeralStorage != "" {
res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
}
return res
}
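
With the widened signature above, every call site now passes a third, ephemeral-storage argument; an empty string skips that resource entirely. An illustrative sketch (the values here are assumptions, not from the diff):

	requests := getResourceList("100m", "200Mi", "1Gi") // cpu, memory, ephemeral-storage
	limits := getResourceList("500m", "500Mi", "")      // "" omits the ephemeral-storage entry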

View File

@@ -40,54 +40,11 @@ const (
driverInstallTimeout = 10 * time.Minute
)
type podCreationFuncType func() *v1.Pod
var (
gpuResourceName v1.ResourceName
dsYamlUrl string
podCreationFunc podCreationFuncType
)
func makeCudaAdditionTestPod() *v1.Pod {
podName := testPodNamePrefix + string(uuid.NewUUID())
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "vector-addition",
Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
gpuResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "nvidia-libraries",
MountPath: "/usr/local/nvidia/lib64",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "nvidia-libraries",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/home/kubernetes/bin/nvidia/lib",
},
},
},
},
},
}
return testPod
}
func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
podName := testPodNamePrefix + string(uuid.NewUUID())
testPod := &v1.Pod{
@@ -163,27 +120,20 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
}
framework.Logf("Cluster is running on COS. Proceeding with test")
if f.BaseName == "gpus" {
dsYamlUrl = "https://raw.githubusercontent.com/ContainerEngine/accelerators/master/cos-nvidia-gpu-installer/daemonset.yaml"
gpuResourceName = v1.ResourceNvidiaGPU
podCreationFunc = makeCudaAdditionTestPod
dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
if dsYamlUrlFromEnv != "" {
dsYamlUrl = dsYamlUrlFromEnv
} else {
dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
if dsYamlUrlFromEnv != "" {
dsYamlUrl = dsYamlUrlFromEnv
} else {
dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
}
gpuResourceName = framework.NVIDIAGPUResourceName
podCreationFunc = makeCudaAdditionDevicePluginTestPod
dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
}
gpuResourceName = framework.NVIDIAGPUResourceName
framework.Logf("Using %v", dsYamlUrl)
// Creates the DaemonSet that installs Nvidia Drivers.
ds, err := framework.DsFromManifest(dsYamlUrl)
Expect(err).NotTo(HaveOccurred())
ds.Namespace = f.Namespace.Name
_, err = f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Create(ds)
_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
framework.Logf("Successfully created daemonset to install Nvidia drivers.")
@@ -199,7 +149,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
var rsgather *framework.ContainerResourceGatherer
if setupResourceGatherer {
framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second, true}, pods)
rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, MasterOnly: false, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
go rsgather.StartGatheringData()
}
@@ -218,7 +168,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
framework.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app")
podList := []*v1.Pod{}
for i := int64(0); i < getGPUsAvailable(f); i++ {
podList = append(podList, f.PodClient().Create(podCreationFunc()))
podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod()))
}
framework.Logf("Wait for all test pods to succeed")
// Wait for all pods to succeed
@@ -234,13 +184,6 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
framework.ExpectNoError(err, "getting resource usage summary")
}
var _ = SIGDescribe("[Feature:GPU]", func() {
f := framework.NewDefaultFramework("gpus")
It("run Nvidia GPU tests on Container Optimized OS only", func() {
testNvidiaGPUsOnCOS(f)
})
})
var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus")
It("run Nvidia GPU Device Plugin tests on Container Optimized OS only", func() {

View File

@@ -21,7 +21,6 @@ import (
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -32,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -47,6 +47,7 @@ var masterNodes sets.String
type pausePodConfig struct {
Name string
Namespace string
Affinity *v1.Affinity
Annotations, Labels, NodeSelector map[string]string
Resources *v1.ResourceRequirements
@@ -71,7 +72,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
if err == nil && *(rc.Spec.Replicas) != 0 {
By("Cleaning up the replication controller")
err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName)
err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
framework.ExpectNoError(err)
}
})
@@ -147,7 +148,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "additional"},
}), podName, false)
}), ns, podName, false)
verifyResult(cs, podsNeededForSaturation, 1, ns)
})
@@ -222,7 +223,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
},
},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, podsNeededForSaturation, 1, ns)
})
@@ -337,7 +338,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
},
},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, len(fillerPods), 1, ns)
})
@@ -362,33 +363,10 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, 0, 1, ns)
})
It("validates that a pod with an invalid NodeAffinity is rejected", func() {
By("Trying to launch a pod with an invalid Affinity data.")
podName := "without-label"
_, err := cs.CoreV1().Pods(ns).Create(initPausePod(f, pausePodConfig{
Name: podName,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{},
},
},
},
},
},
}))
if err == nil || !errors.IsInvalid(err) {
framework.Failf("Expect error of invalid, got : %v", err)
}
})
/*
Testname: scheduler-node-selector-matching
Description: Ensure that scheduler respects the NodeSelector field
@@ -461,7 +439,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
},
Labels: map[string]string{"name": "restricted"},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podName, false)
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
verifyResult(cs, 0, 1, ns)
})
@@ -585,11 +563,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
NodeSelector: map[string]string{labelKey: labelValue},
}
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), podNameNoTolerations, false)
WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false)
verifyResult(cs, 0, 1, ns)
By("Removing taint off the node")
WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), podNameNoTolerations, true)
WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true)
verifyResult(cs, 1, 0, ns)
})
@@ -609,14 +587,15 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to create a pod(pod1) with hostport 80 and hostIP 127.0.0.1 and expect scheduled")
creatHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", v1.ProtocolTCP, nodeSelector, true)
port := int32(54321)
By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
creatHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)
By("Trying to create another pod(pod2) with hostport 80 but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled")
creatHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", v1.ProtocolTCP, nodeSelector, true)
By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
creatHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)
By("Trying to create a third pod(pod3) with hostport 80, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides")
creatHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", v1.ProtocolUDP, nodeSelector, true)
By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
creatHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
})
It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
@@ -634,18 +613,21 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to create a pod(pod4) with hostport 80 and hostIP 0.0.0.0(empty string here) and expect scheduled")
creatHostPortPodOnNode(f, "pod4", ns, "", v1.ProtocolTCP, nodeSelector, true)
port := int32(54322)
By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
creatHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)
By("Trying to create another pod(pod5) with hostport 80 but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled")
creatHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", v1.ProtocolTCP, nodeSelector, false)
By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
creatHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
})
})
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
var gracePeriod = int64(1)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Namespace: conf.Namespace,
Labels: conf.Labels,
Annotations: conf.Annotations,
OwnerReferences: conf.OwnerReferences,
@@ -656,13 +638,14 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
Containers: []v1.Container{
{
Name: conf.Name,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Ports: conf.Ports,
},
},
Tolerations: conf.Tolerations,
NodeName: conf.NodeName,
PriorityClassName: conf.PriorityClassName,
Tolerations: conf.Tolerations,
NodeName: conf.NodeName,
PriorityClassName: conf.PriorityClassName,
TerminationGracePeriodSeconds: &gracePeriod,
},
}
if conf.Resources != nil {
@@ -672,7 +655,11 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
}
func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
namespace := conf.Namespace
if len(namespace) == 0 {
namespace = f.Namespace.Name
}
pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(initPausePod(f, conf))
framework.ExpectNoError(err)
return pod
}
@@ -680,7 +667,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
}
@@ -734,10 +721,10 @@ func createPausePodAction(f *framework.Framework, conf pausePodConfig) common.Ac
// WaitForSchedulerAfterAction performs the provided action and then waits for
// scheduler to act on the given pod.
func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, podName string, expectSuccess bool) {
func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, ns, podName string, expectSuccess bool) {
predicate := scheduleFailureEvent(podName)
if expectSuccess {
predicate = scheduleSuccessEvent(podName, "" /* any node */)
predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
}
success, err := common.ObserveEventAfterAction(f, predicate, action)
Expect(err).NotTo(HaveOccurred())
@@ -821,7 +808,7 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
}
@@ -832,12 +819,12 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
}
// create a pod that uses a hostport on the specified node, according to the nodeSelector
func creatHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
func creatHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
createPausePod(f, pausePodConfig{
Name: podName,
Ports: []v1.ContainerPort{
{
HostPort: 80,
HostPort: port,
ContainerPort: 80,
Protocol: protocol,
HostIP: hostIP,
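
This file also threads a Namespace field through pausePodConfig, with createPausePod falling back to the framework's namespace when it is left empty. A hedged sketch of the new capability, mirroring the critical-pod usage in the preemption test later in this commit (f is assumed in scope):

	// Place a pause pod outside the test namespace, e.g. in kube-system,
	// which the preemption tests use for cluster-critical pods.
	conf := pausePodConfig{
		Name:              "critical-pod",
		Namespace:         metav1.NamespaceSystem,
		PriorityClassName: scheduling.SystemClusterCritical,
	}
	pod := createPausePod(f, conf)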

View File

@@ -21,12 +21,12 @@ import (
"time"
"k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1alpha1"
schedulerapi "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -51,12 +51,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
_, err := f.ClientSet.SchedulingV1alpha1().PriorityClasses().Create(&v1alpha1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1alpha1().PriorityClasses().Create(&v1alpha1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: mediumPriorityClassName}, Value: mediumPriority})
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: mediumPriorityClassName}, Value: mediumPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
_, err = f.ClientSet.SchedulingV1alpha1().PriorityClasses().Create(&v1alpha1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: lowPriorityClassName}, Value: lowPriority})
_, err = f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: lowPriorityClassName}, Value: lowPriority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
framework.WaitForAllNodesHealthy(cs, time.Minute)
@@ -168,7 +167,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
// Create a critical pod and make sure it is scheduled.
runPausePod(f, pausePodConfig{
Name: "critical-pod",
PriorityClassName: schedulerapi.SystemClusterCritical,
Namespace: metav1.NamespaceSystem,
PriorityClassName: scheduling.SystemClusterCritical,
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
@@ -184,6 +184,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
framework.ExpectNoError(err)
Expect(livePod.DeletionTimestamp).To(BeNil())
}
// Clean-up the critical pod
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
// This test verifies that when a high priority pod is pending and its
@@ -311,3 +314,38 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func
}
})
})
var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", func() {
var cs clientset.Interface
var ns string
f := framework.NewDefaultFramework("sched-pod-priority")
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
})
// This test verifies that system critical priorities are created automatically and resolved properly.
It("validates critical system priorities are created and resolved", func() {
// Create pods that use system critical priorities and
By("Create pods that use critical system priorities.")
systemPriorityClasses := []string{
scheduling.SystemNodeCritical, scheduling.SystemClusterCritical,
}
for i, spc := range systemPriorityClasses {
pod := createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, spc),
Namespace: metav1.NamespaceSystem,
PriorityClassName: spc,
})
Expect(pod.Spec.Priority).NotTo(BeNil())
framework.Logf("Created pod: %v", pod.Name)
// Clean-up the pod.
err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
})
})
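
For reference, the priority API here moves from scheduling/v1alpha1 to scheduling/v1beta1. A minimal sketch of creating a PriorityClass against the new group, following the BeforeEach above (the name and value are illustrative):

	pc := &schedulerapi.PriorityClass{ // schedulerapi = "k8s.io/api/scheduling/v1beta1"
		ObjectMeta: metav1.ObjectMeta{Name: "example-high-priority"},
		Value:      1000,
	}
	_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(pc)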

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
type Resource struct {
@@ -152,7 +153,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rc.Name); err != nil {
if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
}
}()
@@ -194,7 +195,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: "name=scheduler-priority-avoid-pod",
})
@@ -376,7 +377,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string,
Containers: []v1.Container{
{
Name: rsName,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Resources: *resource,
},
},

View File

@@ -26,6 +26,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -55,7 +56,7 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() {
It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "reserve-all-cpu")
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu")
framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
@@ -68,8 +69,8 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() {
deployment := deployments.Items[0]
replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
@@ -80,7 +81,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
replicas := millicores / 100
reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
@@ -124,7 +125,7 @@ func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
CpuRequest: request,
}

View File

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -43,83 +44,6 @@ const (
var classGold string = "gold"
var extendedResourceName string = "example.com/dongle"
var _ = SIGDescribe("ResourceQuota", func() {
f := framework.NewDefaultFramework("resourcequota")
BeforeEach(func() {
// only run the tests when LocalStorageCapacityIsolation feature is enabled
framework.SkipUnlessLocalEphemeralStorageEnabled()
})
It("should create a ResourceQuota and capture the life of a pod.", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuotaForEphemeralStorage(quotaName)
resourceQuota, err := createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Removing resourceQuota")
err = deleteResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota.Name)
Expect(err).NotTo(HaveOccurred())
}()
By("Ensuring resource quota status is calculated")
usedResources := v1.ResourceList{}
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod that fits quota")
podName := "test-pod"
requests := v1.ResourceList{}
requests[v1.ResourceEphemeralStorage] = resource.MustParse("300Mi")
pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
podToUpdate := pod
defer func() {
By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}()
By("Ensuring ResourceQuota status captures the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Not allowing a pod to be created that exceeds remaining quota")
requests = v1.ResourceList{}
requests[v1.ResourceEphemeralStorage] = resource.MustParse("300Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Ensuring a pod cannot update its resource requirements")
// a pod cannot dynamically update its resource requirements.
requests = v1.ResourceList{}
requests[v1.ResourceEphemeralStorage] = resource.MustParse("100Mi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
Expect(err).To(HaveOccurred())
By("Ensuring attempts to update pod resource requirements did not change quota usage")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
var _ = SIGDescribe("ResourceQuota", func() {
f := framework.NewDefaultFramework("resourcequota")
@@ -372,6 +296,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
limits := v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("252Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod := newTestPodForQuota(f, podName, requests, limits)
@@ -384,6 +309,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -401,6 +327,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
limits = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
@@ -412,6 +339,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests = v1.ResourceList{}
requests[v1.ResourceCPU] = resource.MustParse("100m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
Expect(err).To(HaveOccurred())
@@ -429,6 +357,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[v1.ResourceCPU] = resource.MustParse("0")
usedResources[v1.ResourceMemory] = resource.MustParse("0")
usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -821,10 +750,11 @@ func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1
}
}
// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing alpha feature LocalStorageCapacityIsolation
// newTestResourceQuotaForEphemeralStorage returns a quota that enforces default constraints for testing feature LocalStorageCapacityIsolation
func newTestResourceQuotaForEphemeralStorage(name string) *v1.ResourceQuota {
hard := v1.ResourceList{}
hard[v1.ResourceEphemeralStorage] = resource.MustParse("500Mi")
hard[v1.ResourceQuotas] = resource.MustParse("1")
return &v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.ResourceQuotaSpec{Hard: hard},
@@ -846,6 +776,7 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
hard[v1.ResourceSecrets] = resource.MustParse("10")
hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
hard[v1.ResourceEphemeralStorage] = resource.MustParse("50Gi")
hard[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
hard[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
// test quota on discovered resource type
@@ -868,7 +799,7 @@ func newTestPodForQuota(f *framework.Framework, name string, requests v1.Resourc
Containers: []v1.Container{
{
Name: "pause",
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,

View File

@@ -32,6 +32,7 @@ import (
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = SIGDescribe("Multi-AZ Clusters", func() {
@@ -91,7 +92,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
Containers: []v1.Container{
{
Name: "test",
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
},
},
},
@@ -223,7 +224,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
}
}()