Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00.
2 vendor/k8s.io/kubernetes/test/cmd/core.sh (generated, vendored)

@@ -739,7 +739,7 @@ run_secrets_test() {
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockerconfigjson'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockerconfigjson:')" ]]
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ0ZXN0LXVzZXIiLCJwYXNzd29yZCI6InRlc3QtcGFzc3dvcmQiLCJlbWFpbCI6InRlc3QtdXNlckB0ZXN0LmNvbSIsImF1dGgiOiJkR1Z6ZEMxMWMyVnlPblJsYzNRdGNHRnpjM2R2Y21RPSJ9fX0=')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets

17 vendor/k8s.io/kubernetes/test/cmd/diff.sh (generated, vendored)

@@ -40,3 +40,20 @@ run_kubectl_diff_tests() {
set +o nounset
set +o errexit
}

run_kubectl_diff_same_names() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Test kubectl diff with multiple resources with the same name"

output_message=$(KUBECTL_EXTERNAL_DIFF=find kubectl diff -Rf hack/testdata/diff/)
kube::test::if_has_string "${output_message}" 'v1\.Pod\..*\.test'
kube::test::if_has_string "${output_message}" 'apps\.v1\.Deployment\..*\.test'
kube::test::if_has_string "${output_message}" 'v1\.ConfigMap\..*\.test'
kube::test::if_has_string "${output_message}" 'v1\.Secret\..*\.test'

set +o nounset
set +o errexit
}
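
Note on the test above: kubectl diff dumps the live and the locally merged versions of each object into two temporary directories and then runs the program named by KUBECTL_EXTERNAL_DIFF on those two directories, so substituting find simply prints one path per object, which is what the if_has_string regexes match against. Below is a hypothetical stand-in differ in Go; the file-name layout mentioned in the comment is illustrative, not guaranteed by kubectl.

// --- illustrative sketch, not part of the vendored diff ---
// fakediff: a hypothetical stand-in for KUBECTL_EXTERNAL_DIFF.
// kubectl diff invokes it as: fakediff <live-dir> <merged-dir>
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	for _, dir := range os.Args[1:] {
		// Print every file under the directory; each file holds one object,
		// named along the lines of "v1.Pod.<namespace>.<name>" (illustrative).
		_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			fmt.Println(path)
			return nil
		})
	}
}
// --- end of sketch ---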

1 vendor/k8s.io/kubernetes/test/cmd/legacy-script.sh (generated, vendored)

@@ -473,6 +473,7 @@ runTests() {
# Kubectl diff #
################
record_command run_kubectl_diff_tests
record_command run_kubectl_diff_same_names

###############
# Kubectl get #

22 vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go (generated, vendored)

@@ -250,8 +250,10 @@ var _ = SIGDescribe("CronJob", func() {
Expect(len(finishedJobs) == 1).To(BeTrue())

// Job should get deleted when the next job finishes the next minute
By("Ensuring this job does not exist anymore")
err = waitForJobNotExist(f.ClientSet, f.Namespace.Name, finishedJobs[0])
By("Ensuring this job and its pods does not exist anymore")
err = waitForJobToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
Expect(err).NotTo(HaveOccurred())
err = waitForJobsPodToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
Expect(err).NotTo(HaveOccurred())

By("Ensuring there is 1 finished job by listing jobs explicitly")
@@ -380,8 +382,8 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string)
})
}

// Wait for a job to not exist by listing jobs explicitly.
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
// Wait for a job to disappear by listing them explicitly.
func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
if err != nil {
@@ -397,6 +399,18 @@ func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job
})
}

// Wait for a pod to disappear by listing them explicitly.
func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)}
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, err
}
return len(pods.Items) == 0, nil
})
}

// Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
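
Note on the new helpers above: waitForJobToDisappear and waitForJobsPodToDisappear share one shape, a wait.Poll loop that lists the relevant objects until nothing belonging to the target job remains or cronJobTimeout expires. Below is a self-contained sketch of that polling contract, with a plain counter standing in for the API list call and with illustrative interval and timeout values rather than the framework's.

// --- illustrative sketch, not part of the vendored diff ---
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	remaining := 3
	// wait.Poll re-runs the condition at the given interval until it returns
	// true or an error, or until the timeout expires; the cronjob helpers use
	// the same shape with a Jobs/Pods List call inside the closure.
	err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		remaining-- // stand-in for "list objects and count what is left"
		return remaining == 0, nil
	})
	fmt.Println("done waiting, err =", err)
}
// --- end of sketch ---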

3 vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (generated, vendored)

@@ -315,7 +315,8 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
config := clientcmdapi.NewConfig()

credentials := clientcmdapi.NewAuthInfo()
credentials.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
credentials.Token = clientCfg.BearerToken
credentials.TokenFile = clientCfg.BearerTokenFile
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
if len(credentials.ClientCertificate) == 0 {
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData

22 vendor/k8s.io/kubernetes/test/e2e/storage/drivers/base.go (generated, vendored)

@@ -76,18 +76,26 @@ type DynamicPVTestDriver interface {
GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass
}

// Capability represents a feature that a volume plugin supports
type Capability string

const (
CapPersistence Capability = "persistence" // data is persisted across pod restarts
CapBlock Capability = "block" // raw block mode
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
CapExec Capability = "exec" // exec a file in the volume
)

// DriverInfo represents a combination of parameters to be used in implementation of TestDriver
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver

MaxFileSize int64 // Max file size to be tested for this driver
SupportedFsType sets.String // Map of string for supported fs type
SupportedMountOption sets.String // Map of string for supported mount option
RequiredMountOption sets.String // Map of string for required mount option (Optional)
IsPersistent bool // Flag to represent whether it provides persistency
IsFsGroupSupported bool // Flag to represent whether it supports fsGroup
IsBlockSupported bool // Flag to represent whether it supports Block Volume
MaxFileSize int64 // Max file size to be tested for this driver
SupportedFsType sets.String // Map of string for supported fs type
SupportedMountOption sets.String // Map of string for supported mount option
RequiredMountOption sets.String // Map of string for required mount option (Optional)
Capabilities map[Capability]bool // Map that represents plugin capabilities

// Parameters below will be set inside test loop by using SetCommonDriverParameters.
// Drivers that implement TestDriver is required to set all the above parameters
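
Note on the hunk above: the per-feature booleans (IsPersistent, IsFsGroupSupported, IsBlockSupported) are folded into a single Capabilities map keyed by the new Capability constants. Below is a minimal, self-contained sketch of the pattern; the driverInfo type and supports helper are stand-ins for the real framework types, not code from the diff.

// --- illustrative sketch, not part of the vendored diff ---
package main

import "fmt"

// Capability mirrors the new base.go type: one string constant per feature
// instead of one bool field per feature on DriverInfo.
type Capability string

const (
	CapPersistence Capability = "persistence"
	CapBlock       Capability = "block"
	CapFsGroup     Capability = "fsGroup"
	CapExec        Capability = "exec"
)

// driverInfo is a stripped-down stand-in for the framework's DriverInfo.
type driverInfo struct {
	Name         string
	Capabilities map[Capability]bool
}

// supports reads the map the same way the test suites do: a key that was
// never set reports false, so drivers only list what they actually support.
func supports(d driverInfo, c Capability) bool {
	return d.Capabilities[c]
}

func main() {
	hostpath := driverInfo{
		Name:         "csi-hostpath",
		Capabilities: map[Capability]bool{CapPersistence: true},
	}
	fmt.Println(supports(hostpath, CapPersistence)) // true
	fmt.Println(supports(hostpath, CapBlock))       // false: key absent
}
// --- end of sketch ---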

28 vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi.go (generated, vendored)

@@ -67,9 +67,9 @@ func InitHostPathCSIDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
},
},
}
}
@@ -153,9 +153,9 @@ func InitHostV0PathCSIDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
},
},
}
}
@@ -243,9 +243,11 @@ func InitGcePDCSIDriver() TestDriver {
"ext4",
"xfs",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapExec: true,
},
},
}
}
@@ -334,9 +336,11 @@ func InitGcePDExternalCSIDriver() TestDriver {
"ext4",
"xfs",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapExec: true,
},
},
}
}

101 vendor/k8s.io/kubernetes/test/e2e/storage/drivers/in_tree.go (generated, vendored)

@@ -90,9 +90,10 @@ func InitNFSDriver() TestDriver {
),
SupportedMountOption: sets.NewString("proto=tcp", "relatime"),
RequiredMountOption: sets.NewString("vers=4.1"),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapExec: true,
},
},
}
}
@@ -235,9 +236,10 @@ func InitGlusterFSDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapExec: true,
},
},
}
}
@@ -356,9 +358,12 @@ func InitISCSIDriver() TestDriver {
//"ext3",
"ext4",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: true,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapBlock: true,
CapExec: true,
},
},
}
}
@@ -465,9 +470,13 @@ func InitRbdDriver() TestDriver {
//"ext3",
"ext4",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: true},
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapBlock: true,
CapExec: true,
},
},
}
}

@@ -585,9 +594,10 @@ func InitCephFSDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapExec: true,
},
},
}
}
@@ -684,9 +694,9 @@ func InitHostPathDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
},
},
}
}
@@ -756,9 +766,9 @@ func InitHostPathSymlinkDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
},
},
}
}
@@ -896,9 +906,9 @@ func InitEmptydirDriver() TestDriver {
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: false,
IsFsGroupSupported: false,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapExec: true,
},
},
}
}
@@ -963,9 +973,11 @@ func InitCinderDriver() TestDriver {
"", // Default fsType
"ext3",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapExec: true,
},
},
}
}
@@ -1121,9 +1133,12 @@ func InitGcePdDriver() TestDriver {
"xfs",
),
SupportedMountOption: sets.NewString("debug", "nouid32"),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: true,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapBlock: true,
CapExec: true,
},
},
}
}
@@ -1235,9 +1250,11 @@ func InitVSphereDriver() TestDriver {
"", // Default fsType
"ext4",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: false,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapExec: true,
},
},
}
}
@@ -1351,9 +1368,12 @@ func InitAzureDriver() TestDriver {
"", // Default fsType
"ext4",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: true,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapBlock: true,
CapExec: true,
},
},
}
}
@@ -1464,9 +1484,12 @@ func InitAwsDriver() TestDriver {
"ext3",
),
SupportedMountOption: sets.NewString("debug", "nouid32"),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: true,
Capabilities: map[Capability]bool{
CapPersistence: true,
CapFsGroup: true,
CapBlock: true,
CapExec: true,
},
},
}
}

1 vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/BUILD (generated, vendored)

@@ -18,7 +18,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",

2 vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go (generated, vendored)

@@ -187,7 +187,7 @@ func testProvisioning(input *provisioningTestInput) {
})

It("should create and delete block persistent volumes", func() {
if !input.dInfo.IsBlockSupported {
if !input.dInfo.Capabilities[drivers.CapBlock] {
framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
}
block := v1.PersistentVolumeBlock
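
Note on the replacement condition above: reading the Capabilities map directly is a safe drop-in for the old boolean because a Go map lookup for an absent key returns the zero value, so a driver that never lists CapBlock behaves exactly like one with IsBlockSupported: false. A small illustration:

// --- illustrative sketch, not part of the vendored diff ---
package main

import "fmt"

func main() {
	caps := map[string]bool{"persistence": true}
	// An absent key yields the zero value, so no "was it declared?" check is
	// needed before reading a capability.
	fmt.Println(!caps["block"]) // true -> the block-volume test would be skipped
	v, ok := caps["block"]
	fmt.Println(v, ok) // false false
}
// --- end of sketch ---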

84 vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go (generated, vendored)

@@ -22,9 +22,8 @@ import (
"regexp"
"strings"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -362,6 +361,31 @@ func testSubPath(input *subPathTestInput) {
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})

It("should be able to unmount after the subpath directory is deleted", func() {
// Change volume container to busybox so we can exec later
input.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
input.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}

By(fmt.Sprintf("Creating pod %s", input.pod.Name))
pod, err := input.f.ClientSet.CoreV1().Pods(input.f.Namespace.Name).Create(input.pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
By(fmt.Sprintf("Deleting pod %s", pod.Name))
framework.DeletePodWithWait(input.f, input.f.ClientSet, pod)
}()

// Wait for pod to be running
err = framework.WaitForPodRunningInNamespace(input.f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")

// Exec into container that mounted the volume, delete subpath directory
rmCmd := fmt.Sprintf("rm -rf %s", input.subPathDir)
_, err = podContainerExec(pod, 1, rmCmd)
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")

// Delete pod (from defer) and wait for it to be successfully deleted
})

// TODO: add a test case for the same disk with two partitions
}

@@ -587,16 +611,54 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
By("Checking for subpath error in container status")
err = waitForPodSubpathError(f, pod)
Expect(err).NotTo(HaveOccurred(), "while waiting for subpath failure")
}

By("Checking for subpath error event")
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": f.Namespace.Name,
"reason": "Failed",
}.AsSelector().String()
err = framework.WaitTimeoutForPodEvent(f.ClientSet, pod.Name, f.Namespace.Name, selector, errorMsg, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred(), "while waiting for failed event to occur")
func findSubpathContainerName(pod *v1.Pod) string {
for _, container := range pod.Spec.Containers {
for _, mount := range container.VolumeMounts {
if mount.SubPath != "" {
return container.Name
}
}
}
return ""
}

func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod) error {
subpathContainerName := findSubpathContainerName(pod)
if subpathContainerName == "" {
return fmt.Errorf("failed to find container that uses subpath")
}

return wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
// 0 is the container that uses subpath
if status.Name == subpathContainerName {
switch {
case status.State.Running != nil:
return false, fmt.Errorf("subpath container unexpectedly became running")
case status.State.Terminated != nil:
return false, fmt.Errorf("subpath container unexpectedly terminated")
case status.State.Waiting != nil:
if status.State.Waiting.Reason == "CreateContainerConfigError" &&
strings.Contains(status.State.Waiting.Message, "subPath") {
return true, nil
}
return false, nil
default:
return false, nil
}
}
}
return false, nil
})
}

// Tests that the existing subpath mount is detected when a container restarts

2 vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go (generated, vendored)

@@ -90,7 +90,7 @@ func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericV
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}

if dInfo.IsFsGroupSupported {
if dInfo.Capabilities[drivers.CapFsGroup] {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}

4 vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go (generated, vendored)

@@ -78,13 +78,13 @@ func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volume
testVolType: pattern.VolType,
nodeName: dInfo.Config.ClientNodeName,
volMode: pattern.VolMode,
isBlockSupported: dInfo.IsBlockSupported,
isBlockSupported: dInfo.Capabilities[drivers.CapBlock],
}
}

func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) {
dInfo := driver.GetDriverInfo()
isBlockSupported := dInfo.IsBlockSupported
isBlockSupported := dInfo.Capabilities[drivers.CapBlock]
volMode := pattern.VolMode
volType := pattern.VolType

100 vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go (generated, vendored)

@@ -23,11 +23,17 @@ package testsuites

import (
"fmt"
"path/filepath"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
imageutils "k8s.io/kubernetes/test/utils/image"
)

type volumesTestSuite struct {
@@ -68,12 +74,22 @@ func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
}

func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}

func skipPersistenceTest(driver drivers.TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.IsPersistent {
if !dInfo.Capabilities[drivers.CapPersistence] {
framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
}
}

func skipExecTest(driver drivers.TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[drivers.CapExec] {
framework.Skipf("Driver %q does not support exec - skipping", dInfo.Name)
}
}

func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
var fsGroup *int64
driver := resource.driver
@@ -85,16 +101,17 @@ func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVo
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}

if dInfo.IsFsGroupSupported {
if dInfo.Capabilities[drivers.CapFsGroup] {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}

return volumesTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
fsGroup: fsGroup,
f: f,
name: dInfo.Name,
config: dInfo.Config,
fsGroup: fsGroup,
resource: resource,
tests: []framework.VolumeTest{
{
Volume: *volSource,
@@ -140,11 +157,12 @@ func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatte
}

type volumesTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
fsGroup *int64
tests []framework.VolumeTest
f *framework.Framework
name string
config framework.VolumeTestConfig
fsGroup *int64
tests []framework.VolumeTest
resource genericVolumeTestResource
}

func testVolumes(input *volumesTestInput) {
@@ -153,8 +171,68 @@ func testVolumes(input *volumesTestInput) {
cs := f.ClientSet
defer framework.VolumeTestCleanup(f, input.config)

skipPersistenceTest(input.resource.driver)

volumeTest := input.tests
framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests)
})
It("should allow exec of files on the volume", func() {
f := input.f
skipExecTest(input.resource.driver)

testScriptInPod(f, input.resource.volType, input.resource.volSource, input.config.NodeSelector)
})
}

func testScriptInPod(
f *framework.Framework,
volumeType string,
source *v1.VolumeSource,
nodeSelector map[string]string) {

const (
volPath = "/vol1"
volName = "vol1"
)
suffix := generateSuffixForPodName(volumeType)
scriptName := fmt.Sprintf("test-%s.sh", suffix)
fullPath := filepath.Join(volPath, scriptName)
cmd := fmt.Sprintf("echo \"ls %s\" > %s; chmod u+x %s; %s", volPath, fullPath, fullPath, fullPath)

pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("exec-volume-test-%s", suffix),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("exec-container-%s", suffix),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Command: []string{"/bin/sh", "-ec", cmd},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: volPath,
},
},
},
},
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: *source,
},
},
RestartPolicy: v1.RestartPolicyNever,
NodeSelector: nodeSelector,
},
}
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("exec-volume-test", pod, 0, []string{scriptName})

By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}

25 vendor/k8s.io/kubernetes/test/e2e_node/node_problem_detector_linux.go (generated, vendored)

@@ -22,7 +22,6 @@ import (
"fmt"
"os"
"path"
"syscall"
"time"

"k8s.io/api/core/v1"
@@ -34,6 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@@ -97,8 +97,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
BeforeEach(func() {
By("Calculate Lookback duration")
var err error
nodeTime, bootTime, err = getNodeTime()

nodeTime = time.Now()
bootTime, err = util.GetBootTime()
Expect(err).To(BeNil())

// Set lookback duration longer than node up time.
// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
lookback = nodeTime.Sub(bootTime) + time.Hour
@@ -387,24 +390,6 @@ func injectLog(file string, timestamp time.Time, log string, num int) error {
return nil
}

// getNodeTime gets node boot time and current time.
func getNodeTime() (time.Time, time.Time, error) {
// Get node current time.
nodeTime := time.Now()

// Get system uptime.
var info syscall.Sysinfo_t
if err := syscall.Sysinfo(&info); err != nil {
return time.Time{}, time.Time{}, err
}
// Get node boot time. NOTE that because we get node current time before uptime, the boot time
// calculated will be a little earlier than the real boot time. This won't affect the correctness
// of the test result.
bootTime := nodeTime.Add(-time.Duration(info.Uptime) * time.Second)

return nodeTime, bootTime, nil
}

// verifyEvents verifies there are num specific events generated
func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error {
events, err := e.List(options)

2 vendor/k8s.io/kubernetes/test/images/Makefile (generated, vendored)

@@ -17,7 +17,7 @@ include ../../hack/make-rules/Makefile.manifest
REGISTRY ?= gcr.io/kubernetes-e2e-test-images
GOARM=7
QEMUVERSION=v2.9.1
GOLANG_VERSION=1.11.2
GOLANG_VERSION=1.11.4
export

ifndef WHAT

1 vendor/k8s.io/kubernetes/test/integration/scheduler/BUILD (generated, vendored)

@@ -25,6 +25,7 @@ go_test(
"//cmd/kube-scheduler/app:go_default_library",
"//cmd/kube-scheduler/app/config:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/controller/volume/persistentvolume/options:go_default_library",

150 vendor/k8s.io/kubernetes/test/integration/scheduler/preemption_test.go (generated, vendored)

@@ -32,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
testutils "k8s.io/kubernetes/test/utils"
@@ -72,7 +73,7 @@ func TestPreemption(t *testing.T) {

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
}

tests := []struct {
@@ -90,7 +91,7 @@ func TestPreemption(t *testing.T) {
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
},
@@ -100,7 +101,7 @@ func TestPreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
preemptedPodIndexes: map[int]struct{}{0: {}},
@@ -236,7 +237,7 @@ func TestPreemption(t *testing.T) {
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
node, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
@@ -312,7 +313,7 @@ func TestDisablePreemption(t *testing.T) {
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
},
@@ -322,7 +323,7 @@ func TestDisablePreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
},
@@ -332,7 +333,7 @@ func TestDisablePreemption(t *testing.T) {
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
@@ -374,7 +375,7 @@ func TestDisablePreemption(t *testing.T) {
func mkPriorityPodWithGrace(tc *TestContext, name string, priority int32, grace int64) *v1.Pod {
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
}
pod := initPausePod(tc.clientSet, &pausePodConfig{
Name: name,
@@ -419,7 +420,7 @@ func TestPreemptionStarvation(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
},
@@ -429,7 +430,7 @@ func TestPreemptionStarvation(t *testing.T) {
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
@@ -489,6 +490,119 @@ func TestPreemptionStarvation(t *testing.T) {
}
}

// TestPreemptionRaces tests that other scheduling events and operations do not
// race with the preemption process.
func TestPreemptionRaces(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "preemption-race")
defer cleanupTest(t, context)
cs := context.clientSet

tests := []struct {
description string
numInitialPods int // Pods created and executed before running preemptor
numAdditionalPods int // Pods created after creating the preemptor
numRepetitions int // Repeat the tests to check races
preemptor *v1.Pod
}{
{
// This test ensures that while the preempting pod is waiting for the victims
// terminate, other lower priority pods are not scheduled in the room created
// after preemption and while the higher priority pods is not scheduled yet.
description: "ensures that other pods are not scheduled while preemptor is being marked as nominated (issue #72124)",
numInitialPods: 2,
numAdditionalPods: 50,
numRepetitions: 10,
preemptor: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
Namespace: context.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(4900, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(4900, resource.DecimalSI)},
},
}),
},
}

// Create a node with some resources and a label.
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(5000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}

for _, test := range tests {
if test.numRepetitions <= 0 {
test.numRepetitions = 1
}
for n := 0; n < test.numRepetitions; n++ {
initialPods := make([]*v1.Pod, test.numInitialPods)
additionalPods := make([]*v1.Pod, test.numAdditionalPods)
// Create and run existingPods.
for i := 0; i < test.numInitialPods; i++ {
initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
if err != nil {
t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
}
}
// make sure that initial Pods are all scheduled.
for _, p := range initialPods {
if err := waitForPodToSchedule(cs, p); err != nil {
t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
}
}
// Create the preemptor.
klog.Info("Creating the preemptor pod...")
preemptor, err := createPausePod(cs, test.preemptor)
if err != nil {
t.Errorf("Error while creating the preempting pod: %v", err)
}

klog.Info("Creating additional pods...")
for i := 0; i < test.numAdditionalPods; i++ {
additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
if err != nil {
t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
}
}
// Check that the preemptor pod gets nominated node name.
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
}
// Make sure that preemptor is scheduled after preemptions.
if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
}

klog.Info("Check unschedulable pods still exists and were never scheduled...")
for _, p := range additionalPods {
pod, err := cs.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Error in getting Pod %v/%v info: %v", p.Namespace, p.Name, err)
}
if len(pod.Spec.NodeName) > 0 {
t.Errorf("Pod %v/%v is already scheduled", p.Namespace, p.Name)
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
if cond != nil && cond.Status != v1.ConditionFalse {
t.Errorf("Pod %v/%v is no longer unschedulable: %v", p.Namespace, p.Name, err)
}
}
// Cleanup
klog.Info("Cleaning up all pods...")
allPods := additionalPods
allPods = append(allPods, initialPods...)
allPods = append(allPods, preemptor)
cleanupPods(cs, t, allPods)
}
}
}

// TestNominatedNodeCleanUp checks that when there are nominated pods on a
// node and a higher priority pod is nominated to run on the node, the nominated
// node name of the lower priority pods is cleared.
@@ -514,7 +628,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
if err != nil {
@@ -542,7 +656,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
Priority: &mediumPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(400, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI)},
},
})
medPriPod, err := createPausePod(cs, podConf)
@@ -560,7 +674,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
})
highPriPod, err := createPausePod(cs, podConf)
@@ -625,12 +739,12 @@ func TestPDBInPreemption(t *testing.T) {

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
}
defaultNodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}

type nodeConfig struct {
@@ -682,7 +796,7 @@ func TestPDBInPreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
preemptedPodIndexes: map[int]struct{}{2: {}},
@@ -720,7 +834,7 @@ func TestPDBInPreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
preemptedPodIndexes: map[int]struct{}{1: {}},
@@ -800,7 +914,7 @@ func TestPDBInPreemption(t *testing.T) {
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(400, resource.BinarySI)},
v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI)},
},
}),
// The third node is chosen because PDB is not violated for node 3 and the victims have lower priority than node-2.
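
Note on the quantity changes above: most hunks in this file only switch memory quantities from resource.BinarySI to resource.DecimalSI. The format argument records which suffix family Quantity.String() should prefer when the value is serialized; the numeric value, and with it the scheduling and preemption arithmetic in these tests, is unchanged. A small check using only the apimachinery resource package:

// --- illustrative sketch, not part of the vendored diff ---
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	bin := resource.NewQuantity(500, resource.BinarySI)
	dec := resource.NewQuantity(500, resource.DecimalSI)
	// Same value, different preferred serialization; 500 has no exact suffix
	// in either family, so both render as plain "500" here.
	fmt.Println(bin.String(), dec.String())
	fmt.Println(bin.Cmp(*dec) == 0) // true: the quantities compare equal
}
// --- end of sketch ---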

13 vendor/k8s.io/kubernetes/test/integration/scheduler/util.go (generated, vendored)

@@ -587,6 +587,17 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
}
}

// podUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status.
func podSchedulableCondition(c clientset.Interface, podNamespace, podName string) (*v1.PodCondition, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if err != nil {
return nil, err
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
return cond, nil
}

// podUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status.
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
@@ -696,7 +707,7 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
}
}
for _, p := range pods {
if err := wait.Poll(time.Second, wait.ForeverTestTimeout,
if err := wait.Poll(time.Millisecond, wait.ForeverTestTimeout,
podDeleted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
}
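
Note on podSchedulableCondition above: both it and podUnschedulable key off the PodScheduled condition in the pod status. Below is a self-contained sketch of that lookup, with a local getPodCondition helper standing in for podutil.GetPodCondition so the snippet only needs k8s.io/api.

// --- illustrative sketch, not part of the vendored diff ---
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// getPodCondition is a local stand-in for podutil.GetPodCondition: it returns
// the first condition of the requested type, or nil if none is present.
func getPodCondition(status *v1.PodStatus, condType v1.PodConditionType) *v1.PodCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}

func main() {
	pod := v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{
				Type:   v1.PodScheduled,
				Status: v1.ConditionFalse,
				Reason: v1.PodReasonUnschedulable,
			}},
		},
	}
	// This is the state podUnschedulable waits for: PodScheduled=False with
	// reason Unschedulable.
	cond := getPodCondition(&pod.Status, v1.PodScheduled)
	fmt.Println(cond.Status, cond.Reason)
}
// --- end of sketch ---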