mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-14 10:53:34 +00:00
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/e2e/node/BUILD (1 line changed; generated, vendored)

@@ -18,7 +18,6 @@ go_library(
     importpath = "k8s.io/kubernetes/test/e2e/node",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/api/testapi:go_default_library",
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go (8 lines changed; generated, vendored)

@@ -28,9 +28,9 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/kubernetes/pkg/api/testapi"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
+    imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -123,7 +123,7 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
     pod := &v1.Pod{
         TypeMeta: metav1.TypeMeta{
             Kind:       "Pod",
-            APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
+            APIVersion: "v1",
         },
         ObjectMeta: metav1.ObjectMeta{
             GenerateName: "pod-nfs-vol-",
@@ -319,7 +319,7 @@ var _ = SIGDescribe("kubelet", func() {
             InternalClient: f.InternalClientset,
             Name:           rcName,
             Namespace:      f.Namespace.Name,
-            Image:          framework.GetPauseImageName(f.ClientSet),
+            Image:          imageutils.GetPauseImageName(),
             Replicas:       totalPods,
             NodeSelector:   nodeLabels,
         })).NotTo(HaveOccurred())
@@ -334,7 +334,7 @@ var _ = SIGDescribe("kubelet", func() {
         }

         By("Deleting the RC")
-        framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
+        framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
         // Check that the pods really are gone by querying /runningpods on the
         // node. The /runningpods handler checks the container runtime (or its
         // cache) and returns a list of running pods. Some possible causes of
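Two patterns recur here. First, the testapi lookup resolved the group/version string for core-group objects, which for a Pod is always the literal "v1"; inlining the literal lets the vendored test drop the heavyweight testapi package, which is also why the BUILD file above loses its //pkg/api/testapi dep. Second, the pause image and RC cleanup helpers move to variants that no longer need a clientset (note the dropped InternalClientset argument; the replacement apparently lets the garbage collector reap the pods). A minimal sketch of the new TypeMeta shape, assuming only k8s.io/api and k8s.io/apimachinery:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // For core-group ("") objects the group/version string is just "v1",
    // so the literal replaces the testapi lookup with no behavior change.
    pod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "pod-nfs-vol-",
        },
    }
    fmt.Println(pod.APIVersion, pod.Kind, pod.GenerateName)
}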
vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go (5 lines changed; generated, vendored)

@@ -28,6 +28,7 @@ import (
     stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
+    imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -74,7 +75,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
         InternalClient: f.InternalClientset,
         Name:           rcName,
         Namespace:      f.Namespace.Name,
-        Image:          framework.GetPauseImageName(f.ClientSet),
+        Image:          imageutils.GetPauseImageName(),
         Replicas:       totalPods,
     })).NotTo(HaveOccurred())

@@ -117,7 +118,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
     verifyCPULimits(expectedCPU, cpuSummary)

     By("Deleting the RC")
-    framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
+    framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 }

 func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go (21 lines changed; generated, vendored)

@@ -28,7 +28,7 @@ import (
     . "github.com/onsi/gomega"
 )

-func preparePod(name string, node *v1.Node, propagation v1.MountPropagationMode, hostDir string) *v1.Pod {
+func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod {
     const containerName = "cntr"
     bTrue := true
     var oneSecond int64 = 1
@@ -49,7 +49,7 @@ func preparePod(name string, node *v1.Node, propagation v1.MountPropagationMode,
     {
         Name:             "host",
         MountPath:        "/mnt/test",
-        MountPropagation: &propagation,
+        MountPropagation: propagation,
     },
 },
 SecurityContext: &v1.SecurityContext{
@@ -105,12 +105,19 @@ var _ = SIGDescribe("Mount propagation", func() {
     }()

     podClient := f.PodClient()
-    master := podClient.CreateSync(preparePod("master", node, v1.MountPropagationBidirectional, hostDir))
-    slave := podClient.CreateSync(preparePod("slave", node, v1.MountPropagationHostToContainer, hostDir))
+    bidirectional := v1.MountPropagationBidirectional
+    master := podClient.CreateSync(preparePod("master", node, &bidirectional, hostDir))
+
+    hostToContainer := v1.MountPropagationHostToContainer
+    slave := podClient.CreateSync(preparePod("slave", node, &hostToContainer, hostDir))
+
+    none := v1.MountPropagationNone
+    private := podClient.CreateSync(preparePod("private", node, &none, hostDir))
+    defaultPropagation := podClient.CreateSync(preparePod("default", node, nil, hostDir))

     // Check that the pods sees directories of each other. This just checks
     // that they have the same HostPath, not the mount propagation.
-    podNames := []string{master.Name, slave.Name}
+    podNames := []string{master.Name, slave.Name, private.Name, defaultPropagation.Name}
     for _, podName := range podNames {
         for _, dirName := range podNames {
             cmd := fmt.Sprintf("test -d /mnt/test/%s", dirName)
@@ -147,6 +154,10 @@ var _ = SIGDescribe("Mount propagation", func() {
     "master": sets.NewString("master", "host"),
     // Slave sees master's mount + itself.
     "slave": sets.NewString("master", "slave", "host"),
+    // Private sees only its own mount
+    "private": sets.NewString("private"),
+    // Default (=private) sees only its own mount
+    "default": sets.NewString("default"),
 }
 dirNames := append(podNames, "host")
 for podName, mounts := range expectedMounts {
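Changing preparePod to take *v1.MountPropagationMode lets callers distinguish an explicit mode from "unset" (nil), which the new "default" pod exercises. Go cannot take the address of a constant such as v1.MountPropagationBidirectional directly, which is why the diff introduces named locals before taking their address. A small self-contained sketch of that pattern, assuming k8s.io/api/core/v1; the modePtr helper is illustrative, not part of the test:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// modePtr returns a pointer to a copy of m. Since &v1.MountPropagationBidirectional
// does not compile, the diff binds the constant to a local first; this
// hypothetical helper does the same in one step.
func modePtr(m v1.MountPropagationMode) *v1.MountPropagationMode {
    return &m
}

func main() {
    vm := v1.VolumeMount{
        Name:             "host",
        MountPath:        "/mnt/test",
        MountPropagation: modePtr(v1.MountPropagationBidirectional),
    }
    fmt.Println(*vm.MountPropagation) // Bidirectional

    // A nil pointer leaves propagation unset, so the kubelet applies its
    // default (private, i.e. none), matching the new "default" pod case.
    vm.MountPropagation = nil
    fmt.Println(vm.MountPropagation == nil) // true
}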
vendor/k8s.io/kubernetes/test/e2e/node/pods.go (2 lines changed; generated, vendored)

@@ -166,7 +166,7 @@ var _ = SIGDescribe("Pods Extended", func() {
     deleted := false
     timeout := false
     var lastPod *v1.Pod
-    timer := time.After(1 * time.Minute)
+    timer := time.After(2 * time.Minute)
     for !deleted && !timeout {
         select {
         case event, _ := <-w.ResultChan():
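The only change here doubles the watch deadline from one minute to two, presumably to reduce flakes on slow nodes. A stripped-down sketch of the loop shape, using a plain string channel in place of the watch interface:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Stand-in for w.ResultChan(); the real test receives watch events.
    events := make(chan string)
    go func() {
        time.Sleep(50 * time.Millisecond)
        events <- "DELETED"
    }()

    // time.After arms the deadline once, outside the loop, so the whole
    // wait is bounded by two minutes rather than each iteration.
    timer := time.After(2 * time.Minute)
    deleted, timedOut := false, false
    for !deleted && !timedOut {
        select {
        case ev := <-events:
            if ev == "DELETED" {
                deleted = true
            }
        case <-timer:
            timedOut = true
        }
    }
    fmt.Println("deleted:", deleted, "timedOut:", timedOut)
}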
vendor/k8s.io/kubernetes/test/e2e/node/security_context.go (39 lines changed; generated, vendored)

@@ -81,6 +81,20 @@ var _ = SIGDescribe("Security Context [Feature:SecurityContext]", func() {
         })
     })

+    It("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [Feature:RunAsGroup]", func() {
+        pod := scTestPod(false, false)
+        userID := int64(1001)
+        groupID := int64(2002)
+        pod.Spec.SecurityContext.RunAsUser = &userID
+        pod.Spec.SecurityContext.RunAsGroup = &groupID
+        pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
+
+        f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
+            fmt.Sprintf("uid=%v", userID),
+            fmt.Sprintf("gid=%v", groupID),
+        })
+    })
+
     It("should support container.SecurityContext.RunAsUser", func() {
         pod := scTestPod(false, false)
         userID := int64(1001)
@@ -95,6 +109,25 @@ var _ = SIGDescribe("Security Context [Feature:SecurityContext]", func() {
         })
     })

+    It("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [Feature:RunAsGroup]", func() {
+        pod := scTestPod(false, false)
+        userID := int64(1001)
+        groupID := int64(2001)
+        overrideUserID := int64(1002)
+        overrideGroupID := int64(2002)
+        pod.Spec.SecurityContext.RunAsUser = &userID
+        pod.Spec.SecurityContext.RunAsGroup = &groupID
+        pod.Spec.Containers[0].SecurityContext = new(v1.SecurityContext)
+        pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID
+        pod.Spec.Containers[0].SecurityContext.RunAsGroup = &overrideGroupID
+        pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}
+
+        f.TestContainerOutput("pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
+            fmt.Sprintf("uid=%v", overrideUserID),
+            fmt.Sprintf("gid=%v", overrideGroupID),
+        })
+    })
+
     It("should support volume SELinux relabeling", func() {
         testPodSELinuxLabeling(f, false, false)
     })
@@ -111,7 +144,7 @@ var _ = SIGDescribe("Security Context [Feature:SecurityContext]", func() {
         // TODO: port to SecurityContext as soon as seccomp is out of alpha
         pod := scTestPod(false, false)
         pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
-        pod.Annotations[v1.SeccompPodAnnotationKey] = "docker/default"
+        pod.Annotations[v1.SeccompPodAnnotationKey] = v1.SeccompProfileRuntimeDefault
         pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
         f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
     })
@@ -124,10 +157,10 @@ var _ = SIGDescribe("Security Context [Feature:SecurityContext]", func() {
         f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
     })

-    It("should support seccomp alpha docker/default annotation [Feature:Seccomp]", func() {
+    It("should support seccomp alpha runtime/default annotation [Feature:Seccomp]", func() {
         // TODO: port to SecurityContext as soon as seccomp is out of alpha
         pod := scTestPod(false, false)
-        pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default"
+        pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = v1.SeccompProfileRuntimeDefault
         pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
         f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
     })
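The two new [Feature:RunAsGroup] cases assert the uid/gid that `id` reports inside the container: pod-level RunAsUser/RunAsGroup apply to every container, and a container-level SecurityContext overrides both. A minimal sketch of that override relationship, assuming a k8s.io/api version that includes the then-alpha RunAsGroup fields:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

func main() {
    podUID, podGID := int64(1001), int64(2001)
    ctrUID, ctrGID := int64(1002), int64(2002)

    pod := &v1.Pod{
        Spec: v1.PodSpec{
            // Pod-level identity, inherited by every container...
            SecurityContext: &v1.PodSecurityContext{
                RunAsUser:  &podUID,
                RunAsGroup: &podGID,
            },
            Containers: []v1.Container{{
                Name:    "test-container",
                Command: []string{"sh", "-c", "id"},
                // ...unless the container sets its own, which wins.
                SecurityContext: &v1.SecurityContext{
                    RunAsUser:  &ctrUID,
                    RunAsGroup: &ctrGID,
                },
            }},
        },
    }

    c := pod.Spec.Containers[0].SecurityContext
    fmt.Printf("id would report uid=%d gid=%d\n", *c.RunAsUser, *c.RunAsGroup)
}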