vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/test/e2e/common/BUILD generated vendored Normal file

@@ -0,0 +1,83 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"apparmor.go",
"autoscaling_utils.go",
"configmap.go",
"configmap_volume.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
"downwardapi_volume.go",
"empty_dir.go",
"events.go",
"expansion.go",
"host_path.go",
"init_container.go",
"kubelet_etc_hosts.go",
"networking.go",
"pods.go",
"privileged.go",
"projected.go",
"secrets.go",
"secrets_volume.go",
"sysctl.go",
"util.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/common",
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
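As a side note on how this generated BUILD file is consumed: a dep label of the form //vendor/<path>:go_default_library corresponds to the plain Go import path <path>, and the library itself is imported under the importpath attribute declared above. A minimal, illustrative Go file (not part of this commit) makes that mapping explicit:

package example

import (
	// Blank imports only; this file exists to show the label-to-import mapping.
	_ "github.com/onsi/ginkgo"            // //vendor/github.com/onsi/ginkgo:go_default_library
	_ "k8s.io/kubernetes/test/e2e/common" // the importpath of the go_library above
)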

vendor/k8s.io/kubernetes/test/e2e/common/apparmor.go generated vendored Normal file

@@ -0,0 +1,249 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/gomega"
)
const (
appArmorProfilePrefix = "e2e-apparmor-test-"
appArmorAllowedPath = "/expect_allowed_write"
appArmorDeniedPath = "/expect_permission_denied"
loaderLabelKey = "name"
loaderLabelValue = "e2e-apparmor-loader"
)
// AppArmorDistros are distros with AppArmor support
var AppArmorDistros = []string{"gci", "ubuntu"}
func IsAppArmorSupported() bool {
return framework.NodeOSDistroIs(AppArmorDistros...)
}
func SkipIfAppArmorNotSupported() {
framework.SkipUnlessNodeOSDistroIs(AppArmorDistros...)
}
func LoadAppArmorProfiles(f *framework.Framework) {
createAppArmorProfileCM(f)
createAppArmorProfileLoader(f)
}
// CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with
// an error code if the profile is incorrectly enforced. If runOnce is true, the pod will exit after
// a single test; otherwise it will repeat the test every second until failure.
func CreateAppArmorTestPod(f *framework.Framework, unconfined bool, runOnce bool) *api.Pod {
profile := "localhost/" + appArmorProfilePrefix + f.Namespace.Name
testCmd := fmt.Sprintf(`
if touch %[1]s; then
echo "FAILURE: write to %[1]s should be denied"
exit 1
elif ! touch %[2]s; then
echo "FAILURE: write to %[2]s should be allowed"
exit 2
elif [[ $(< /proc/self/attr/current) != "%[3]s" ]]; then
echo "FAILURE: not running with expected profile %[3]s"
echo "found: $(cat /proc/self/attr/current)"
exit 3
fi`, appArmorDeniedPath, appArmorAllowedPath, appArmorProfilePrefix+f.Namespace.Name)
if unconfined {
profile = apparmor.ProfileNameUnconfined
testCmd = `
if cat /proc/sysrq-trigger 2>&1 | grep 'Permission denied'; then
echo 'FAILURE: reading /proc/sysrq-trigger should be allowed'
exit 1
elif [[ $(< /proc/self/attr/current) != "unconfined" ]]; then
echo 'FAILURE: not running with expected profile unconfined'
exit 2
fi`
}
if !runOnce {
testCmd = fmt.Sprintf(`while true; do
%s
sleep 1
done`, testCmd)
}
loaderAffinity := &api.Affinity{
PodAffinity: &api.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
Namespaces: []string{f.Namespace.Name},
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{loaderLabelKey: loaderLabelValue},
},
TopologyKey: "kubernetes.io/hostname",
}},
},
}
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-apparmor-",
Annotations: map[string]string{
apparmor.ContainerAnnotationKeyPrefix + "test": profile,
},
Labels: map[string]string{
"test": "apparmor",
},
},
Spec: api.PodSpec{
Affinity: loaderAffinity,
Containers: []api.Container{{
Name: "test",
Image: busyboxImage,
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,
},
}
if runOnce {
pod = f.PodClient().Create(pod)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(
f.ClientSet, pod.Name, f.Namespace.Name))
var err error
pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
} else {
pod = f.PodClient().CreateSync(pod)
framework.ExpectNoError(f.WaitForPodReady(pod.Name))
}
// Verify Pod affinity colocated the Pods.
loader := getRunningLoaderPod(f)
Expect(pod.Spec.NodeName).To(Equal(loader.Spec.NodeName))
return pod
}
func createAppArmorProfileCM(f *framework.Framework) {
profileName := appArmorProfilePrefix + f.Namespace.Name
profile := fmt.Sprintf(`#include <tunables/global>
profile %s flags=(attach_disconnected) {
#include <abstractions/base>
file,
deny %s w,
audit %s w,
}
`, profileName, appArmorDeniedPath, appArmorAllowedPath)
cm := &api.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "apparmor-profiles",
Namespace: f.Namespace.Name,
},
Data: map[string]string{
profileName: profile,
},
}
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap")
}
func createAppArmorProfileLoader(f *framework.Framework) {
privileged := true
replicas := int32(1)
loader := &api.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "apparmor-loader",
Namespace: f.Namespace.Name,
},
Spec: api.ReplicationControllerSpec{
Replicas: &replicas,
Template: &api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{loaderLabelKey: loaderLabelValue},
},
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "apparmor-loader",
Image: "gcr.io/google_containers/apparmor-loader:0.1",
Args: []string{"-poll", "10s", "/profiles"},
SecurityContext: &api.SecurityContext{
Privileged: &privileged,
},
VolumeMounts: []api.VolumeMount{{
Name: "sys",
MountPath: "/sys",
ReadOnly: true,
}, {
Name: "apparmor-includes",
MountPath: "/etc/apparmor.d",
ReadOnly: true,
}, {
Name: "profiles",
MountPath: "/profiles",
ReadOnly: true,
}},
}},
Volumes: []api.Volume{{
Name: "sys",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/sys",
},
},
}, {
Name: "apparmor-includes",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/etc/apparmor.d",
},
},
}, {
Name: "profiles",
VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{
Name: "apparmor-profiles",
},
},
},
}},
},
},
},
}
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(loader)
framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController")
// Wait for loader to be ready.
getRunningLoaderPod(f)
}
func getRunningLoaderPod(f *framework.Framework) *api.Pod {
label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue}))
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod")
pod := &pods.Items[0]
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod), "Failed to run apparmor-loader Pod")
return pod
}
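The helpers above compose into a standard flow: gate on distro support, load the profiles, then run the enforcement pod. A minimal, illustrative Ginkgo spec, not part of the upstream file, assuming the usual e2e imports used elsewhere in this package (ginkgo and gomega dot-imported, "k8s.io/api/core/v1" as api, and the e2e framework):

var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
	f := framework.NewDefaultFramework("apparmor-example")

	It("should enforce a loaded profile (sketch)", func() {
		SkipIfAppArmorNotSupported() // skip on distros outside AppArmorDistros
		LoadAppArmorProfiles(f)      // creates the profile ConfigMap and loader RC

		// With runOnce=true the test container performs its checks once and
		// exits; CreateAppArmorTestPod waits for the pod to succeed.
		pod := CreateAppArmorTestPod(f, false /* unconfined */, true /* runOnce */)
		Expect(pod.Status.Phase).To(Equal(api.PodSucceeded))
	})
})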

vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go generated vendored Normal file

@@ -0,0 +1,525 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"context"
"fmt"
"strconv"
"sync"
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80
targetPort = 8080
timeoutRC = 120 * time.Second
startServiceTimeout = time.Minute
startServiceInterval = 5 * time.Second
rcIsNil = "ERROR: replicationController = nil"
deploymentIsNil = "ERROR: deployment = nil"
rsIsNil = "ERROR: replicaset = nil"
invalidKind = "ERROR: invalid workload kind for resource consumer"
customMetricName = "QPS"
serviceInitializationTimeout = 2 * time.Minute
serviceInitializationInterval = 15 * time.Second
)
var (
resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer)
resourceConsumerControllerImage = imageutils.GetE2EImage(imageutils.ResourceController)
)
var (
KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
subresource = "scale"
)
/*
ResourceConsumer is a tool for testing. It helps create a specified usage of CPU or memory (warning: memory is not supported).
Typical use case:
rc.ConsumeCPU(600)
// ... check your assumption here
rc.ConsumeCPU(300)
// ... check your assumption here
*/
type ResourceConsumer struct {
name string
controllerName string
kind schema.GroupVersionKind
nsName string
clientSet clientset.Interface
internalClientset *internalclientset.Clientset
cpu chan int
mem chan int
customMetric chan int
stopCPU chan int
stopMem chan int
stopCustomMetric chan int
stopWaitGroup sync.WaitGroup
consumptionTimeInSeconds int
sleepTime time.Duration
requestSizeInMillicores int
requestSizeInMegabytes int
requestSizeCustomMetric int
}
func GetResourceConsumerImage() string {
return resourceConsumerImage
}
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset)
}
// TODO this still defaults to replication controller
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset)
}
/*
newResourceConsumer creates a new ResourceConsumer.
initCPUTotal argument is in millicores.
initMemoryTotal argument is in megabytes.
memLimit argument is in megabytes; it is the maximum amount of memory that can be consumed by a single pod.
cpuLimit argument is in millicores; it is the maximum amount of CPU that can be consumed by a single pod.
*/
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
rc := &ResourceConsumer{
name: name,
controllerName: name + "-ctrl",
kind: kind,
nsName: nsName,
clientSet: clientset,
internalClientset: internalClientset,
cpu: make(chan int),
mem: make(chan int),
customMetric: make(chan int),
stopCPU: make(chan int),
stopMem: make(chan int),
stopCustomMetric: make(chan int),
consumptionTimeInSeconds: consumptionTimeInSeconds,
sleepTime: time.Duration(consumptionTimeInSeconds) * time.Second,
requestSizeInMillicores: requestSizeInMillicores,
requestSizeInMegabytes: requestSizeInMegabytes,
requestSizeCustomMetric: requestSizeCustomMetric,
}
go rc.makeConsumeCPURequests()
rc.ConsumeCPU(initCPUTotal)
go rc.makeConsumeMemRequests()
rc.ConsumeMem(initMemoryTotal)
go rc.makeConsumeCustomMetric()
rc.ConsumeCustomMetric(initCustomMetric)
return rc
}
// ConsumeCPU consumes the given number of millicores of CPU in total.
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
rc.cpu <- millicores
}
// ConsumeMem consumes the given number of megabytes of memory in total.
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
rc.mem <- megabytes
}
// ConsumeCustomMetric consumes the given amount of the custom metric in total.
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
rc.customMetric <- amount
}
func (rc *ResourceConsumer) makeConsumeCPURequests() {
defer GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
millicores := 0
for {
select {
case millicores = <-rc.cpu:
framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
rc.sendConsumeCPURequest(millicores)
sleepTime = rc.sleepTime
case <-rc.stopCPU:
framework.Logf("RC %s: stopping CPU consumer", rc.name)
return
}
}
}
func (rc *ResourceConsumer) makeConsumeMemRequests() {
defer GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
megabytes := 0
for {
select {
case megabytes = <-rc.mem:
framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
rc.sendConsumeMemRequest(megabytes)
sleepTime = rc.sleepTime
case <-rc.stopMem:
framework.Logf("RC %s: stopping mem consumer", rc.name)
return
}
}
}
func (rc *ResourceConsumer) makeConsumeCustomMetric() {
defer GinkgoRecover()
rc.stopWaitGroup.Add(1)
defer rc.stopWaitGroup.Done()
sleepTime := time.Duration(0)
delta := 0
for {
select {
case delta = <-rc.customMetric: // assign (not :=) so the outer delta is updated for later sends
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
rc.sendConsumeCustomMetric(delta)
sleepTime = rc.sleepTime
case <-rc.stopCustomMetric:
framework.Logf("RC %s: stopping metric consumer", rc.name)
return
}
}
}
// sendConsumeCPURequest sends POST request for CPU consumption
func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
Name(rc.controllerName).
Suffix("ConsumeCPU").
Param("millicores", strconv.Itoa(millicores)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
framework.Logf("ConsumeCPU URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
framework.Logf("ConsumeCPU failure: %v", err)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
// sendConsumeMemRequest sends POST request for memory consumption
func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
Name(rc.controllerName).
Suffix("ConsumeMem").
Param("megabytes", strconv.Itoa(megabytes)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
framework.Logf("ConsumeMem URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
framework.Logf("ConsumeMem failure: %v", err)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
// sendConsumeCustomMetric sends POST request for custom metric consumption
func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
Name(rc.controllerName).
Suffix("BumpMetric").
Param("metric", customMetricName).
Param("delta", strconv.Itoa(delta)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
framework.Logf("ConsumeCustomMetric failure: %v", err)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
}
func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case KindRC:
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if replicationController == nil {
framework.Failf(rcIsNil)
}
return int(replicationController.Status.ReadyReplicas)
case KindDeployment:
deployment, err := rc.clientSet.ExtensionsV1beta1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment == nil {
framework.Failf(deploymentIsNil)
}
return int(deployment.Status.ReadyReplicas)
case KindReplicaSet:
rs, err := rc.clientSet.ExtensionsV1beta1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if rs == nil {
framework.Failf(rsIsNil)
}
return int(rs.Status.ReadyReplicas)
default:
framework.Failf(invalidKind)
}
return 0
}
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
}
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
if replicas != desiredReplicas {
return false, fmt.Errorf("number of replicas changed unexpectedly")
}
return false, nil // Expected number of replicas found. Continue polling until timeout.
})
// The call above always returns an error; a timeout is the expected outcome, meaning the replica count stayed at the desired value for the whole duration.
if err == wait.ErrWaitTimeout {
framework.Logf("Number of replicas was stable over %v", duration)
return
}
framework.ExpectNoErrorWithOffset(1, err)
}
// Pause stops background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Pause() {
By(fmt.Sprintf("HPA pausing RC %s", rc.name))
rc.stopCPU <- 0
rc.stopMem <- 0
rc.stopCustomMetric <- 0
rc.stopWaitGroup.Wait()
}
// Resume starts background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Resume() {
By(fmt.Sprintf("HPA resuming RC %s", rc.name))
go rc.makeConsumeCPURequests()
go rc.makeConsumeMemRequests()
go rc.makeConsumeCustomMetric()
}
func (rc *ResourceConsumer) CleanUp() {
By(fmt.Sprintf("Removing consuming RC %s", rc.name))
close(rc.stopCPU)
close(rc.stopMem)
close(rc.stopCustomMetric)
rc.stopWaitGroup.Wait()
// Wait some time to ensure all child goroutines are finished.
time.Sleep(10 * time.Second)
kind := rc.kind.GroupKind()
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
}
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
Selector: map[string]string{
"name": name,
},
},
})
framework.ExpectNoError(err)
rcConfig := testutils.RCConfig{
Client: c,
InternalClient: internalClient,
Image: resourceConsumerImage,
Name: name,
Namespace: ns,
Timeout: timeoutRC,
Replicas: replicas,
CpuRequest: cpuLimitMillis,
CpuLimit: cpuLimitMillis,
MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
MemLimit: memLimitMb * 1024 * 1024,
}
switch kind {
case KindRC:
framework.ExpectNoError(framework.RunRC(rcConfig))
case KindDeployment:
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,
}
framework.ExpectNoError(framework.RunDeployment(dpConfig))
case KindReplicaSet:
rsConfig := testutils.ReplicaSetConfig{
RCConfig: rcConfig,
}
By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
default:
framework.Failf(invalidKind)
}
By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
_, err = c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: controllerName,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
Selector: map[string]string{
"name": controllerName,
},
},
})
framework.ExpectNoError(err)
dnsClusterFirst := v1.DNSClusterFirst
controllerRcConfig := testutils.RCConfig{
Client: c,
Image: resourceConsumerControllerImage,
Name: controllerName,
Namespace: ns,
Timeout: timeoutRC,
Replicas: 1,
Command: []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
DNSPolicy: &dnsClusterFirst,
}
framework.ExpectNoError(framework.RunRC(controllerRcConfig))
// Wait for endpoints to propagate for the controller service.
framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
hpa := &autoscalingv1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: rc.name,
Namespace: rc.nsName,
},
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
APIVersion: rc.kind.GroupVersion().String(),
Kind: rc.kind.Kind,
Name: rc.name,
},
MinReplicas: &minReplicas,
MaxReplicas: maxRepl,
TargetCPUUtilizationPercentage: &cpu,
},
}
hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
framework.ExpectNoError(errHPA)
return hpa
}
func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
}
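Putting the pieces above together, the "typical use case" from the ResourceConsumer doc comment looks roughly like this sketch. The numbers are illustrative; it assumes the sketch lives in this package with a framework.Framework wired up, and uses the InternalClientset field the framework exposes here:

func runHPASketch(f *framework.Framework) {
	// 1 initial replica, 250 millicores of initial CPU load, no memory or
	// custom-metric load; per-pod limits of 500 millicores and 200 MB.
	rc := NewDynamicResourceConsumer("rc-example", f.Namespace.Name, KindDeployment,
		1, 250, 0, 0, 500, 200, f.ClientSet, f.InternalClientset)
	defer rc.CleanUp()

	// Target 20% CPU utilization, scaling between 1 and 5 replicas.
	hpa := CreateCPUHorizontalPodAutoscaler(rc, 20, 1, 5)
	defer DeleteHorizontalPodAutoscaler(rc, hpa.Name)

	rc.ConsumeCPU(600)                    // raise total load to 600 millicores...
	rc.WaitForReplicas(3, 15*time.Minute) // ...and expect the HPA to scale out
}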

vendor/k8s.io/kubernetes/test/e2e/common/configmap.go generated vendored Normal file

@@ -0,0 +1,138 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = Describe("[sig-api-machinery] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-in-env-field
Description: Make sure a config map value can be consumed as an environment
variable in the container (via the container.env field).
*/
framework.ConformanceIt("should be consumable via environment variable ", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "CONFIG_DATA_1",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
/*
Testname: configmap-envfrom-field
Description: Make sure config map values can be used as a source for
environment variables in the container (via the container.envFrom field).
*/
framework.ConformanceIt("should be consumable via the environment ", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newEnvFromConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
{
Prefix: "p_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"data_1=value-1", "data_2=value-2", "data_3=value-3",
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})
})
func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data_1": "value-1",
"data_2": "value-2",
"data_3": "value-3",
},
}
}
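Note that f.TestContainerOutput above checks that each expected string appears in the container's log, and that an EnvFrom entry with a Prefix yields a second, prefixed copy of every key. A small sketch (a hypothetical helper, not part of the upstream file) that derives those expectations instead of hand-writing them:

// expectedEnvLines returns the env lines the test expects for a ConfigMap
// consumed both plainly and with a prefix, mirroring the assertions above.
func expectedEnvLines(data map[string]string, prefix string) []string {
	lines := make([]string, 0, 2*len(data))
	for k, v := range data {
		lines = append(lines, fmt.Sprintf("%s=%s", k, v))           // plain copy
		lines = append(lines, fmt.Sprintf("%s%s=%s", prefix, k, v)) // prefixed copy
	}
	return lines
}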

vendor/k8s.io/kubernetes/test/e2e/common/configmap_volume.go generated vendored Normal file

@@ -0,0 +1,629 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = Describe("[sig-storage] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-nomap-simple
Description: Make sure a config map without key mappings can be consumed by
mounting it as a volume on the pod with no other settings.
*/
framework.ConformanceIt("should be consumable from pods in volume ", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
/*
Testname: configmap-nomap-default-mode
Description: Make sure a config map without key mappings can be consumed as
a volume with defaultMode set.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() {
defaultMode := int32(0400)
doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
})
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Feature:FSGroup]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
})
/*
Testname: configmap-nomap-user
Description: Make sure a config map without key mappings can be consumed as
a volume when the pod runs as a non-root user.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root ", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
})
/*
Testname: configmap-simple-mapped
Description: Make sure a config map can be consumed by mounting it as a
volume with a custom path (key-to-path mapping) on the pod with no other
settings, and make sure the pod actually consumes it.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Testname: configmap-with-item-mode-mapped
Description: Make sure config map works with an item mode (e.g. 0400)
for the config map item.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set", func() {
mode := int32(0400)
doConfigMapE2EWithMappings(f, 0, 0, &mode)
})
/*
Testname: configmap-simple-user-mapped
Description: Make sure config map works when it is mounted as non-root.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root ", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup]", func() {
doConfigMapE2EWithMappings(f, 1000, 1001, nil)
})
/*
Testname: configmap-update-test
Description: Make sure updates to a config map are observed in volumes
mounted in containers.
*/
framework.ConformanceIt("updates should be reflected in volume ", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "configmap-volume"
volumeMountPath := "/etc/configmap-volume"
containerName := "configmap-volume-test"
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
/*
Testname: configmap-CUD-test
Description: Make sure Create, Update, and Delete operations on a config map
are all observed in volumes mounted in containers.
*/
framework.ConformanceIt("optional updates should be reflected in volume ", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/configmap-volumes"
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
deleteContainerName := "delcm-volume-test"
deleteVolumeName := "deletecm-volume"
deleteConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: deleteName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
updateContainerName := "updcm-volume-test"
updateVolumeName := "updatecm-volume"
updateConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: updateName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"
createConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: createName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: deleteVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: deleteName,
},
Optional: &trueVal,
},
},
},
{
Name: updateVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: updateName,
},
Optional: &trueVal,
},
},
},
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Optional: &trueVal,
},
},
},
},
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
MountPath: path.Join(volumeMountPath, "delete"),
ReadOnly: true,
},
},
},
{
Name: updateContainerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
MountPath: path.Join(volumeMountPath, "update"),
ReadOnly: true,
},
},
},
{
Name: createContainerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
})
/*
Testname: configmap-multiple-volumes
Description: Make sure a config map works when it is mounted as two
different volumes in the same pod.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod ", func() {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
volumeName2 = "configmap-volume-2"
volumeMountPath2 = "/etc/configmap-volume-2"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
})
})
})
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
"data-2": "value-2",
"data-3": "value-3",
},
}
}
func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
userID := int64(uid)
groupID := int64(fsGroup)
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
one := int64(1)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &one,
},
}
if userID != 0 {
pod.Spec.SecurityContext.RunAsUser = &userID
}
if groupID != 0 {
pod.Spec.SecurityContext.FSGroup = &groupID
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
output := []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
"mode of file \"/etc/configmap-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
userID := int64(uid)
groupID := int64(fsGroup)
var (
name = "configmap-test-volume-map-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
one := int64(1)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Items: []v1.KeyToPath{
{
Key: "data-2",
Path: "path/to/data-2",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &one,
},
}
if userID != 0 {
pod.Spec.SecurityContext.RunAsUser = &userID
}
if groupID != 0 {
pod.Spec.SecurityContext.FSGroup = &groupID
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
} else {
mode := int32(0644)
itemMode = &mode
}
// Just check file mode if fsGroup is not set. If fsGroup is set, the
// final mode is adjusted and we are not testing that case.
output := []string{
"content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
}
if fsGroup == 0 {
modeString := fmt.Sprintf("%v", os.FileMode(*itemMode))
output = append(output, "mode of file \"/etc/configmap-volume/path/to/data-2\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
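For reference, the mode strings asserted by the helpers above come from os.FileMode's symbolic rendering. A tiny standalone program (illustrative, not part of this commit) shows the exact strings the 0400 defaultMode and the 0644 fallback produce:

package main

import (
	"fmt"
	"os"
)

func main() {
	// These match the "mode of file ..." assertions built in
	// doConfigMapE2EWithoutMappings / doConfigMapE2EWithMappings.
	fmt.Println(os.FileMode(0400)) // -r--------
	fmt.Println(os.FileMode(0644)) // -rw-r--r--
}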

vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go generated vendored Normal file

@@ -0,0 +1,448 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
probTestContainerName = "test-webserver"
probTestInitialDelaySeconds = 15
defaultObservationTimeout = time.Minute * 2
)
var _ = framework.KubeDescribe("Probing container", func() {
f := framework.NewDefaultFramework("container-probe")
var podClient *framework.PodClient
probe := webserverProbeBuilder{}
BeforeEach(func() {
podClient = f.PodClient()
})
/*
Testname: pods-readiness-probe-initial-delay
Description: Make sure that a pod with a readiness probe is not ready before
the initial delay elapses and never restarts.
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart ", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
f.WaitForPodReady(p.Name)
p, err := podClient.Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
Expect(isReady).To(BeTrue(), "pod should be ready")
// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
readyTime, err := getTransitionTimeForReadyCondition(p)
framework.ExpectNoError(err)
startedTime, err := getContainerStartedTime(p, probTestContainerName)
framework.ExpectNoError(err)
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
initialDelay := probTestInitialDelaySeconds * time.Second
if readyTime.Sub(startedTime) < initialDelay {
framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
}
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
/*
Testname: pods-readiness-probe-failure
Description: Make sure that a pod with a failing readiness probe never
becomes ready and never restarts.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart ", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
Consistently(func() (bool, error) {
p, err := podClient.Get(p.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// PodRunningReady returns a non-nil error when the pod is not ready, which
// is the expected outcome here, so only the boolean result is checked.
isReady, _ := testutils.PodRunningReady(p)
Expect(isReady).NotTo(BeTrue(), "pod should not be ready")
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
/*
Testname: pods-cat-liveness-probe-restarted
Description: Make sure the pod is restarted with a cat /tmp/health
liveness probe.
*/
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
/*
Testname: pods-cat-liveness-probe-not-restarted
Description: Make sure the pod is not restarted with a cat /tmp/health
liveness probe.
*/
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 0, defaultObservationTimeout)
})
/*
Testname: pods-http-liveness-probe-restarted
Description: Make sure the pod is restarted when its http liveness probe
fails.
*/
framework.ConformanceIt("should be restarted with a /healthz http liveness probe ", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.Liveness),
Command: []string{"/server"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
// Slow by design (5 min)
/*
Testname: pods-restart-count
Description: Make sure that when a pod gets restarted, its restart count
increases.
*/
framework.ConformanceIt("should have monotonically increasing restart count [Slow]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.Liveness),
Command: []string{"/server"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 5,
FailureThreshold: 1,
},
},
},
},
}, 5, time.Minute*5)
})
/*
Testname: pods-http-liveness-probe-not-restarted
Description: Make sure the pod is not restarted when its http liveness probe
succeeds.
*/
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe ", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(80),
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 5,
FailureThreshold: 1,
},
},
},
},
}, 0, defaultObservationTimeout)
})
/*
Testname: pods-docker-liveness-probe-timeout
Description: Make sure that the pod is restarted with a docker exec
liveness probe with timeout.
*/
framework.ConformanceIt("should be restarted with a docker exec liveness probe with timeout ", func() {
// TODO: enable this test once the default exec handler supports timeout.
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
runLivenessTest(f, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"/bin/sh", "-c", "sleep 10"},
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 1,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
})
func getContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error) {
for _, status := range p.Status.ContainerStatuses {
if status.Name != containerName {
continue
}
if status.State.Running == nil {
return time.Time{}, fmt.Errorf("Container is not running")
}
return status.State.Running.StartedAt.Time, nil
}
return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
}
func getTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
for _, cond := range p.Status.Conditions {
if cond.Type == v1.PodReady {
return cond.LastTransitionTime.Time, nil
}
}
return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
}
func getRestartCount(p *v1.Pod) int {
count := 0
for _, containerStatus := range p.Status.ContainerStatuses {
count += int(containerStatus.RestartCount)
}
return count
}
func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: probTestContainerName,
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
LivenessProbe: livenessProbe,
ReadinessProbe: readinessProbe,
},
},
},
}
return pod
}
type webserverProbeBuilder struct {
failing bool
initialDelay bool
}
func (b webserverProbeBuilder) withFailing() webserverProbeBuilder {
b.failing = true
return b
}
func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {
b.initialDelay = true
return b
}
func (b webserverProbeBuilder) build() *v1.Probe {
probe := &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/",
},
},
}
if b.initialDelay {
probe.InitialDelaySeconds = probTestInitialDelaySeconds
}
if b.failing {
probe.HTTPGet.Port = intstr.FromInt(81)
}
return probe
}
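// Illustrative composition of the builder above (a sketch; all names are
// taken from this file):
//
//	failing := webserverProbeBuilder{}.withFailing().withInitialDelay().build()
//	passing := webserverProbeBuilder{}.build()
//	pod := makePodSpec(passing, nil) // readiness probe only, no liveness probe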
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient()
ns := f.Namespace.Name
Expect(pod.Spec.Containers).NotTo(BeEmpty())
containerName := pod.Spec.Containers[0].Name
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
// Wait until the pod is no longer pending. (We check for a state other than
// 'Pending' rather than waiting for 'Running', since on failure the pod goes to
// 'Terminated', which would block a wait for 'Running' indefinitely.)
framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present.
By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
// Wait for the restart state to be as desired.
deadline := time.Now().Add(timeout)
lastRestartCount := initialRestartCount
observedRestarts := int32(0)
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, pod.Name, restartCount, time.Since(start))
if restartCount < lastRestartCount {
framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
ns, pod.Name, lastRestartCount, restartCount)
}
}
observedRestarts = restartCount - initialRestartCount
if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
// Stop once we have observed at least expectNumRestarts restarts.
break
}
lastRestartCount = restartCount
}
// If we expected 0 restarts, fail if observed any restart.
// If we expected n restarts (n > 0), fail if we observed < n restarts.
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
int(observedRestarts) < expectNumRestarts) {
framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
ns, pod.Name, expectNumRestarts, observedRestarts)
}
}
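// runExampleTCPLivenessTest is an illustrative sketch, not invoked by any
// spec above: it shows how runLivenessTest composes with the third probe
// handler type, TCPSocket. The port, pod name, and the expectation of one
// observed restart are assumptions for illustration.
func runExampleTCPLivenessTest(f *framework.Framework) {
	runLivenessTest(f, &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "liveness-tcp",
			Labels: map[string]string{"test": "liveness"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "liveness",
					Image:   busyboxImage,
					Command: []string{"/bin/sh", "-c", "sleep 600"},
					LivenessProbe: &v1.Probe{
						Handler: v1.Handler{
							// Nothing listens on this port, so the probe
							// fails and the kubelet restarts the container.
							TCPSocket: &v1.TCPSocketAction{Port: intstr.FromInt(8080)},
						},
						InitialDelaySeconds: 15,
						FailureThreshold:    1,
					},
				},
			},
		},
	}, 1, defaultObservationTimeout)
}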

113
vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go generated vendored Normal file

@ -0,0 +1,113 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = framework.KubeDescribe("Docker Containers", func() {
f := framework.NewDefaultFramework("containers")
/*
Testname: container-without-command-args
Description: When a Pod is created and neither 'command' nor 'args' are
provided for a Container, ensure that the docker image's default
command and args are used.
*/
framework.ConformanceIt("should use the image defaults if command and args are blank ", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
"[/ep default arguments]",
})
})
/*
Testname: container-with-args
Description: When a Pod is created and 'args' are provided for a
Container, ensure that they take precedence over the docker image's
default arguments, but that the default command is used.
*/
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) ", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
f.TestContainerOutput("override arguments", pod, 0, []string{
"[/ep override arguments]",
})
})
// Note: when you override the entrypoint, the image's arguments (docker cmd)
// are ignored.
/*
Testname: container-with-command
Description: When a Pod is created and 'command' is provided for a
Container, ensure that it takes precedence over the docker image's default
command.
*/
framework.ConformanceIt("should be able to override the image's default commmand (docker entrypoint) ", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
f.TestContainerOutput("override command", pod, 0, []string{
"[/ep-2]",
})
})
/*
Testname: container-with-command-args
Description: When a Pod is created and 'command' and 'args' are
provided for a Container, ensure that they take precedence over the docker
image's default command and arguments.
*/
framework.ConformanceIt("should be able to override the image's default command and arguments ", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
f.TestContainerOutput("override all", pod, 0, []string{
"[/ep-2 override arguments]",
})
})
})
const testContainerName = "test-container"
// Return a prototypical entrypoint test pod
func entrypointTestPod() *v1.Pod {
podName := "client-containers-" + string(uuid.NewUUID())
one := int64(1)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: testContainerName,
Image: imageutils.GetE2EImage(imageutils.EntrypointTester),
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &one,
},
}
}
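// The four specs above cover the full command/args precedence matrix. As a
// summary (the entrypoint-tester image is assumed to ship ENTRYPOINT /ep and
// CMD "default arguments", matching the expected outputs above):
//
//	pod command   pod args               effective invocation
//	(none)        (none)                 /ep default arguments
//	(none)        [override, arguments]  /ep override arguments
//	[/ep-2]       (none)                 /ep-2
//	[/ep-2]       [override, arguments]  /ep-2 override arguments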

382
vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go generated vendored Normal file

@ -0,0 +1,382 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var (
hostIPVersion = utilversion.MustParseSemantic("v1.8.0")
podUIDVersion = utilversion.MustParseSemantic("v1.8.0")
)
var _ = Describe("[sig-api-machinery] Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
/*
Testname: downwardapi-env-name-namespace-podip
Description: Ensure that downward API can provide the pod's name, namespace
and IP address as environment variables.
*/
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars ", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
}
expectations := []string{
fmt.Sprintf("POD_NAME=%v", podName),
fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
}
testDownwardAPI(f, podName, env, expectations)
})
/*
Testname: downwardapi-env-host-ip
Description: Ensure that downward API can provide the host node's IP
address as an environment variable.
*/
framework.ConformanceIt("should provide host IP as an env var ", func() {
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIP",
},
},
},
}
expectations := []string{
"HOST_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
}
testDownwardAPI(f, podName, env, expectations)
})
/*
Testname: downwardapi-env-limits-requests
Description: Ensure that downward API can provide CPU/memory limit
and CPU/memory request as environment variables.
*/
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars ", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
{
Name: "CPU_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.cpu",
},
},
},
{
Name: "MEMORY_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.memory",
},
},
},
}
expectations := []string{
"CPU_LIMIT=2",
"MEMORY_LIMIT=67108864",
"CPU_REQUEST=1",
"MEMORY_REQUEST=33554432",
}
testDownwardAPI(f, podName, env, expectations)
})
/*
Testname: downwardapi-env-default-allocatable
Description: Ensure that downward API can provide default node
allocatable values for CPU and memory as environment variables if CPU
and memory limits are not specified for a container.
*/
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable ", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
}
expectations := []string{
"CPU_LIMIT=[1-9]",
"MEMORY_LIMIT=[1-9]",
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
})
/*
Testname: downwardapi-env-pod-uid
Description: Ensure that downward API can provide pod UID as an
environment variable.
*/
framework.ConformanceIt("should provide pod UID as env vars ", func() {
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "POD_UID",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.uid",
},
},
},
}
expectations := []string{
"POD_UID=[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}",
}
testDownwardAPI(f, podName, env, expectations)
})
})
var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("downward-api")
Context("Downward API tests for local ephemeral storage", func() {
BeforeEach(func() {
framework.SkipUnlessLocalEphemeralStorageEnabled()
})
It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "EPHEMERAL_STORAGE_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.ephemeral-storage",
},
},
},
{
Name: "EPHEMERAL_STORAGE_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.ephemeral-storage",
},
},
},
}
expectations := []string{
fmt.Sprintf("EPHEMERAL_STORAGE_LIMIT=%d", 64*1024*1024),
fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024),
}
testDownwardAPIForEphemeralStorage(f, podName, env, expectations)
})
It("should provide default limits.ephemeral-storage from node allocatable", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "EPHEMERAL_STORAGE_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.ephemeral-storage",
},
},
},
}
expectations := []string{
"EPHEMERAL_STORAGE_LIMIT=[1-9]",
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
})
})
})
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("250m"),
v1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1250m"),
v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
}
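// A note on the values asserted by the limits/requests spec above
// (CPU_LIMIT=2, MEMORY_LIMIT=67108864): a ResourceFieldSelector with the
// default Divisor of "1" rounds the value up to integer units, so the 1250m
// CPU limit surfaces as "2" and the 64Mi memory limit as 67108864 bytes. An
// illustrative (assumed, not exercised here) selector exposing the CPU limit
// in millicores instead:
//
//	ResourceFieldRef: &v1.ResourceFieldSelector{
//		Resource: "limits.cpu",
//		Divisor:  resource.MustParse("1m"), // would surface as 1250
//	}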
func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("64Mi"),
},
},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}

474
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go generated vendored Normal file

@ -0,0 +1,474 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Downward API volume", func() {
// How long to wait for the pod's logs to become available
const podLogTimeout = 2 * time.Minute
f := framework.NewDefaultFramework("downward-api")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
/*
Testname: downwardapi-volume-podname
Description: Ensure that downward API can provide the pod's name through
DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide podname only ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
/*
Testname: downwardapi-volume-set-default-mode
Description: Ensure that downward API can set the default file permission
mode for DownwardAPIVolumeFiles if no mode is specified.
*/
framework.ConformanceIt("should set DefaultMode on files ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
/*
Testname: downwardapi-volume-set-mode
Description: Ensure that downward API can set the file permission mode for
DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should set mode on item file ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should provide podname as non-root with fsgroup [Feature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should provide podname as non-root with fsgroup and defaultMode [Feature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--r-----",
})
})
/*
Testname: downwardapi-volume-update-label
Description: Ensure that downward API updates labels in
DownwardAPIVolumeFiles when the pod's labels are modified.
*/
framework.ConformanceIt("should update labels on modification ", func() {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
podName := "labelsupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
})
/*
Testname: downwardapi-volume-update-annotation
Description: Ensure that downward API updates annotations in
DownwardAPIVolumeFiles when the pod's annotations are modified.
*/
framework.ConformanceIt("should update annotations on modification ", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
})
/*
Testname: downwardapi-volume-cpu-limit
Description: Ensure that downward API can provide the container's CPU limit
through DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu limit ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
/*
Testname: downwardapi-volume-memory-limit
Description: Ensure that downward API can provide the container's memory
limit through DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory limit ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
/*
Testname: downwardapi-volume-cpu-request
Description: Ensure that downward API can provide the container's CPU
request through DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu request ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
/*
Testname: downwardapi-volume-memory-request
Description: Ensure that downward API can provide the container's memory
request through DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory request ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
/*
Testname: downwardapi-volume-default-cpu
Description: Ensure that downward API can provide default node
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
limit is not specified for a container.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
Testname: downwardapi-volume-default-memory
Description: Ensure that downward API can provide default node
allocatable value for memory through DownwardAPIVolumeFiles if memory
limit is not specified for a container.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set ", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].Mode = itemMode
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode = defaultMode
}
return pod
}
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
return pod
}
func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
return pod
}
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
return pod
}
func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: mountImage,
Command: []string{"/mounttest", "--file_content=" + filePath},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("250m"),
v1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1250m"),
v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
}
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: mountImage,
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
}
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, labels, annotations)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations, pod)
return pod
}
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "podinfo",
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "podname",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
{
Path: "cpu_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.cpu",
},
},
{
Path: "cpu_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.cpu",
},
},
{
Path: "memory_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.memory",
},
},
{
Path: "memory_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.memory",
},
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
if len(labels) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "labels",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
})
}
if len(annotations) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "annotations",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations",
},
})
}
}
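// For reference, downwardAPIVolumeBasePod above yields this file layout under
// the /etc mount point used throughout these tests:
//
//	/etc/podname         metadata.name
//	/etc/cpu_limit       limits.cpu (via ResourceFieldRef)
//	/etc/cpu_request     requests.cpu
//	/etc/memory_limit    limits.memory
//	/etc/memory_request  requests.memory
//	/etc/labels          metadata.labels (appended only when labels are set)
//	/etc/annotations     metadata.annotations (appended only when annotations are set)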
// TODO: add test-webserver example as pointed out in https://github.com/kubernetes/kubernetes/pull/5093#discussion-diff-37606771

465
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go generated vendored Normal file

@ -0,0 +1,465 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"path"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
volumePath = "/test-volume"
)
var (
testImageRootUid = imageutils.GetE2EImage(imageutils.Mounttest)
testImageNonRootUid = imageutils.GetE2EImage(imageutils.MounttestUser)
)
var _ = Describe("[sig-storage] EmptyDir volumes", func() {
f := framework.NewDefaultFramework("emptydir")
Context("when FSGroup is specified [Feature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root", func() {
doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("new files should be created with FSGroup ownership when container is non-root", func() {
doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() {
doTestSubPathFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
})
It("volume on tmpfs should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
})
/*
Testname: volume-emptydir-mode-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
mount type.
*/
framework.ConformanceIt("volume on tmpfs should have the correct mode", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
*/
framework.ConformanceIt("should support (root,0644,tmpfs)", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
*/
framework.ConformanceIt("should support (root,0666,tmpfs)", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
*/
framework.ConformanceIt("should support (root,0777,tmpfs)", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
*/
framework.ConformanceIt("should support (non-root,0644,tmpfs)", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
*/
framework.ConformanceIt("should support (non-root,0666,tmpfs)", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
*/
framework.ConformanceIt("should support (non-root,0777,tmpfs)", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-mode
Description: For a Pod created with an 'emptyDir' Volume, ensure the
volume has 0777 unix file permissions.
*/
framework.ConformanceIt("volume on default medium should have the correct mode", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0644 unix file permissions is created and enforced
correctly.
*/
framework.ConformanceIt("should support (root,0644,default)", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0666 unix file permissions is created and enforced
correctly.
*/
framework.ConformanceIt("should support (root,0666,default)", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0777 unix file permissions is created and enforced
correctly.
*/
framework.ConformanceIt("should support (root,0777,default)", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0644 unix file permissions is created and enforced
correctly.
*/
framework.ConformanceIt("should support (non-root,0644,default)", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0666 unix file permissions is created and enforced
correctly.
*/
framework.ConformanceIt("should support (non-root,0666,default)", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0777 unix file permissions is created and enforced
correctly.
*/
framework.ConformanceIt("should support (non-root,0777,default)", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
})
})
const (
containerName = "test-container"
volumeName = "test-volume"
)
func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0660=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
fmt.Sprintf("--file_owner=%v", filePath),
}
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-rw----",
"content of file \"/test-volume/test-file\": mount-tester new file",
"owner GID of \"/test-volume/test-file\": 123",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestSubPathFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
subPath = "test-sub"
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
fmt.Sprintf("--file_owner=%v", volumePath),
}
pod.Spec.Containers[0].VolumeMounts[0].SubPath = subPath
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir subpath on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
"owner UID of \"/test-volume\": 0",
"owner GID of \"/test-volume\": 123",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
}
fsGroup := int64(1001)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
}
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0666=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0666 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-rw-rw-",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0777=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0777 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rwxrwxrwx",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func formatMedium(medium v1.StorageMedium) string {
if medium == v1.StorageMediumMemory {
return "tmpfs"
}
return "node default medium"
}
func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
podName := "pod-" + string(uuid.NewUUID())
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: image,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: source,
},
},
},
},
}
}
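// Illustrative composition of the helpers above (a sketch mirroring what
// doTest0644 does): mount a tmpfs-backed emptyDir, have mount-tester create
// a 0644 file, then assert on its perms and content.
//
//	source := &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}
//	pod := testPodWithVolume(testImageRootUid, volumePath, source)
//	pod.Spec.Containers[0].Args = []string{
//		fmt.Sprintf("--new_file_0644=%v", path.Join(volumePath, "test-file")),
//		fmt.Sprintf("--file_perm=%v", path.Join(volumePath, "test-file")),
//	}
//	f.TestContainerOutput("emptydir 0644 on tmpfs", pod, 0, []string{
//		"perms of file \"/test-volume/test-file\": -rw-r--r--",
//	})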

152
vendor/k8s.io/kubernetes/test/e2e/common/events.go generated vendored Normal file

@ -0,0 +1,152 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"sync"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
type Action func() error
// ObserveNodeUpdateAfterAction returns true if a node update matching the
// predicate was emitted from the system after performing the supplied action.
func ObserveNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodePredicate func(*v1.Node) bool, action Action) (bool, error) {
observedMatchingNode := false
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
informerStartedChan := make(chan struct{})
var informerStartedGuard sync.Once
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
ls, err := f.ClientSet.CoreV1().Nodes().List(options)
return ls, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Signal parent goroutine that watching has begun.
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
options.FieldSelector = nodeSelector.String()
w, err := f.ClientSet.CoreV1().Nodes().Watch(options)
return w, err
},
},
&v1.Node{},
0,
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
n, ok := newObj.(*v1.Node)
Expect(ok).To(Equal(true))
if nodePredicate(n) {
observedMatchingNode = true
}
},
},
)
// Start the informer and block this goroutine waiting for the started signal.
informerStopChan := make(chan struct{})
defer func() { close(informerStopChan) }()
go controller.Run(informerStopChan)
<-informerStartedChan
// Invoke the action function.
err := action()
if err != nil {
return false, err
}
// Poll, with a timeout, for the informer to observe a matching node update.
// Wait up to 2 minutes, polling every second.
timeout := 2 * time.Minute
interval := 1 * time.Second
err = wait.Poll(interval, timeout, func() (bool, error) {
return observedMatchingNode, nil
})
return err == nil, err
}
// ObserveEventAfterAction returns true if an event matching the predicate was
// emitted from the system after performing the supplied action.
func ObserveEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Event) bool, action Action) (bool, error) {
observedMatchingEvent := false
informerStartedChan := make(chan struct{})
var informerStartedGuard sync.Once
// Create an informer to list/watch events from the test framework namespace.
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
ls, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
return ls, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Signal parent goroutine that watching has begun.
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
w, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Watch(options)
return w, err
},
},
&v1.Event{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e, ok := obj.(*v1.Event)
// Assert the type before touching e's fields; logging first would
// dereference a nil event if the assertion failed.
Expect(ok).To(Equal(true))
By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message))
if eventPredicate(e) {
observedMatchingEvent = true
}
},
},
)
// Start the informer and block this goroutine waiting for the started signal.
informerStopChan := make(chan struct{})
defer func() { close(informerStopChan) }()
go controller.Run(informerStopChan)
<-informerStartedChan
// Invoke the action function.
err := action()
if err != nil {
return false, err
}
// Poll, with a timeout, for the informer to observe a matching event.
// Wait up to 2 minutes, polling every second.
timeout := 2 * time.Minute
interval := 1 * time.Second
err = wait.Poll(interval, timeout, func() (bool, error) {
return observedMatchingEvent, nil
})
return err == nil, err
}
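// Illustrative caller of ObserveEventAfterAction (a sketch; the pod variable
// and the "Scheduled" reason are assumptions, not taken from this file):
//
//	observed, err := ObserveEventAfterAction(f,
//		func(e *v1.Event) bool {
//			return e.Reason == "Scheduled" && e.InvolvedObject.Name == pod.Name
//		},
//		func() error {
//			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
//			return err
//		})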

147
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go generated vendored Normal file

@ -0,0 +1,147 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
// These tests exercise the Kubernetes expansion syntax $(VAR).
// For more information, see:
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/expansion.md
var _ = framework.KubeDescribe("Variable Expansion", func() {
f := framework.NewDefaultFramework("var-expansion")
/*
Testname: var-expansion-env
Description: Make sure environment variables can be set using an
expansion of previously defined environment variables.
*/
framework.ConformanceIt("should allow composing env vars into new env vars ", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "FOO",
Value: "foo-value",
},
{
Name: "BAR",
Value: "bar-value",
},
{
Name: "FOOBAR",
Value: "$(FOO);;$(BAR)",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("env composition", pod, 0, []string{
"FOO=foo-value",
"BAR=bar-value",
"FOOBAR=foo-value;;bar-value",
})
})
/*
Testname: var-expansion-command
Description: Make sure a container's command can be set using an
expansion of environment variables.
*/
framework.ConformanceIt("should allow substituting values in a container's command ", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("substitution in container's command", pod, 0, []string{
"test-value",
})
})
/*
Testname: var-expansion-arg
Description: Make sure a container's args can be set using an
expansion of environment variables.
*/
framework.ConformanceIt("should allow substituting values in a container's args ", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("substitution in container's args", pod, 0, []string{
"test-value",
})
})
})
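// For completeness (escaping is not exercised above): per the expansion design
// linked at the top of this file, "$$" escapes the syntax, so a value
// containing "$$(FOO)" surfaces literally as "$(FOO)" instead of being
// substituted.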

269
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go generated vendored Normal file

@ -0,0 +1,269 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// TODO: Consolidate this code with the code for emptyDir.
// This will require some smart refactoring.
var _ = Describe("[sig-storage] HostPath", func() {
f := framework.NewDefaultFramework("hostpath")
BeforeEach(func() {
// TODO: address 'permission denied' cleanup failures.
// Clean up before running the test.
_ = os.Remove("/tmp/test-file")
})
/*
Testname: volume-hostpath-mode
Description: For a Pod created with a 'HostPath' Volume, ensure the
volume is a directory with 0777 unix file permissions and that is has
the sticky bit (mode flag t) set.
*/
framework.ConformanceIt("should give a volume the correct mode", func() {
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath),
}
f.TestContainerOutput("hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dtrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
})
})
// This test requires mounting a folder into a container with write privileges.
It("should support r/w", func() {
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_mode=%v", filePath),
}
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePath),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
// Read the content of the file with the second container to
// verify that volumes are shared properly among containers within the pod.
f.TestContainerOutput("hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
})
})
It("should support subPath", func() {
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
// Write the file in the subPath from container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
fmt.Sprintf("--file_mode=%v", filePathInWriter),
}
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
It("should support existing directory subPath", func() {
framework.SkipUnlessSSHKeyPresent()
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
pod.Spec.NodeName = nodeList.Items[0].Name
// Create the subPath directory on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
framework.Failf("mkdir returned non-zero")
}
// Write the file in the subPath from container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
fmt.Sprintf("--file_mode=%v", filePathInWriter),
}
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
// TODO: consolidate common code of this test and the one above
It("should support existing single file subPath", func() {
framework.SkipUnlessSSHKeyPresent()
subPath := "sub-path-test-file"
retryDuration := 180
filePathInReader := path.Join(volumePath, subPath)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
pod.Spec.NodeName = nodeList.Items[0].Name
// Create the subPath file on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
framework.Failf("echo returned non-zero")
}
// Mount the file to the subPath in container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
})
//These constants are borrowed from the emptyDir test.
//const volumeName = "test-volume"
const containerName1 = "test-container-1"
const containerName2 = "test-container-2"
func mount(source *v1.HostPathVolumeSource) []v1.Volume {
return []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: source,
},
},
}
}
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
podName := "pod-host-path-test"
privileged := true
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
{
Name: containerName2,
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: mount(source),
},
}
}
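A minimal sketch of the hostPath-plus-subPath wiring the tests above exercise, pulled out of the e2e harness (the package and helper names are illustrative, not part of the test code):

package sketch

import "k8s.io/api/core/v1"

// hostPathSubPathMount pairs a hostPath volume exposing hostDir from the node
// with a mount that surfaces only subPath inside the container at mountPath.
func hostPathSubPathMount(hostDir, mountPath, subPath string) (v1.Volume, v1.VolumeMount) {
	vol := v1.Volume{
		Name: "test-volume",
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: hostDir},
		},
	}
	mnt := v1.VolumeMount{
		Name:      "test-volume",
		MountPath: mountPath,
		SubPath:   subPath,
	}
	return vol, mnt
}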

380
vendor/k8s.io/kubernetes/test/e2e/common/init_container.go generated vendored Normal file
View File

@ -0,0 +1,380 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("InitContainer", func() {
f := framework.NewDefaultFramework("init-container")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should invoke init containers on a RestartNever pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
},
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})
It("should invoke init containers on a RestartAlways pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Command: []string{"/bin/false"},
},
{
Name: "init2",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
// check for the first init container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
}
}
if len(t.Status.InitContainerStatuses) != 2 {
return false, nil
}
status := t.Status.InitContainerStatuses[1]
if status.State.Waiting == nil {
return false, fmt.Errorf("second init container should not be out of waiting: %#v", status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
}
status = t.Status.InitContainerStatuses[0]
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
}
// continue until we see an attempt to restart the pod
return status.LastTerminationState.Terminated != nil, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
// verify the init container restarts at least three times
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
status := t.Status.InitContainerStatuses[0]
if status.RestartCount < 3 {
return false, nil
}
framework.Logf("init container has failed twice: %#v", t)
// TODO: more conditions
return true, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
})
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Command: []string{"/bin/false"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Command: []string{"/bin/true"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
// check for the second init container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
}
}
if len(t.Status.InitContainerStatuses) != 2 {
return false, nil
}
status := t.Status.InitContainerStatuses[0]
if status.State.Terminated == nil {
if status.State.Waiting != nil && status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
}
return false, nil
}
if status.State.Terminated != nil && status.State.Terminated.ExitCode != 0 {
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
}
status = t.Status.InitContainerStatuses[1]
if status.State.Terminated == nil {
return false, nil
}
if status.State.Terminated.ExitCode == 0 {
return false, fmt.Errorf("second init container should have failed: %#v", status)
}
return true, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
conditions.PodCompleted,
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil())
})
})
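The tests above drive watch.Until with small predicate functions over watch events; a minimal standalone sketch of one such predicate (the helper name is illustrative, not framework API):

package sketch

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// podInitialized reports whether the watched pod has reached the
// Initialized=True condition; any non-pod object is an error.
func podInitialized(evt watch.Event) (bool, error) {
	pod, ok := evt.Object.(*v1.Pod)
	if !ok {
		return false, fmt.Errorf("unexpected object: %#v", evt.Object)
	}
	_, cond := podutil.GetPodCondition(&pod.Status, v1.PodInitialized)
	return cond != nil && cond.Status == v1.ConditionTrue, nil
}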

229
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
etcHostsPodName = "test-pod"
etcHostsHostNetworkPodName = "test-host-network-pod"
etcHostsPartialContent = "# Kubernetes-managed hosts file."
)
var etcHostsImageName = imageutils.GetE2EImage(imageutils.Netexec)
type KubeletManagedHostConfig struct {
hostNetworkPod *v1.Pod
pod *v1.Pod
f *framework.Framework
}
var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts")
config := &KubeletManagedHostConfig{
f: f,
}
/*
Testname: kubelet-managed-etc-hosts
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
it into the container.
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file ", func() {
By("Setting up the test")
config.setup()
By("Running the test")
config.verifyEtcHosts()
})
})
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
assertManagedStatus(config, etcHostsPodName, true, "busybox-1")
assertManagedStatus(config, etcHostsPodName, true, "busybox-2")
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
assertManagedStatus(config, etcHostsPodName, false, "busybox-3")
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-1")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
}
func (config *KubeletManagedHostConfig) setup() {
By("Creating hostNetwork=false pod")
config.createPodWithoutHostNetwork()
By("Creating hostNetwork=true pod")
config.createPodWithHostNetwork()
}
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
podSpec := config.createPodSpec(etcHostsPodName)
config.pod = config.f.PodClient().CreateSync(podSpec)
}
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
}
func assertManagedStatus(
config *KubeletManagedHostConfig, podName string, expectedIsManaged bool, name string) {
// TODO: workaround for https://github.com/kubernetes/kubernetes/issues/34256
//
// Retry until timeout for the contents of /etc/hosts to show
// up. Note: if /etc/hosts is properly mounted, then this will
// succeed immediately.
const retryTimeout = 30 * time.Second
retryCount := 0
etcHostsContent := ""
for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
etcHostsContent = config.getEtcHostsContent(podName, name)
isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
if expectedIsManaged == isManaged {
return
}
glog.Warningf(
"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)
retryCount++
time.Sleep(100 * time.Millisecond)
}
if expectedIsManaged {
framework.Failf(
"/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
} else {
framework.Failf(
"/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
}
}
func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string {
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
}
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-2",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-3",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: "/etc/hosts",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "host-etc-hosts",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts",
Type: hostPathType,
},
},
},
},
},
}
return pod
}
func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-2",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
},
},
}
return pod
}
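Everything assertManagedStatus retries above reduces to one check: a kubelet-managed /etc/hosts carries a fixed marker comment. A minimal sketch of that core test (the helper name is illustrative):

package sketch

import "strings"

// isKubeletManaged mirrors the substring check the e2e assertion polls for:
// the kubelet writes a recognizable header into any hosts file it manages.
func isKubeletManaged(etcHostsContent string) bool {
	return strings.Contains(etcHostsContent, "# Kubernetes-managed hosts file.")
}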

81
vendor/k8s.io/kubernetes/test/e2e/common/networking.go generated vendored Normal file
View File

@ -0,0 +1,81 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
. "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = Describe("[sig-network] Networking", func() {
f := framework.NewDefaultFramework("pod-network-test")
Describe("Granular Checks: Pods", func() {
// Try to hit all endpoints through a test container, retry 5 times,
// expect exactly one unique hostname. Each of these endpoints reports
// its own hostname.
/*
Testname: networking-intra-pod-http
Description: Try to hit test endpoints from a test container and make
sure each of them can report a unique hostname.
*/
framework.ConformanceIt("should function for intra-pod communication: http ", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
/*
Testname: networking-intra-pod-udp
Description: Try to hit test endpoints from a test container using udp
and make sure each of them can report a unique hostname.
*/
framework.ConformanceIt("should function for intra-pod communication: udp ", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
/*
Testname: networking-node-pod-http
Description: Try to hit test endpoints from the pod and make sure each
of them can report a unique hostname.
*/
framework.ConformanceIt("should function for node-pod communication: http ", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
/*
Testname: networking-node-pod-udp
Description: Try to hit test endpoints from the pod using udp and make sure
each of them can report a unique hostname.
*/
framework.ConformanceIt("should function for node-pod communication: udp ", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
})
})
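Each dial above passes the expected endpoint set for the check the comments describe: every reply must name the one expected pod. A minimal sketch of that comparison (the helper name is illustrative; the framework's dial methods do the real work):

package sketch

import "k8s.io/apimachinery/pkg/util/sets"

// repliesMatch reports whether the hostnames collected from repeated dials
// form exactly the expected set (for these tests, a single pod name).
func repliesMatch(expected sets.String, hostnames []string) bool {
	return expected.Equal(sets.NewString(hostnames...))
}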

691
vendor/k8s.io/kubernetes/test/e2e/common/pods.go generated vendored Normal file
View File

@ -0,0 +1,691 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"time"
"golang.org/x/net/websocket"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var (
buildBackOffDuration = time.Minute
syncLoopFrequency = 10 * time.Second
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
)
// testHostIP tests that a pod gets a host IP
func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
By("creating pod")
podClient.CreateSync(pod)
// Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute
t := time.Now()
for {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
}
if time.Since(t) >= hostIPTimeout {
framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
p.Name, time.Since(t).Seconds())
}
framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
time.Sleep(5 * time.Second)
}
}
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod)
time.Sleep(sleepAmount)
Expect(pod.Spec.Containers).NotTo(BeEmpty())
podName := pod.Name
containerName := pod.Spec.Containers[0].Name
By("getting restart delay-0")
_, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
By("getting restart delay-1")
delay1, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
By("getting restart delay-2")
delay2, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
return delay1, delay2
}
func getRestartDelay(podClient *framework.PodClient, podName string, containerName string) (time.Duration, error) {
beginTime := time.Now()
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
time.Sleep(time.Second)
pod, err := podClient.Get(podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
framework.Logf("getRestartDelay: status missing")
continue
}
if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) {
startedAt := status.State.Running.StartedAt.Time
finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time
framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
return startedAt.Sub(finishedAt), nil
}
}
return 0, fmt.Errorf("timeout getting pod restart delay")
}
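// expectedBackOff is an editorial sketch, not part of the original tests: it
// models the kubelet behavior getRestartDelay measures, where the restart
// delay roughly doubles per crash and is capped at kubelet.MaxContainerBackOff
// (maxBackOffTolerance above allows 1.3x drift on top of that cap).
func expectedBackOff(restarts int, initial, max time.Duration) time.Duration {
	delay := initial
	for i := 1; i < restarts; i++ {
		delay *= 2
		if delay > max {
			return max
		}
	}
	return delay
}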
var _ = framework.KubeDescribe("Pods", func() {
f := framework.NewDefaultFramework("pods")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
/*
Testname: pods-created-pod-assigned-hostip
Description: Make sure that when a pod is created, it is assigned a host
IP address.
*/
framework.ConformanceIt("should get a host IP ", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test",
Image: framework.GetPauseImageName(f.ClientSet),
},
},
},
})
})
/*
Testname: pods-submitted-removed
Description: Make sure a pod can be created, a watch can be set up for it,
pod creation is observed, the pod is deleted, and pod deletion is observed.
*/
framework.ConformanceIt("should be submitted and removed ", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
},
},
},
}
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
By("submitting the pod to kubernetes")
podClient.Create(pod)
By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("verifying pod creation was observed")
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
framework.Failf("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
framework.Logf("running pod: %#v", pod)
By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
By("verifying the kubelet observed the termination notice")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
}
for _, kubeletPod := range podList.Items {
if pod.Name != kubeletPod.Name {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
By("verifying pod deletion was observed")
deleted := false
var lastPod *v1.Pod
timer := time.After(framework.DefaultPodDeletionTimeout)
for !deleted {
select {
case event, _ := <-w.ResultChan():
switch event.Type {
case watch.Deleted:
lastPod = event.Object.(*v1.Pod)
deleted = true
case watch.Error:
framework.Logf("received a watch error: %v", event.Object)
framework.Failf("watch closed with error")
}
case <-timer:
framework.Failf("timed out waiting for pod deletion")
}
}
if !deleted {
framework.Failf("Failed to observe pod deletion")
}
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
})
/*
Testname: pods-updated-successfully
Description: Make sure it is possible to successfully update a pod's labels.
*/
framework.ConformanceIt("should be updated ", func() {
By("creating the pod")
name := "pod-update-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
},
},
},
}
By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) {
value = strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value
})
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
framework.Logf("Pod update OK")
})
/*
Testname: pods-update-active-deadline-seconds
Description: Make sure it is possible to create a pod, update its
activeDeadlineSeconds value, then wait for the deadline to pass and
verify that the pod is terminated.
*/
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated ", func() {
By("creating the pod")
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
},
},
},
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) {
newDeadline := int64(5)
pod.Spec.ActiveDeadlineSeconds = &newDeadline
})
framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
})
/*
Testname: pods-contain-services-environment-variables
Description: Make sure that when a pod is created it contains environment
variables for each active service.
*/
framework.ConformanceIt("should contain environment variables for services ", func() {
// Make a pod that will be a service.
// This pod serves its hostname via HTTP.
serverName := "server-envvars-" + string(uuid.NewUUID())
serverPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: serverName,
Labels: map[string]string{"name": serverName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "srv",
Image: framework.ServeHostnameImage,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
}
podClient.CreateSync(serverPod)
// This service exposes port 8080 of the test pod as a service on port 8765
// TODO(filbranden): We would like to use a unique service name such as:
// svcName := "svc-envvars-" + randomSuffix()
// However, that affects the name of the environment variables which are the capitalized
// service name, so that breaks this test. One possibility is to tweak the variable names
// to match the service. Another is to rethink environment variable names and possibly
// allow overriding the prefix in the service manifest.
svcName := "fooservice"
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Labels: map[string]string{
"name": svcName,
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 8765,
TargetPort: intstr.FromInt(8080),
}},
Selector: map[string]string{
"name": serverName,
},
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
Expect(err).NotTo(HaveOccurred(), "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID())
const containerName = "env3cont"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
// It's possible for the Pod to be created before the Kubelet is updated with the new
// service. In that case, we just retry.
const maxRetries = 3
expectedVars := []string{
"FOOSERVICE_SERVICE_HOST=",
"FOOSERVICE_SERVICE_PORT=",
"FOOSERVICE_PORT=",
"FOOSERVICE_PORT_8765_TCP_PORT=",
"FOOSERVICE_PORT_8765_TCP_PROTO=",
"FOOSERVICE_PORT_8765_TCP=",
"FOOSERVICE_PORT_8765_TCP_ADDR=",
}
framework.ExpectNoErrorWithRetries(func() error {
return f.MatchContainerOutput(pod, containerName, expectedVars, ContainSubstring)
}, maxRetries, "Container should have service environment variables set")
})
It("should support remote command execution over websockets", func() {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
},
},
},
}
By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name).
Resource("pods").
Name(pod.Name).
Suffix("exec").
Param("stderr", "1").
Param("stdout", "1").
Param("container", pod.Spec.Containers[0].Name).
Param("command", "cat").
Param("command", "/etc/resolv.conf")
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
if err != nil {
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
}
defer ws.Close()
buf := &bytes.Buffer{}
Eventually(func() error {
for {
var msg []byte
if err := websocket.Message.Receive(ws, &msg); err != nil {
if err == io.EOF {
break
}
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
}
if len(msg) == 0 {
continue
}
if msg[0] != 1 {
framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
}
buf.Write(msg[1:])
}
if buf.Len() == 0 {
return fmt.Errorf("Unexpected output from server")
}
if !strings.Contains(buf.String(), "nameserver") {
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
}
return nil
}, time.Minute, 10*time.Second).Should(BeNil())
})
It("should support retrieving logs from the container over websockets", func() {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
},
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name).
Resource("pods").
Name(pod.Name).
Suffix("log").
Param("container", pod.Spec.Containers[0].Name)
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
if err != nil {
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
}
defer ws.Close()
buf := &bytes.Buffer{}
for {
var msg []byte
if err := websocket.Message.Receive(ws, &msg); err != nil {
if err == io.EOF {
break
}
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
}
if len(strings.TrimSpace(string(msg))) == 0 {
continue
}
buf.Write(msg)
}
if buf.String() != "container is alive\n" {
framework.Failf("Unexpected websocket logs:\n%s", buf.String())
}
})
It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
podName := "pod-back-off-image"
containerName := "back-off"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "back-off-image"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
},
}
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
})
time.Sleep(syncLoopFrequency)
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("get restart delay after image update")
delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
}
})
// Slow issue #19027 (20 mins)
It("should cap back-off at MaxContainerBackOff [Slow]", func() {
podName := "back-off-cap"
containerName := "back-off-cap"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
},
}
podClient.CreateSync(pod)
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
// wait for a delay == capped delay of MaxContainerBackOff
By("geting restart delay when capped")
var (
delay1 time.Duration
err error
)
for i := 0; i < 3; i++ {
delay1, err = getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delay1 < kubelet.MaxContainerBackOff {
continue
}
}
if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
}
By("getting restart delay after a capped delay")
delay2, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
}
})
})
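The exec-over-websocket test above decodes the channel.k8s.io subprotocol by hand: each message carries a one-byte stream ID (0 stdin, 1 stdout, 2 stderr) followed by the payload, which is why it checks msg[0] and writes msg[1:]. A minimal sketch of that framing (an illustrative helper, not framework API):

package sketch

import "fmt"

// demuxFrame splits a channel.k8s.io websocket message into its stream ID and
// payload; the test keeps only stream 1 (stdout).
func demuxFrame(msg []byte) (stream byte, payload []byte, err error) {
	if len(msg) == 0 {
		return 0, nil, fmt.Errorf("empty frame")
	}
	return msg[0], msg[1:], nil
}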

113
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
type PrivilegedPodTestConfig struct {
f *framework.Framework
privilegedPod string
privilegedContainer string
notPrivilegedContainer string
pod *v1.Pod
}
var _ = framework.KubeDescribe("PrivilegedPod", func() {
config := &PrivilegedPodTestConfig{
f: framework.NewDefaultFramework("e2e-privileged-pod"),
privilegedPod: "privileged-pod",
privilegedContainer: "privileged-container",
notPrivilegedContainer: "not-privileged-container",
}
It("should enable privileged commands", func() {
By("Creating a pod with a privileged container")
config.createPods()
By("Executing in the privileged container")
config.run(config.privilegedContainer, true)
By("Executing in the non-privileged container")
config.run(config.notPrivilegedContainer, false)
})
})
func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool) {
cmd := []string{"ip", "link", "add", "dummy1", "type", "dummy"}
reverseCmd := []string{"ip", "link", "del", "dummy1"}
stdout, stderr, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, cmd...)
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
if expectSuccess {
Expect(err).NotTo(HaveOccurred(), msg)
// We need to clean up the dummy link that was created, as it
// leaks out into the node level -- yuck.
_, _, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, reverseCmd...)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("could not remove dummy1 link: %v", err))
} else {
Expect(err).To(HaveOccurred(), msg)
}
}
func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
isPrivileged := true
notPrivileged := false
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: c.privilegedPod,
Namespace: c.f.Namespace.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: c.privilegedContainer,
Image: busyboxImage,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
{
Name: c.notPrivilegedContainer,
Image: busyboxImage,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
},
},
}
}
func (c *PrivilegedPodTestConfig) createPods() {
podSpec := c.createPodsSpec()
c.pod = c.f.PodClient().CreateSync(podSpec)
}
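The two containers above differ only in their SecurityContext; because Privileged is a *bool, each state needs its own addressable boolean (isPrivileged / notPrivileged). A minimal sketch of a helper that hides that detail (illustrative, not part of the test code):

package sketch

import "k8s.io/api/core/v1"

// securityContext builds a SecurityContext with the given privileged flag,
// taking the address of the local parameter copy.
func securityContext(privileged bool) *v1.SecurityContext {
	return &v1.SecurityContext{Privileged: &privileged}
}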

1675
vendor/k8s.io/kubernetes/test/e2e/common/projected.go generated vendored Normal file

File diff suppressed because it is too large

140
vendor/k8s.io/kubernetes/test/e2e/common/secrets.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = Describe("[sig-api-machinery] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-env-vars
Description: Ensure that a secret can be consumed via environment
variables.
*/
framework.ConformanceIt("should be consumable from pods in env vars ", func() {
name := "secret-test-" + string(uuid.NewUUID())
secret := secretForTest(f.Namespace.Name, name)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "secret-env-test",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "SECRET_DATA",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"SECRET_DATA=value-1",
})
})
/*
Testname: secret-configmaps-source
Description: Ensure that a secret can be consumed via EnvFrom as a set
of environment variables.
*/
framework.ConformanceIt("should be consumable via the environment ", func() {
name := "secret-test-" + string(uuid.NewUUID())
secret := newEnvFromSecret(f.Namespace.Name, name)
By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
{
Prefix: "p_",
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"data_1=value-1", "data_2=value-2", "data_3=value-3",
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})
})
func newEnvFromSecret(namespace, name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string][]byte{
"data_1": []byte("value-1\n"),
"data_2": []byte("value-2\n"),
"data_3": []byte("value-3\n"),
},
}
}
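The expected output above follows from how EnvFrom projects secret data: each key becomes an environment variable, and a source Prefix is prepended verbatim, so referencing the same secret twice (once with prefix "p_") yields both variable sets. A minimal sketch of that mapping (illustrative only; the kubelet additionally skips keys that are not valid variable names):

package sketch

// projectEnv mimics the EnvFrom expansion the test asserts on: one variable
// per secret key, with the source prefix prepended verbatim.
func projectEnv(prefix string, data map[string][]byte) map[string]string {
	env := make(map[string]string, len(data))
	for key, value := range data {
		env[prefix+key] = string(value)
	}
	return env
}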

516
vendor/k8s.io/kubernetes/test/e2e/common/secrets_volume.go generated vendored Normal file
View File

@ -0,0 +1,516 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-volume-mount-without-mapping
Description: Ensure that a secret can be mounted to a pod volume
without mapping.
*/
framework.ConformanceIt("should be consumable from pods in volume ", func() {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Testname: secret-volume-mount-without-mapping-default-mode
Description: Ensure that a secret can be mounted to a pod volume
without mapping, with defaultMode set.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() {
defaultMode := int32(0400)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Testname: secret-volume-mount-without-mapping-non-root-default-mode-fsgroup
Description: Ensure that a secret can be mounted to a pod volume
without mapping, as non-root, with defaultMode and fsGroup set.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set ", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
uid := int64(1000)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &uid)
})
/*
Testname: secret-volume-mount-with-mapping
Description: Ensure that a secret can be mounted to a pod volume
with mappings.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() {
doSecretE2EWithMapping(f, nil)
})
/*
Testname: secret-volume-mount-with-mapping-item-mode
Description: Ensure that a secret can be mounted to a pod volume
with mappings and an item mode set.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set ", func() {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
var (
namespace2 *v1.Namespace
err error
secret2Name = "secret-test-" + string(uuid.NewUUID())
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
/*
Testname: secret-multiple-volume-mounts
Description: Ensure that a secret can be mounted in multiple volumes of the same pod.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod ", func() {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
var (
name = "secret-test-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
volumeName2 = "secret-volume-2"
volumeMountPath2 = "/etc/secret-volume-2"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": -rw-r--r--",
})
})
/*
Testname: secret-mounted-volume-optional-update-change
Description: Ensure that updates to an optional secret are
reflected in a mounted volume.
*/
framework.ConformanceIt("optional updates should be reflected in volume ", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/secret-volumes"
deleteName := "s-test-opt-del-" + string(uuid.NewUUID())
deleteContainerName := "dels-volume-test"
deleteVolumeName := "deletes-volume"
deleteSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: deleteName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
updateName := "s-test-opt-upd-" + string(uuid.NewUUID())
updateContainerName := "upds-volume-test"
updateVolumeName := "updates-volume"
updateSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: updateName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
createName := "s-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "creates-volume-test"
createVolumeName := "creates-volume"
createSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: createName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: deleteVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: deleteName,
Optional: &trueVal,
},
},
},
{
Name: updateVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: updateName,
Optional: &trueVal,
},
},
},
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: createName,
Optional: &trueVal,
},
},
},
},
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
MountPath: path.Join(volumeMountPath, "delete"),
ReadOnly: true,
},
},
},
{
Name: updateContainerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
MountPath: path.Join(volumeMountPath, "update"),
ReadOnly: true,
},
},
},
{
Name: createContainerName,
Image: mountImage,
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
})
})
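// secretForTest returns a secret with three plain-text entries (data-1
// through data-3) that the tests above mount and read back.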
func secretForTest(namespace, name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string][]byte{
"data-1": []byte("value-1\n"),
"data-2": []byte("value-2\n"),
"data-3": []byte("value-3\n"),
},
}
}
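// doSecretE2EWithoutMapping mounts the named secret at /etc/secret-volume with
// no key-to-path items and verifies both the content and the mode of data-1
// (0644, the API default, when defaultMode is nil).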
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string,
fsGroup *int64, uid *int64) {
var (
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
secret = secretForTest(f.Namespace.Name, secretName)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.Secret.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
if fsGroup != nil || uid != nil {
pod.Spec.SecurityContext = &v1.PodSecurityContext{
FSGroup: fsGroup,
RunAsUser: uid,
}
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
expectedOutput := []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
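// doSecretE2EWithMapping creates a fresh secret, remaps its data-1 key to
// new-path-data-1 via Items, and verifies the projected file's content and
// mode (0644 when no per-item mode is given).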
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
var (
name = "secret-test-map-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "new-path-data-1",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if mode != nil {
pod.Spec.Volumes[0].VolumeSource.Secret.Items[0].Mode = mode
} else {
defaultItemMode := int32(0644)
mode = &defaultItemMode
}
modeString := fmt.Sprintf("%v", os.FileMode(*mode))
expectedOutput := []string{
"content of file \"/etc/secret-volume/new-path-data-1\": value-1",
"mode of file \"/etc/secret-volume/new-path-data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}

vendor/k8s.io/kubernetes/test/e2e/common/sysctl.go generated vendored Normal file

@ -0,0 +1,210 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
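// Note: in this release sysctls are requested through pod annotations
// (v1.SysctlsPodAnnotationKey and v1.UnsafeSysctlsPodAnnotationKey below);
// later releases moved them into the pod's SecurityContext.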
var _ = framework.KubeDescribe("Sysctls", func() {
f := framework.NewDefaultFramework("sysctl")
var podClient *framework.PodClient
testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID())
pod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container",
Image: busyboxImage,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return &pod
}
BeforeEach(func() {
podClient = f.PodClient()
})
It("should support sysctls", func() {
pod := testPod()
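// PodAnnotationsFromSysctls serializes the slice into the annotation value;
// for this single entry that is roughly "kernel.shm_rmid_forced=1".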
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
})
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod()
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
})
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should reject invalid sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "foo-",
Value: "bar",
},
{
Name: "kernel.shmmax",
Value: "100000000",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shmall",
Value: "100000000",
},
{
Name: "bar..",
Value: "42",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := client.Create(pod)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(ContainSubstring(`Invalid value: "foo-"`))
Expect(err.Error()).To(ContainSubstring(`Invalid value: "bar.."`))
Expect(err.Error()).To(ContainSubstring(`safe-and-unsafe`))
Expect(err.Error()).NotTo(ContainSubstring("kernel.shmmax"))
})
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.msgmax",
Value: "10000000000",
},
})
By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
By("Checking that the pod was rejected")
Expect(ev).ToNot(BeNil())
Expect(ev.Reason).To(Equal("SysctlForbidden"))
})
})

vendor/k8s.io/kubernetes/test/e2e/common/util.go generated vendored Normal file

@ -0,0 +1,136 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
type Suite string
const (
E2E Suite = "e2e"
NodeE2E Suite = "node e2e"
)
var (
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
busyboxImage = "busybox"
)
var CurrentSuite Suite
// CommonImageWhiteList is the list of images used in the common tests. These images should be prepulled
// before a test starts, so that the tests won't fail due to image pulling flakes. Currently, this is
// only used by the node e2e test.
// TODO(random-liu): Change the image puller pod to use a similar mechanism.
var CommonImageWhiteList = sets.NewString(
"busybox",
imageutils.GetE2EImage(imageutils.EntrypointTester),
imageutils.GetE2EImage(imageutils.Liveness),
imageutils.GetE2EImage(imageutils.Mounttest),
imageutils.GetE2EImage(imageutils.MounttestUser),
imageutils.GetE2EImage(imageutils.Netexec),
imageutils.GetE2EImage(imageutils.NginxSlim),
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.TestWebserver),
imageutils.GetE2EImage(imageutils.Hostexec),
"gcr.io/google_containers/volume-nfs:0.8",
"gcr.io/google_containers/volume-gluster:0.2",
"gcr.io/google_containers/e2e-net-amd64:1.0",
)
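// svcByName returns a NodePort service that selects pods labeled name=<name>
// and forwards the given port straight through to them.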
func svcByName(name string, port int) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Selector: map[string]string{
"name": name,
},
Ports: []v1.ServicePort{{
Port: int32(port),
TargetPort: intstr.FromInt(port),
}},
},
}
}
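// NewSVCByName creates the NodePort service returned by svcByName on a fixed
// test port (9376).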
func NewSVCByName(c clientset.Interface, ns, name string) error {
const testPort = 9376
_, err := c.CoreV1().Services(ns).Create(svcByName(name, testPort))
return err
}
// NewRCByName creates a replication controller whose selector matches the given name.
func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
return c.CoreV1().ReplicationControllers(ns).Create(framework.RcByNamePort(
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
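// RestartNodes resets the given GCE instances via gcloud and then waits for
// each node's boot ID to change as evidence that the reboot really happened.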
func RestartNodes(c clientset.Interface, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
}
oldBootIDs[name] = node.Status.NodeInfo.BootID
}
// Reboot the nodes.
args := []string{
"compute",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"instances",
"reset",
}
args = append(args, nodeNames...)
args = append(args, fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone))
stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil {
return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
// Wait for their boot IDs to change.
for _, name := range nodeNames {
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}
return node.Status.NodeInfo.BootID != oldBootIDs[name], nil
}); err != nil {
return fmt.Errorf("error waiting for node %s boot ID to change: %s", name, err)
}
}
return nil
}
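// A minimal usage sketch (the node names here are hypothetical; the call
// requires gcloud credentials and a GCE cloud config in the test context):
//
//	if err := RestartNodes(f.ClientSet, []string{"node-1", "node-2"}); err != nil {
//		framework.Failf("restart failed: %v", err)
//	}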

vendor/k8s.io/kubernetes/test/e2e/common/volumes.go generated vendored Normal file

@ -0,0 +1,153 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With a containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting a simple 'index.html' file.
* Then it uses the appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server's root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With a server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
// The GlusterFS test is duplicated from test/e2e/volumes.go. Any changes made
// there should be duplicated here.
package common
import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
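// Each It block below follows the same pattern: start a containerized server
// with a framework helper, mount the exported share in a client pod through
// framework.TestVolumeClient, and assert the expected index.html content.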
// These tests need privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = Describe("[sig-storage] GCP Volumes", func() {
f := framework.NewDefaultFramework("gcp-volume")
// note that namespace deletion is handled by the delete-namespace flag;
// the variables below are filled in BeforeEach
var namespace *v1.Namespace
var c clientset.Interface
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
namespace = f.Namespace
c = f.ClientSet
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFSv4", func() {
It("should be mountable for NFSv4", func() {
config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{})
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
},
File: "index.html",
ExpectedContent: "Hello from NFS!",
},
}
// Must match content of test/images/volumes-tester/nfs/index.html
framework.TestVolumeClient(c, config, nil, tests)
})
})
Describe("NFSv3", func() {
It("should be mountable for NFSv3", func() {
config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{})
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/exports",
ReadOnly: true,
},
},
File: "index.html",
ExpectedContent: "Hello from NFS!",
},
}
// Must match content of test/images/volumes-tester/nfs/index.html
framework.TestVolumeClient(c, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
It("should be mountable", func() {
// create gluster server and endpoints
config, _, _ := framework.NewGlusterfsServer(c, namespace.Name)
name := config.Prefix + "-server"
defer func() {
framework.VolumeTestCleanup(f, config)
err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
ExpectedContent: "Hello from GlusterFS!",
},
}
framework.TestVolumeClient(c, config, nil, tests)
})
})
})