Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@@ -3,12 +3,15 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"csi_objects.go",
"csi_volumes.go",
"detach_mounted.go",
"empty_dir_wrapper.go",
"ephemeral_volume.go",
"flexvolume.go",
"flexvolume_mounted_volume_resize.go",
"flexvolume_online_resize.go",
"generic_persistent_volume-disruptive.go",
"in_tree_volumes.go",
"mounted_volume_resize.go",
"nfs_persistent_volume-disruptive.go",
"pd.go",
@@ -20,7 +23,7 @@ go_library(
"regional_pd.go",
"subpath.go",
"volume_expand.go",
"volume_io.go",
"volume_limits.go",
"volume_metrics.go",
"volume_provisioning.go",
"volumes.go",
@@ -29,54 +32,59 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)
@@ -91,6 +99,9 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/storage/drivers:all-srcs",
"//test/e2e/storage/testpatterns:all-srcs",
"//test/e2e/storage/testsuites:all-srcs",
"//test/e2e/storage/utils:all-srcs",
"//test/e2e/storage/vsphere:all-srcs",
],


@@ -1,415 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"fmt"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
)
var csiImageVersions = map[string]string{
"hostpathplugin": "v0.2.0",
"csi-attacher": "v0.2.0",
"csi-provisioner": "v0.2.1",
"driver-registrar": "v0.2.0",
}
func csiContainerImage(image string) string {
var fullName string
fullName += framework.TestContext.CSIImageRegistry + "/" + image + ":"
if framework.TestContext.CSIImageVersion != "" {
fullName += framework.TestContext.CSIImageVersion
} else {
fullName += csiImageVersions[image]
}
return fullName
}
// Create the driver registrar cluster role if it doesn't exist, with no teardown, so that tests
// are parallelizable. This role will be shared by many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
creatingString := "Creating"
if teardown {
creatingString = "Deleting"
}
By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
}
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiContainerImage("csi-provisioner"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiContainerImage("driver-registrar"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiContainerImage("csi-attacher"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiContainerImage("hostpathplugin"),
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}
func deployGCEPDCSIDriver(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
nodeSA *v1.ServiceAccount,
controllerSA *v1.ServiceAccount,
) {
// Get API Objects from manifests
nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()
controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()
controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
framework.ExpectNoError(err, "Failed to create Service from manifest")
// Got all objects from manifests; now try to delete any existing objects
err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
}
}
err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
}
}
err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
}
}
if teardown {
return
}
// Create new API Objects through client
_, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)
_, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)
_, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}


@@ -17,209 +17,323 @@ limitations under the License.
package storage
import (
"context"
"fmt"
"math/rand"
"time"
"regexp"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"crypto/sha256"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner"
csiExternalAttacherClusterRoleName string = "system:csi-external-attacher"
csiDriverRegistrarClusterRoleName string = "csi-driver-registrar"
)
type csiTestDriver interface {
createCSIDriver()
cleanupCSIDriver()
createStorageClassTest(node v1.Node) storageClassTest
// List of testDrivers to be executed in the loop below
var csiTestDrivers = []func() drivers.TestDriver{
drivers.InitHostPathCSIDriver,
drivers.InitGcePDCSIDriver,
drivers.InitGcePDExternalCSIDriver,
drivers.InitHostV0PathCSIDriver,
}
var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
"hostPath": initCSIHostpath,
// Feature tag to skip test in CI, pending fix of #62237
"[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD,
// List of testSuites to be executed in the loop below
var csiTestSuites = []func() testsuites.TestSuite{
testsuites.InitVolumesTestSuite,
testsuites.InitVolumeIOTestSuite,
testsuites.InitVolumeModeTestSuite,
testsuites.InitSubPathTestSuite,
testsuites.InitProvisioningTestSuite,
}
func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
tunedPatterns := []testpatterns.TestPattern{}
for _, pattern := range patterns {
// Skip inline volume and pre-provisioned PV tests for CSI drivers
if pattern.VolType == testpatterns.InlineVolume || pattern.VolType == testpatterns.PreprovisionedPV {
continue
}
tunedPatterns = append(tunedPatterns, pattern)
}
return tunedPatterns
}
// This executes testSuites for CSI volumes.
var _ = utils.SIGDescribe("CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-mock-plugin")
f := framework.NewDefaultFramework("csi-volumes")
var (
cancel context.CancelFunc
cs clientset.Interface
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
)
BeforeEach(func() {
ctx, c := context.WithCancel(context.Background())
cancel = c
cs = f.ClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "csi",
ClientNodeName: node.Name,
ServerNodeName: node.Name,
WaitForCompletion: true,
Namespace: ns.Name,
Prefix: "csi",
}
// Debugging of the following tests heavily depends on the log output
// of the different containers. Therefore include all of that in log
// files (when using --report-dir, as in the CI) or the output stream
// (otherwise).
to := podlogs.LogOutput{
StatusWriter: GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter
} else {
test := CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
reg.ReplaceAllString(test.FullTestText, "_") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
// Pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
}
csiDriverRegistrarClusterRole(config)
})
for driverName, initCSIDriver := range csiTestDrivers {
curDriverName := driverName
curInitCSIDriver := initCSIDriver
AfterEach(func() {
cancel()
})
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() {
var (
driver csiTestDriver
)
for _, initDriver := range csiTestDrivers {
curDriver := initDriver()
Context(drivers.GetDriverNameWithFeatureTags(curDriver), func() {
driver := curDriver
BeforeEach(func() {
driver = curInitCSIDriver(f, config)
driver.createCSIDriver()
// setupDriver
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
driver.cleanupCSIDriver()
// Cleanup driver
driver.CleanupDriver()
})
It("should provision storage", func() {
t := driver.createStorageClassTest(node)
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
testsuites.RunTestSuite(f, config, driver, csiTestSuites, csiTunePattern)
})
}
// The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
Context("CSI attach test using HostPath driver [Feature:CSIDriverRegistry]", func() {
var (
cs clientset.Interface
csics csiclient.Interface
driver drivers.TestDriver
)
BeforeEach(func() {
cs = f.ClientSet
csics = f.CSIClientSet
driver = drivers.InitHostPathCSIDriver()
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
driver.CleanupDriver()
})
tests := []struct {
name string
driverAttachable bool
driverExists bool
expectVolumeAttachment bool
}{
{
name: "non-attachable volume does not need VolumeAttachment",
driverAttachable: false,
driverExists: true,
expectVolumeAttachment: false,
},
{
name: "attachable volume needs VolumeAttachment",
driverAttachable: true,
driverExists: true,
expectVolumeAttachment: true,
},
{
name: "volume with no CSI driver needs VolumeAttachment",
driverExists: false,
expectVolumeAttachment: true,
},
}
for _, t := range tests {
test := t
It(test.name, func() {
if test.driverExists {
csiDriver := createCSIDriver(csics, drivers.GetUniqueDriverName(driver), test.driverAttachable)
if csiDriver != nil {
defer csics.CsiV1alpha1().CSIDrivers().Delete(csiDriver.Name, nil)
}
}
By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
NodeName: nodeName,
}
class, claim, pod := startPausePod(cs, scTest, ns.Name)
if class != nil {
defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
}
if claim != nil {
defer cs.CoreV1().PersistentVolumeClaims(ns.Name).Delete(claim.Name, nil)
}
if pod != nil {
// Fully delete (=unmount) the pod before deleting CSI driver
defer framework.DeletePodWithWait(f, cs, pod)
}
if pod == nil {
return
}
err := framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
By("Checking if VolumeAttachment was created for the pod")
// Compute the expected VolumeAttachment name and check whether it exists
handle := getVolumeHandle(cs, claim)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, scTest.Provisioner, nodeName)))
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
_, err = cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
if test.expectVolumeAttachment {
framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
}
} else {
framework.ExpectNoError(err, "Failed to find VolumeAttachment")
}
}
if !test.expectVolumeAttachment {
Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
}
})
}
})
})
type hostpathCSIDriver struct {
combinedClusterRoleNames []string
serviceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
return &hostpathCSIDriver{
combinedClusterRoleNames: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
func createCSIDriver(csics csiclient.Interface, name string, attachable bool) *csiv1alpha1.CSIDriver {
By("Creating CSIDriver instance")
driver := &csiv1alpha1.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
f: f,
config: config,
}
}
func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
return storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
}
func (h *hostpathCSIDriver) createCSIDriver() {
By("deploying csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, h.serviceAccount)
}
func (h *hostpathCSIDriver) cleanupCSIDriver() {
By("uninstalling csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
csiHostPathPod(cs, config, true, f, h.serviceAccount)
csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
}
type gcePDCSIDriver struct {
controllerClusterRoles []string
nodeClusterRoles []string
controllerServiceAccount *v1.ServiceAccount
nodeServiceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
cs := f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP credentials as a secret "cloud-sa":
// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
return &gcePDCSIDriver{
nodeClusterRoles: []string{
csiDriverRegistrarClusterRoleName,
Spec: csiv1alpha1.CSIDriverSpec{
AttachRequired: &attachable,
},
controllerClusterRoles: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
}
driver, err := csics.CsiV1alpha1().CSIDrivers().Create(driver)
framework.ExpectNoError(err, "Failed to create CSIDriver: %v", err)
return driver
}
func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
// Re-get the claim to its latest state, which includes the bound volume name
claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
}
if pv.Spec.CSI == nil {
Expect(pv.Spec.CSI).NotTo(BeNil())
return ""
}
return pv.Spec.CSI.VolumeHandle
}
func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class := newStorageClass(t, ns, "")
class, err := cs.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err, "Failed to create class : %v", err)
claim := newClaim(t, ns, "")
claim.Spec.StorageClassName = &class.Name
claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err, "Failed to create claim: %v", err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.Pause),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
ReadOnly: false,
},
},
},
},
},
f: f,
config: config,
}
}
func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
return storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
if len(t.NodeName) != 0 {
pod.Spec.NodeName = t.NodeName
}
}
func (g *gcePDCSIDriver) createCSIDriver() {
By("deploying gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
g.controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false /* teardown */)
g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
}
func (g *gcePDCSIDriver) cleanupCSIDriver() {
By("uninstalling gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
csiServiceAccount(cs, config, "gce-node", true /* teardown */)
pod, err = cs.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return class, claim, pod
}


@@ -0,0 +1,227 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"math/rand"
"path"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
durationForStuckMount = 110 * time.Second
)
var _ = utils.SIGDescribe("Detaching volumes", func() {
f := framework.NewDefaultFramework("flexvolume")
// Note that namespace deletion is handled by the delete-namespace flag
var cs clientset.Interface
var ns *v1.Namespace
var node v1.Node
var suffix string
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessSSHKeyPresent()
cs = f.ClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
suffix = ns.Name
})
It("should not work when mount is in progress", func() {
driver := "attachable-with-long-format"
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
volumeSource := v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{
Driver: "k8s/" + driverInstallAs,
},
}
clientPod := getFlexVolumePod(volumeSource, node.Name)
By("Creating pod that uses slow format volume")
pod, err := cs.CoreV1().Pods(ns.Name).Create(clientPod)
Expect(err).NotTo(HaveOccurred())
uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs)
By("waiting for volumes to be attached to node")
err = waitForVolumesAttached(cs, node.Name, uniqueVolumeName)
Expect(err).NotTo(HaveOccurred(), "while waiting for volume to attach to %s node", node.Name)
By("waiting for volume-in-use on the node after pod creation")
err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName)
Expect(err).NotTo(HaveOccurred(), "while waiting for volume in use")
By("waiting for kubelet to start mounting the volume")
time.Sleep(20 * time.Second)
By("Deleting the flexvolume pod")
err = framework.DeletePodWithWait(f, cs, pod)
Expect(err).NotTo(HaveOccurred(), "in deleting the pod")
// Wait a bit for node to sync the volume status
time.Sleep(30 * time.Second)
By("waiting for volume-in-use on the node after pod deletion")
err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName)
Expect(err).NotTo(HaveOccurred(), "while waiting for volume in use")
// Wait for 110s because the mount device operation sleeps for 120 seconds,
// and we already waited for 30s above.
time.Sleep(durationForStuckMount)
By("waiting for volume to disappear from node in-use")
err = waitForVolumesNotInUse(cs, node.Name, uniqueVolumeName)
Expect(err).NotTo(HaveOccurred(), "while waiting for volume to be removed from in-use")
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})
})
func getUniqueVolumeName(pod *v1.Pod, driverName string) string {
return fmt.Sprintf("flexvolume-k8s/%s/%s", driverName, pod.Spec.Volumes[0].Name)
}
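// For example, for the pod above with volume "test-long-detach-flex" and a
// driver installed as "attachable-with-long-format-<suffix>", this yields
// "flexvolume-k8s/attachable-with-long-format-<suffix>/test-long-detach-flex",
// which is the exact string the helpers below compare against the entries
// the node publishes in node.Status.VolumesInUse and node.Status.VolumesAttached.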
func waitForVolumesNotInUse(client clientset.Interface, nodeName, volumeName string) error {
return wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching node %s with %v", nodeName, err)
}
volumesInUse := node.Status.VolumesInUse
for _, volume := range volumesInUse {
if string(volume) == volumeName {
return false, nil
}
}
return true, nil
})
}
func waitForVolumesAttached(client clientset.Interface, nodeName, volumeName string) error {
return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching node %s with %v", nodeName, err)
}
volumeAttached := node.Status.VolumesAttached
for _, volume := range volumeAttached {
if string(volume.Name) == volumeName {
return true, nil
}
}
return false, nil
})
}
func waitForVolumesInUse(client clientset.Interface, nodeName, volumeName string) error {
return wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching node %s with %v", nodeName, err)
}
volumesInUse := node.Status.VolumesInUse
for _, volume := range volumesInUse {
if string(volume) == volumeName {
return true, nil
}
}
return false, nil
})
}
func getFlexVolumePod(volumeSource v1.VolumeSource, nodeName string) *v1.Pod {
var gracePeriod int64
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "flexvolume-detach-test" + "-client",
Labels: map[string]string{
"role": "flexvolume-detach-test" + "-client",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "flexvolume-detach-test" + "-client",
Image: BusyBoxImage,
WorkingDir: "/opt",
// An imperative and easily debuggable container which reads volume contents for
// us to scan in the tests or by eye.
// We expect that /opt is empty in the minimal containers which we use in this test.
Command: []string{
"/bin/sh",
"-c",
"while true ; do cat /opt/foo/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "test-long-detach-flex",
MountPath: "/opt/foo",
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
Volumes: []v1.Volume{
{
Name: "test-long-detach-flex",
VolumeSource: volumeSource,
},
},
NodeName: nodeName,
},
}
return clientPod
}


@@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"csi.go",
"csi_objects.go",
"in_tree.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/drivers",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,183 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drivers
import (
"fmt"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestDriver represents an interface for a driver to be tested in TestSuite
type TestDriver interface {
// GetDriverInfo returns DriverInfo for the TestDriver
GetDriverInfo() *DriverInfo
// CreateDriver creates all driver resources that are required for the TestDriver method,
// except CreateVolume
CreateDriver()
// CleanupDriver cleans up all the resources that are created in CreateDriver
CleanupDriver()
// SkipUnsupportedTest skips the test if the TestPattern is not suitable to test with the TestDriver
SkipUnsupportedTest(testpatterns.TestPattern)
SkipUnsupportedTest(testpatterns.TestPattern)
}
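// Illustrative sketch (not part of this commit): a minimal no-op driver
// satisfying the TestDriver interface above. The noopDriver name and body
// are hypothetical.
//
//	type noopDriver struct{ driverInfo DriverInfo }
//
//	func (d *noopDriver) GetDriverInfo() *DriverInfo                   { return &d.driverInfo }
//	func (d *noopDriver) CreateDriver()                                {}
//	func (d *noopDriver) CleanupDriver()                               {}
//	func (d *noopDriver) SkipUnsupportedTest(testpatterns.TestPattern) {}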
// PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume
type PreprovisionedVolumeTestDriver interface {
TestDriver
// CreateVolume creates a pre-provisioned volume.
CreateVolume(testpatterns.TestVolType) interface{}
// DeleteVolume deletes a volume that is created in CreateVolume
DeleteVolume(testpatterns.TestVolType, interface{})
}
// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
type InlineVolumeTestDriver interface {
PreprovisionedVolumeTestDriver
// GetVolumeSource returns a volumeSource for an inline volume.
// It will set readOnly and fsType on the volumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource
}
// PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV
type PreprovisionedPVTestDriver interface {
PreprovisionedVolumeTestDriver
// GetPersistentVolumeSource returns a PersistentVolumeSource for a pre-provisioned Persistent Volume.
// It will set readOnly and fsType on the PersistentVolumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource
}
// DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV
type DynamicPVTestDriver interface {
TestDriver
// GetDynamicProvisionStorageClass returns a StorageClass to dynamically provision a Persistent Volume.
// It will set fsType on the StorageClass if the TestDriver supports it.
// It will return nil if the TestDriver doesn't support it.
GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass
}
// DriverInfo represents a combination of parameters to be used in implementation of TestDriver
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver
MaxFileSize int64 // Max file size to be tested for this driver
SupportedFsType sets.String // Set of supported fs types
SupportedMountOption sets.String // Set of supported mount options
RequiredMountOption sets.String // Set of required mount options (optional)
IsPersistent bool // Flag to represent whether it provides persistence
IsFsGroupSupported bool // Flag to represent whether it supports fsGroup
IsBlockSupported bool // Flag to represent whether it supports Block Volume
// Parameters below will be set inside the test loop by using SetCommonDriverParameters.
// Drivers that implement TestDriver are required to set all the above parameters
// and return DriverInfo on the GetDriverInfo() call.
Framework *framework.Framework // Framework for the test
Config framework.VolumeTestConfig // VolumeTestConfig for the test
}
// GetDriverNameWithFeatureTags returns driver name with feature tags
// For example:
// - [Driver: nfs]
// - [Driver: rbd][Feature:Volumes]
func GetDriverNameWithFeatureTags(driver TestDriver) string {
dInfo := driver.GetDriverInfo()
return fmt.Sprintf("[Driver: %s]%s", dInfo.Name, dInfo.FeatureTag)
}
// CreateVolume creates a volume for the test, unless it is a DynamicPV test
func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface{} {
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
return pDriver.CreateVolume(volType)
}
case testpatterns.DynamicPV:
// No need to create volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
return nil
}
// DeleteVolume deletes the volume for the test, unless it is a DynamicPV test
func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType, testResource interface{}) {
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
pDriver.DeleteVolume(volType, testResource)
}
case testpatterns.DynamicPV:
// No need to delete volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
}
// SetCommonDriverParameters sets common driver parameters on a TestDriver.
// This function is intended to be called in BeforeEach() inside the test loop.
func SetCommonDriverParameters(
driver TestDriver,
f *framework.Framework,
config framework.VolumeTestConfig,
) {
dInfo := driver.GetDriverInfo()
dInfo.Framework = f
dInfo.Config = config
}
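// Typical call sequence inside a test loop, as in csi_volumes.go above
// (names unqualified here because this example lives in package drivers):
//
//	driver := InitHostPathCSIDriver()
//	SetCommonDriverParameters(driver, f, config)
//	driver.CreateDriver()
//	defer driver.CleanupDriver()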
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storagev1.VolumeBindingMode,
ns string,
suffix string,
) *storagev1.StorageClass {
if bindingMode == nil {
defaultBindingMode := storagev1.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storagev1.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
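// For example, getStorageClass("csi-hostpath", nil, nil, ns, "csi-hostpath-sc")
// returns a StorageClass named ns + "-csi-hostpath-sc" with the default
// VolumeBindingImmediate mode (the argument values here are illustrative).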
// GetUniqueDriverName returns a unique driver name that can be used in parallel tests
func GetUniqueDriverName(driver TestDriver) string {
return fmt.Sprintf("%s-%s", driver.GetDriverInfo().Name, driver.GetDriverInfo().Framework.UniqueName)
}


@@ -0,0 +1,367 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This file defines various csi volume test drivers for TestSuites.
*
* There are two ways, how to prepare test drivers:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* It creates a server pod which defines one volume for the tests.
* These tests work only when privileged containers are allowed, exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...)
* Appropriate server or cloud provider must exist somewhere outside
* the tested Kubernetes cluster. CreateVolume will create a new volume to be
* used in the TestSuites for inlineVolume or DynamicPV tests.
*/
package drivers
import (
"fmt"
"math/rand"
"time"
. "github.com/onsi/ginkgo"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// hostpathCSI
type hostpathCSIDriver struct {
cleanup func()
driverInfo DriverInfo
}
var _ TestDriver = &hostpathCSIDriver{}
var _ DynamicPVTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver() TestDriver {
return &hostpathCSIDriver{
driverInfo: DriverInfo{
Name: "csi-hostpath",
FeatureTag: "",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
},
}
}
func (h *hostpathCSIDriver) GetDriverInfo() *DriverInfo {
return &h.driverInfo
}
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
provisioner := GetUniqueDriverName(h)
parameters := map[string]string{}
ns := h.driverInfo.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)
return getStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (h *hostpathCSIDriver) CreateDriver() {
By("deploying csi hostpath driver")
f := h.driverInfo.Framework
cs := f.ClientSet
// Pick a random ready node; the client and server pods should both be scheduled on it
nodes := framework.GetReadySchedulableNodesOrDie(cs)
node := nodes.Items[rand.Intn(len(nodes.Items))]
h.driverInfo.Config.ClientNodeName = node.Name
h.driverInfo.Config.ServerNodeName = node.Name
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
// settings are ignored for this test. We could patch the image definitions.
o := utils.PatchCSIOptions{
OldDriverName: h.driverInfo.Name,
NewDriverName: GetUniqueDriverName(h),
DriverContainerName: "hostpath",
ProvisionerContainerName: "csi-provisioner",
NodeName: h.driverInfo.Config.ServerNodeName,
}
cleanup, err := h.driverInfo.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(h.driverInfo.Framework, o, item)
},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml",
)
h.cleanup = cleanup
if err != nil {
framework.Failf("deploying csi hostpath driver: %v", err)
}
}
func (h *hostpathCSIDriver) CleanupDriver() {
if h.cleanup != nil {
By("uninstalling csi hostpath driver")
h.cleanup()
}
}
// hostpathV0CSIDriver
type hostpathV0CSIDriver struct {
cleanup func()
driverInfo DriverInfo
}
var _ TestDriver = &hostpathV0CSIDriver{}
var _ DynamicPVTestDriver = &hostpathV0CSIDriver{}
// InitHostV0PathCSIDriver returns hostpathV0CSIDriver that implements TestDriver interface
func InitHostV0PathCSIDriver() TestDriver {
return &hostpathV0CSIDriver{
driverInfo: DriverInfo{
Name: "csi-hostpath-v0",
FeatureTag: "",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: sets.NewString(
"", // Default fsType
),
IsPersistent: true,
IsFsGroupSupported: false,
IsBlockSupported: false,
},
}
}
func (h *hostpathV0CSIDriver) GetDriverInfo() *DriverInfo {
return &h.driverInfo
}
func (h *hostpathV0CSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (h *hostpathV0CSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
provisioner := GetUniqueDriverName(h)
parameters := map[string]string{}
ns := h.driverInfo.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)
return getStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (h *hostpathV0CSIDriver) CreateDriver() {
By("deploying csi hostpath v0 driver")
f := h.driverInfo.Framework
cs := f.ClientSet
// Pick a random ready node; the client and server pods should both be scheduled on it
nodes := framework.GetReadySchedulableNodesOrDie(cs)
node := nodes.Items[rand.Intn(len(nodes.Items))]
h.driverInfo.Config.ClientNodeName = node.Name
h.driverInfo.Config.ServerNodeName = node.Name
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
// settings are ignored for this test. We could patch the image definitions.
o := utils.PatchCSIOptions{
OldDriverName: h.driverInfo.Name,
NewDriverName: GetUniqueDriverName(h),
DriverContainerName: "hostpath",
ProvisionerContainerName: "csi-provisioner-v0",
NodeName: h.driverInfo.Config.ServerNodeName,
}
cleanup, err := h.driverInfo.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(h.driverInfo.Framework, o, item)
},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-provisioner.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpathplugin.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/e2e-test-rbac.yaml",
)
h.cleanup = cleanup
if err != nil {
framework.Failf("deploying csi hostpath v0 driver: %v", err)
}
}
func (h *hostpathV0CSIDriver) CleanupDriver() {
if h.cleanup != nil {
By("uninstalling csi hostpath v0 driver")
h.cleanup()
}
}
// gce-pd
type gcePDCSIDriver struct {
cleanup func()
driverInfo DriverInfo
}
var _ TestDriver = &gcePDCSIDriver{}
var _ DynamicPVTestDriver = &gcePDCSIDriver{}
// InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
func InitGcePDCSIDriver() TestDriver {
return &gcePDCSIDriver{
driverInfo: DriverInfo{
Name: "pd.csi.storage.gke.io",
FeatureTag: "[Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: sets.NewString(
"", // Default fsType
"ext2",
"ext3",
"ext4",
"xfs",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: false,
},
}
}
func (g *gcePDCSIDriver) GetDriverInfo() *DriverInfo {
return &g.driverInfo
}
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
f := g.driverInfo.Framework
cs := f.ClientSet
config := g.driverInfo.Config
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipIfMultizone(cs)
// TODO(#62561): Use credentials through external pod identity when that goes GA instead of downloading keys.
createGCESecrets(cs, config)
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
}
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
ns := g.driverInfo.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
parameters := map[string]string{"type": "pd-standard"}
return getStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (g *gcePDCSIDriver) CreateDriver() {
By("deploying csi gce-pd driver")
// It would be safer to rename the gcePD driver, but that
// hasn't been done before either, and attempts to do so now led to
// errors during driver registration; therefore renaming is disabled
// by passing a nil patch function below.
//
// These are the options which would have to be used:
// o := utils.PatchCSIOptions{
// OldDriverName: g.driverInfo.Name,
// NewDriverName: GetUniqueDriverName(g),
// DriverContainerName: "gce-driver",
// ProvisionerContainerName: "csi-external-provisioner",
// }
cleanup, err := g.driverInfo.Framework.CreateFromManifests(nil,
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml",
)
g.cleanup = cleanup
if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err)
}
}
func (g *gcePDCSIDriver) CleanupDriver() {
By("uninstalling gce-pd driver")
if g.cleanup != nil {
g.cleanup()
}
}
// gcePd-external
type gcePDExternalCSIDriver struct {
driverInfo DriverInfo
}
var _ TestDriver = &gcePDExternalCSIDriver{}
var _ DynamicPVTestDriver = &gcePDExternalCSIDriver{}
// InitGcePDExternalCSIDriver returns a gcePDExternalCSIDriver that implements the TestDriver interface
func InitGcePDExternalCSIDriver() TestDriver {
return &gcePDExternalCSIDriver{
driverInfo: DriverInfo{
Name: "pd.csi.storage.gke.io",
// TODO(#70258): this is temporary until we can figure out how to make e2e tests a library
FeatureTag: "[Feature: gcePD-external]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: sets.NewString(
"", // Default fsType
"ext2",
"ext3",
"ext4",
"xfs",
),
IsPersistent: true,
IsFsGroupSupported: true,
IsBlockSupported: false,
},
}
}
func (g *gcePDExternalCSIDriver) GetDriverInfo() *DriverInfo {
return &g.driverInfo
}
func (g *gcePDExternalCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipIfMultizone(g.driverInfo.Framework.ClientSet)
}
func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
ns := g.driverInfo.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
parameters := map[string]string{"type": "pd-standard"}
return getStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (g *gcePDExternalCSIDriver) CreateDriver() {
}
func (g *gcePDExternalCSIDriver) CleanupDriver() {
}

View File

@ -0,0 +1,119 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package drivers
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
var (
csiImageVersion = flag.String("storage.csi.image.version", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
csiImageRegistry = flag.String("storage.csi.image.registry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
csiImageVersions = map[string]string{
"hostpathplugin": "v0.4.0",
"csi-attacher": "v0.4.0",
"csi-provisioner": "v0.4.0",
"driver-registrar": "v0.4.0",
}
)
func csiContainerImage(image string) string {
var fullName string
fullName += *csiImageRegistry + "/" + image + ":"
if *csiImageVersion != "" {
fullName += *csiImageVersion
} else {
fullName += csiImageVersions[image]
}
return fullName
}
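// Illustration (sketch, not part of this commit): how the registry flag and
// the version map above combine; assumes the default flag values.
func ExampleCsiContainerImage() {
	fmt.Println(csiContainerImage("csi-attacher"))
	fmt.Println(csiContainerImage("hostpathplugin"))
	// Output:
	// quay.io/k8scsi/csi-attacher:v0.4.0
	// quay.io/k8scsi/hostpathplugin:v0.4.0
}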
func shredFile(filePath string) {
if _, err := os.Stat(filePath); os.IsNotExist(err) {
framework.Logf("File %v was not found, skipping shredding", filePath)
return
}
framework.Logf("Shredding file %v", filePath)
_, _, err := framework.RunCmd("shred", "--remove", filePath)
if err != nil {
framework.Logf("Failed to shred file %v: %v", filePath, err)
}
if _, err := os.Stat(filePath); os.IsNotExist(err) {
framework.Logf("File %v successfully shredded", filePath)
return
}
// Shred failed. Try to remove the file for good measure.
err = os.Remove(filePath)
framework.ExpectNoError(err, "Failed to remove service account file %s", filePath)
}
// createGCESecrets downloads the GCP IAM Key for the default compute service account
// and puts it in a secret for the GCE PD CSI Driver to consume
func createGCESecrets(client clientset.Interface, config framework.VolumeTestConfig) {
saEnv := "E2E_GOOGLE_APPLICATION_CREDENTIALS"
saFile := fmt.Sprintf("/tmp/%s/cloud-sa.json", string(uuid.NewUUID()))
os.MkdirAll(path.Dir(saFile), 0750)
defer os.Remove(path.Dir(saFile))
premadeSAFile, ok := os.LookupEnv(saEnv)
if !ok {
framework.Logf("Could not find env var %v, please either create cloud-sa"+
" secret manually or rerun test after setting %v to the filepath of"+
" the GCP Service Account to give to the GCE Persistent Disk CSI Driver", saEnv, saEnv)
return
}
framework.Logf("Found CI service account key at %v", premadeSAFile)
// Need to copy it to saFile
stdout, stderr, err := framework.RunCmd("cp", premadeSAFile, saFile)
framework.ExpectNoError(err, "error copying service account key: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
defer shredFile(saFile)
// Create Secret with this Service Account
fileBytes, err := ioutil.ReadFile(saFile)
framework.ExpectNoError(err, "Failed to read file %v", saFile)
s := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "cloud-sa",
Namespace: config.Namespace,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
filepath.Base(saFile): fileBytes,
},
}
_, err = client.CoreV1().Secrets(config.Namespace).Create(s)
framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName())
}
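// Sketch (illustrative, not from this commit): how a GCE PD CSI driver pod
// could consume the "cloud-sa" secret created above as a secret volume.
var cloudSASecretVolumeSketch = v1.Volume{
	Name: "cloud-sa",
	VolumeSource: v1.VolumeSource{
		Secret: &v1.SecretVolumeSource{SecretName: "cloud-sa"},
	},
}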

File diff suppressed because it is too large

View File

@ -17,21 +17,20 @@ limitations under the License.
package storage
import (
"fmt"
"strconv"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -56,7 +55,12 @@ const (
var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
f := framework.NewDefaultFramework("emptydir-wrapper")
It("should not conflict", func() {
/*
Release : v1.13
Testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict
Description: Secret volume and ConfigMap volume are created with data. Pod MUST be able to start with Secret and ConfigMap volumes mounted into the container.
*/
framework.ConformanceIt("should not conflict", func() {
name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
volumeName := "secret-volume"
volumeMountPath := "/etc/secret-volume"
@ -76,10 +80,22 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
gitVolumeName := "git-volume"
gitVolumeMountPath := "/etc/git-volume"
gitURL, gitRepo, gitCleanup := createGitServer(f)
defer gitCleanup()
configMapVolumeName := "configmap-volume"
configMapVolumeMountPath := "/etc/configmap-volume"
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
BinaryData: map[string][]byte{
"data-1": []byte("value-1\n"),
},
}
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -96,11 +112,12 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
},
},
{
Name: gitVolumeName,
Name: configMapVolumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
@ -116,8 +133,8 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
ReadOnly: true,
},
{
Name: gitVolumeName,
MountPath: gitVolumeMountPath,
Name: configMapVolumeName,
MountPath: configMapVolumeMountPath,
},
},
},
@ -131,9 +148,13 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
By("Cleaning up the git vol pod")
By("Cleaning up the configmap")
if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
}
By("Cleaning up the pod")
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
framework.Failf("unable to delete pod %v: %v", pod.Name, err)
}
}()
})
@ -155,7 +176,13 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
// but these cases are harder because tmpfs-based emptyDir
// appears to be less prone to the race problem.
It("should not cause race condition when used for configmaps [Serial] [Slow]", func() {
/*
Release : v1.13
Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race
Description: Slow by design [~180 Seconds].
Create 50 ConfigMap volumes and 5 replicas of a pod with these ConfigMap volumes mounted. Pod MUST NOT fail waiting for Volumes.
*/
framework.ConformanceIt("should not cause race condition when used for configmaps [Serial] [Slow]", func() {
configMapNames := createConfigmapsForRace(f)
defer deleteConfigMaps(f, configMapNames)
volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
@ -164,6 +191,10 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
}
})
// Slow by design [~150 Seconds].
// This test uses deprecated GitRepo VolumeSource so it MUST NOT be promoted to Conformance.
// To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
// This approach can also be tested with Secret and DownwardAPI volume sources, but those are less prone to the race problem.
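// A sketch of that InitContainer approach (illustrative only; image and names
// are hypothetical, not part of this commit):
//   InitContainers: []v1.Container{{
//       Name:         "git-clone",
//       Image:        "alpine/git",
//       Command:      []string{"git", "clone", gitURL, "/repo"},
//       VolumeMounts: []v1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
//   }},
//   Volumes: []v1.Volume{{
//       Name:         "repo",
//       VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
//   }},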
It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
gitURL, gitRepo, cleanup := createGitServer(f)
defer cleanup()
@ -353,7 +384,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
Containers: []v1.Container{
{
Name: "test-container",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "10000"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{

View File

@ -27,11 +27,18 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
var _ = utils.SIGDescribe("Ephemeralstorage", func() {
var (
c clientset.Interface

View File

@ -27,12 +27,12 @@ import (
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
versionutil "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/version"
clientset "k8s.io/client-go/kubernetes"
versionutil "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -47,6 +47,7 @@ const (
gciVolumePluginDir = "/home/kubernetes/flexvolume"
gciVolumePluginDirLegacy = "/etc/srv/kubernetes/kubelet-plugins/volume/exec"
gciVolumePluginDirVersion = "1.10.0"
detachTimeout = 10 * time.Second
)
// testFlexVolume tests that a client pod using a given flexvolume driver
@ -72,62 +73,64 @@ func testFlexVolume(driver string, cs clientset.Interface, config framework.Volu
// installFlex installs the driver found at filePath on the node, and restarts
// kubelet if 'restart' is true. If node is nil, installs on the master, and restarts
// controller-manager if 'restart' is true.
func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string, restart bool) {
func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string) {
flexDir := getFlexDir(c, node, vendor, driver)
flexFile := path.Join(flexDir, driver)
host := ""
var err error
if node != nil {
host = framework.GetNodeExternalIP(node)
host, err = framework.GetNodeExternalIP(node)
if err != nil {
host, err = framework.GetNodeInternalIP(node)
}
} else {
host = net.JoinHostPort(framework.GetMasterHost(), sshPort)
masterHostWithPort := framework.GetMasterHost()
hostName := getHostFromHostPort(masterHostWithPort)
host = net.JoinHostPort(hostName, sshPort)
}
framework.ExpectNoError(err)
cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
sshAndLog(cmd, host)
sshAndLog(cmd, host, true /*failOnError*/)
data := generated.ReadOrDie(filePath)
data := testfiles.ReadOrDie(filePath, Fail)
cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
sshAndLog(cmd, host)
sshAndLog(cmd, host, true /*failOnError*/)
cmd = fmt.Sprintf("sudo chmod +x %s", flexFile)
sshAndLog(cmd, host)
if !restart {
return
}
if node != nil {
err := framework.RestartKubelet(host)
framework.ExpectNoError(err)
err = framework.WaitForKubeletUp(host)
framework.ExpectNoError(err)
} else {
err := framework.RestartControllerManager()
framework.ExpectNoError(err)
err = framework.WaitForControllerManagerUp()
framework.ExpectNoError(err)
}
sshAndLog(cmd, host, true /*failOnError*/)
}
func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) {
flexDir := getFlexDir(c, node, vendor, driver)
host := ""
var err error
if node != nil {
host = framework.GetNodeExternalIP(node)
host, err = framework.GetNodeExternalIP(node)
if err != nil {
host, err = framework.GetNodeInternalIP(node)
}
} else {
host = net.JoinHostPort(framework.GetMasterHost(), sshPort)
masterHostWithPort := framework.GetMasterHost()
hostName := getHostFromHostPort(masterHostWithPort)
host = net.JoinHostPort(hostName, sshPort)
}
if host == "" {
framework.Failf("Error getting node ip : %v", err)
}
cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
sshAndLog(cmd, host)
sshAndLog(cmd, host, false /*failOnError*/)
}
func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) string {
volumePluginDir := defaultVolumePluginDir
if framework.ProviderIs("gce") {
if node == nil && framework.MasterOSDistroIs("gci") {
if node == nil && framework.MasterOSDistroIs("gci", "ubuntu") {
v, err := getMasterVersion(c)
if err != nil {
framework.Failf("Error getting master version: %v", err)
@ -138,7 +141,7 @@ func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) str
} else {
volumePluginDir = gciVolumePluginDirLegacy
}
} else if node != nil && framework.NodeOSDistroIs("gci") {
} else if node != nil && framework.NodeOSDistroIs("gci", "ubuntu") {
if getNodeVersion(node).AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) {
volumePluginDir = gciVolumePluginDir
} else {
@ -150,11 +153,11 @@ func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) str
return flexDir
}
func sshAndLog(cmd, host string) {
func sshAndLog(cmd, host string, failOnError bool) {
result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
if result.Code != 0 && failOnError {
framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
}
}
@ -177,7 +180,18 @@ func getNodeVersion(node *v1.Node) *versionutil.Version {
return versionutil.MustParseSemantic(node.Status.NodeInfo.KubeletVersion)
}
var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
func getHostFromHostPort(hostPort string) string {
// try to split host and port
var host string
var err error
if host, _, err = net.SplitHostPort(hostPort); err != nil {
// if SplitHostPort returns an error, the entire hostPort is treated as the host
host = hostPort
}
return host
}
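// Illustration (sketch, not part of this commit): expected behavior for
// representative inputs.
func ExampleGetHostFromHostPort() {
	fmt.Println(getHostFromHostPort("10.0.0.1:22"))
	fmt.Println(getHostFromHostPort("10.0.0.1")) // no port: whole string is the host
	// Output:
	// 10.0.0.1
	// 10.0.0.1
}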
var _ = utils.SIGDescribe("Flexvolumes", func() {
f := framework.NewDefaultFramework("flexvolume")
// note that namespace deletion is handled by delete-namespace flag
@ -189,9 +203,9 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
var suffix string
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessMasterOSDistroIs("gci")
framework.SkipUnlessNodeOSDistroIs("debian", "gci")
framework.SkipUnlessProviderIs("gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessSSHKeyPresent()
cs = f.ClientSet
@ -211,7 +225,7 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f)
@ -229,9 +243,9 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f)
@ -240,27 +254,12 @@ var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// Detach might occur after pod deletion. Wait before deleting driver.
time.Sleep(detachTimeout)
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})
It("should install plugin without kubelet restart", func() {
driver := "dummy"
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), false /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
})
})

View File

@ -0,0 +1,176 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"path"
)
var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
var (
c clientset.Interface
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
)
f := framework.NewDefaultFramework("mounted-flexvolume-expand")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessSSHKeyPresent()
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
nodeKey = "mounted_flexvolume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
resizableSc, err = createStorageClass(ns, c)
if err != nil {
fmt.Printf("storage class creation error: %v\n", err)
}
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())
pvc = getClaim("2Gi", ns)
pvc.Spec.StorageClassName = &resizableSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
})
It("Should verify mounted flex volumes can be resized", func() {
driver := "dummy-attachable"
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodeList.Items[0]
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: "k8s/" + driver,
}},
NamePrefix: "pv-",
StorageClassName: resizableSc.Name,
VolumeMode: pvc.Spec.VolumeMode,
})
pv, err = framework.CreatePV(c, pv)
Expect(err).NotTo(HaveOccurred(), "Error creating pv %v", err)
By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
var pvs []*v1.PersistentVolume
pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a deployment with the provisioned volume")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
Expect(err).NotTo(HaveOccurred(), "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
Expect(pvc).NotTo(BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name)
}
By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
By("Getting a pod from deployment")
podList, err := framework.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
By("Deleting the pod from deployment")
err = framework.DeletePodWithWait(f, c, &pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
By("Waiting for deployment to create new pod")
pod, err = waitForDeploymentToRecreatePod(c, deployment)
Expect(err).NotTo(HaveOccurred(), "While waiting for pod to be recreated")
By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
})
})

View File

@ -0,0 +1,179 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"path"
)
func createStorageClass(ns string, c clientset.Interface) (*storage.StorageClass, error) {
bindingMode := storage.VolumeBindingImmediate
stKlass := getStorageClass("flex-expand", map[string]string{}, &bindingMode, ns, "resizing")
allowExpansion := true
stKlass.AllowVolumeExpansion = &allowExpansion
var err error
stKlass, err = c.StorageV1().StorageClasses().Create(stKlass)
return stKlass, err
}
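// Note (sketch, not part of this commit): AllowVolumeExpansion=true on the
// class is what permits the bound claim to be patched to a larger request
// later in the test, roughly:
//   pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse("6Gi")
// before the update is sent to the API server (expandPVCSize encapsulates
// this for the test).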
var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:ExpandInUsePersistentVolumes]", func() {
var (
c clientset.Interface
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
nodeList *v1.NodeList
)
f := framework.NewDefaultFramework("mounted-flexvolume-expand")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "local")
framework.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
framework.SkipUnlessSSHKeyPresent()
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("unable to find ready and schedulable Node")
}
nodeName = nodeList.Items[0].Name
nodeKey = "mounted_flexvolume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
resizableSc, err = createStorageClass(ns, c)
if err != nil {
fmt.Printf("storage class creation error: %v\n", err)
}
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class: %v", err)
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())
pvc = getClaim("2Gi", ns)
pvc.Spec.StorageClassName = &resizableSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc: %v", err)
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
})
It("should be resizable when mounted", func() {
driver := "dummy-attachable"
node := nodeList.Items[0]
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver))
By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
pv := framework.MakePersistentVolume(framework.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: "k8s/" + driver,
}},
NamePrefix: "pv-",
StorageClassName: resizableSc.Name,
VolumeMode: pvc.Spec.VolumeMode,
})
pv, err = framework.CreatePV(c, pv)
Expect(err).NotTo(HaveOccurred(), "Error creating pv %v", err)
By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
var pvs []*v1.PersistentVolume
pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
var pod *v1.Pod
By("Creating pod")
pod, err = framework.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
Expect(err).NotTo(HaveOccurred(), "Failed to create pod %v", err)
defer framework.DeletePodWithWait(f, c, pod)
By("Waiting for pod to go to 'running' state")
err = f.WaitForPodRunning(pod.ObjectMeta.Name)
Expect(err).NotTo(HaveOccurred(), "Pod didn't go to 'running' state %v", err)
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
Expect(pvc).NotTo(BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name)
}
By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
})
})

View File

@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -61,10 +62,11 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
var (
clientPod *v1.Pod
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
)
BeforeEach(func() {
framework.Logf("Initializing pod and pvcs for test")
clientPod, pvc = createPodPVCFromSC(f, c, ns)
clientPod, pvc, pv = createPodPVCFromSC(f, c, ns)
})
for _, test := range disruptiveTestTable {
func(t disruptiveTest) {
@ -76,17 +78,17 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
}
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, nil)
tearDownTestCase(c, f, ns, clientPod, pvc, pv, false)
pvc, clientPod = nil, nil
})
})
})
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) {
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim, *v1.PersistentVolume) {
var err error
test := storageClassTest{
name: "default",
claimSize: "2Gi",
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
pvc := newClaim(test, ns, "default")
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
@ -99,5 +101,5 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test")
return pod, pvc
return pod, pvc, pvs[0]
}

View File

@ -0,0 +1,95 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// List of testDrivers to be executed in the loop below
var testDrivers = []func() drivers.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
drivers.InitRbdDriver,
drivers.InitCephFSDriver,
drivers.InitHostPathDriver,
drivers.InitHostPathSymlinkDriver,
drivers.InitEmptydirDriver,
drivers.InitCinderDriver,
drivers.InitGcePdDriver,
drivers.InitVSphereDriver,
drivers.InitAzureDriver,
drivers.InitAwsDriver,
}
// List of testSuites to be executed in the loop below
var testSuites = []func() testsuites.TestSuite{
testsuites.InitVolumesTestSuite,
testsuites.InitVolumeIOTestSuite,
testsuites.InitVolumeModeTestSuite,
testsuites.InitSubPathTestSuite,
testsuites.InitProvisioningTestSuite,
}
func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
return patterns
}
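// intreeTunePattern passes every pattern through unchanged. For contrast, a
// sketch of a filtering tune function (illustrative; assumes TestPattern
// exposes a VolMode field):
func filesystemOnlyTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
	filtered := make([]testpatterns.TestPattern, 0, len(patterns))
	for _, p := range patterns {
		if p.VolMode != v1.PersistentVolumeBlock {
			filtered = append(filtered, p)
		}
	}
	return filtered
}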
// This executes testSuites for in-tree volumes.
var _ = utils.SIGDescribe("In-tree Volumes", func() {
f := framework.NewDefaultFramework("volumes")
var (
ns *v1.Namespace
config framework.VolumeTestConfig
)
BeforeEach(func() {
ns = f.Namespace
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "volume",
}
})
for _, initDriver := range testDrivers {
curDriver := initDriver()
Context(drivers.GetDriverNameWithFeatureTags(curDriver), func() {
driver := curDriver
BeforeEach(func() {
// setupDriver
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
driver.CleanupDriver()
})
testsuites.RunTestSuite(f, config, driver, testSuites, intreeTunePattern)
})
}
})

View File

@ -31,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -72,9 +73,9 @@ var _ = utils.SIGDescribe("Mounted volume expand[Slow]", func() {
isNodeLabeled = true
}
test := storageClassTest{
name: "default",
claimSize: "2Gi",
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
resizableSc, err = createResizableStorageClass(test, ns, "resizing", c)
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")

View File

@ -84,13 +84,15 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
StorageClassName: &emptyStorageClass,
}
// Get the first ready node IP that is not hosting the NFS pod.
var err error
if clientNodeIP == "" {
framework.Logf("Designating test node")
nodes := framework.GetReadySchedulableNodesOrDie(c)
for _, node := range nodes.Items {
if node.Name != nfsServerPod.Spec.NodeName {
clientNode = &node
clientNodeIP = framework.GetNodeExternalIP(clientNode)
clientNodeIP, err = framework.GetNodeExternalIP(clientNode)
framework.ExpectNoError(err)
break
}
}
@ -206,7 +208,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, pv)
tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */)
pv, pvc, clientPod = nil, nil, nil
})
@ -275,11 +277,14 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
}
// tearDownTestCase destroy resources created by initTestCase.
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) {
// Ignore deletion errors. Failing on them will interrupt test cleanup.
framework.DeletePodWithWait(f, c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
if pv != nil {
if forceDeletePV && pv != nil {
framework.DeletePersistentVolume(c, pv.Name)
return
}
err := framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 5*time.Minute)
framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", pv.Name)
}

View File

@ -39,7 +39,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@ -78,7 +80,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
mathrand.Seed(time.Now().UTC().UnixNano())
mathrand.Seed(time.Now().UnixNano())
})
Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() {
@ -384,7 +386,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
if disruptOp == deleteNode {
By("getting gce instances")
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
@ -475,7 +477,7 @@ func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName
func detachPD(nodeName types.NodeName, pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
if err != nil {
return err
}
@ -526,7 +528,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
if numContainers > 1 {
containers[i].Name = fmt.Sprintf("mycontainer%v", i+1)
}
containers[i].Image = "busybox"
containers[i].Image = imageutils.GetE2EImage(imageutils.BusyBox)
containers[i].Command = []string{"sleep", "6000"}
containers[i].VolumeMounts = make([]v1.VolumeMount, len(diskNames))
for k := range diskNames {
@ -575,11 +577,11 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
return pod
}
// Waits for specified PD to to detach from specified hostName
// Waits for specified PD to detach from specified hostName
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
if err != nil {
return err
}

View File

@ -26,12 +26,13 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// verifyGCEDiskAttached performs a sanity check to verify the PD attached to the node
func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
gceCloud, err := framework.GetGCECloud()
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
Expect(err).NotTo(HaveOccurred())

View File

@ -25,9 +25,9 @@ import (
"sync"
"time"
"github.com/ghodss/yaml"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/yaml"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@ -51,6 +51,7 @@ import (
type localTestConfig struct {
ns string
nodes []v1.Node
nodeExecPods map[string]*v1.Pod
node0 *v1.Node
client clientset.Interface
scName string
@ -77,8 +78,12 @@ const (
GCELocalSSDVolumeType localVolumeType = "gce-localssd-scsi-fs"
// Creates a local file, formats it, and maps it as a block device.
BlockLocalVolumeType localVolumeType = "block"
// Creates a local file, formats it, and mounts it to use as local volume.
BlockFsLocalVolumeType localVolumeType = "blockfs"
// Creates a local file serving as the backing for a block device, formats it,
// and mounts it to use as an FS mode local volume.
BlockFsWithFormatLocalVolumeType localVolumeType = "blockfswithformat"
// Creates a local file serving as the backing for a block device, does not
// format it manually, and mounts it to use as an FS mode local volume.
BlockFsWithoutFormatLocalVolumeType localVolumeType = "blockfswithoutformat"
)
var setupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *v1.Node) *localTestVolume{
@ -89,7 +94,8 @@ var setupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *v1.Node) *
DirectoryBindMountedLocalVolumeType: setupLocalVolumeDirectoryBindMounted,
DirectoryLinkBindMountedLocalVolumeType: setupLocalVolumeDirectoryLinkBindMounted,
BlockLocalVolumeType: setupLocalVolumeBlock,
BlockFsLocalVolumeType: setupLocalVolumeBlockFs,
BlockFsWithFormatLocalVolumeType: setupLocalVolumeBlockFsWithFormat,
BlockFsWithoutFormatLocalVolumeType: setupLocalVolumeBlockFsWithoutFormat,
}
var cleanupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *localTestVolume){
@ -100,7 +106,8 @@ var cleanupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *localTes
DirectoryBindMountedLocalVolumeType: cleanupLocalVolumeDirectoryBindMounted,
DirectoryLinkBindMountedLocalVolumeType: cleanupLocalVolumeDirectoryLinkBindMounted,
BlockLocalVolumeType: cleanupLocalVolumeBlock,
BlockFsLocalVolumeType: cleanupLocalVolumeBlockFs,
BlockFsWithFormatLocalVolumeType: cleanupLocalVolumeBlockFsWithFormat,
BlockFsWithoutFormatLocalVolumeType: cleanupLocalVolumeBlockFsWithoutFormat,
}
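// Sketch (illustrative): a new localVolumeType needs a matching entry in both
// maps above, e.g. for a hypothetical "ramdisk" type:
//   setupLocalVolumeMap[RamdiskLocalVolumeType] = setupLocalVolumeRamdisk
//   cleanupLocalVolumeMap[RamdiskLocalVolumeType] = cleanupLocalVolumeRamdisk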
type localTestVolume struct {
@ -176,8 +183,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
)
BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
// Get all the schedulable nodes
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
@ -197,6 +202,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ns: f.Namespace.Name,
client: f.ClientSet,
nodes: nodes.Items[:maxLen],
nodeExecPods: make(map[string]*v1.Pod, maxLen),
node0: node0,
scName: scName,
ssTester: ssTester,
@ -212,11 +218,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
if testVolType == GCELocalSSDVolumeType {
serialStr = " [Serial]"
}
alphaStr := ""
if testVolType == BlockLocalVolumeType {
alphaStr = " [Feature:BlockVolume]"
}
ctxString := fmt.Sprintf("[Volume type: %s]%v%v", testVolType, serialStr, alphaStr)
ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr)
testMode := immediateMode
Context(ctxString, func() {
@ -224,7 +226,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
BeforeEach(func() {
if testVolType == GCELocalSSDVolumeType {
SkipUnlessLocalSSDExists("scsi", "fs", config.node0)
SkipUnlessLocalSSDExists(config, "scsi", "fs", config.node0)
}
setupStorageClass(config, &testMode)
testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, testMode)
@ -247,6 +249,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pod1, pod1Err = createLocalPod(config, testVol, nil)
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
})
AfterEach(func() {
@ -256,16 +263,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
It("should be able to mount volume and read from pod1", func() {
By("Reading in pod1")
// testFileContent was written during setupLocalVolume
// testFileContent was written in BeforeEach
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
})
It("should be able to mount volume and write from pod1", func() {
// testFileContent was written during setupLocalVolume
// testFileContent was written in BeforeEach
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
By("Writing in pod1")
writeCmd, _ := createWriteAndReadCmds(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVolType)
writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVolType)
podRWCmdExec(pod1, writeCmd)
})
})
@ -346,12 +353,12 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
Context("Local volume that cannot be mounted [Slow]", func() {
// TODO:
// - check for these errors in unit tests intead
// - check for these errors in unit tests instead
It("should fail due to non-existent path", func() {
ep := &eventPatterns{
reason: "FailedMount",
pattern: make([]string, 2)}
ep.pattern = append(ep.pattern, "MountVolume.SetUp failed")
ep.pattern = append(ep.pattern, "MountVolume.NewMounter initialization failed")
testVol := &localTestVolume{
node: config.node0,
@ -461,8 +468,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
// Delete the persistent volume claim: file will be cleaned up and volume be re-created.
By("Deleting the persistent volume claim to clean up persistent volume and re-create one")
writeCmd, _ := createWriteAndReadCmds(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
err = framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, config.node0)
writeCmd := createWriteCmd(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
err = issueNodeCommand(config, writeCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
err = config.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
@ -472,7 +479,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
Expect(err).NotTo(HaveOccurred())
Expect(newPV.UID).NotTo(Equal(oldPV.UID))
fileDoesntExistCmd := createFileDoesntExistCmd(volumePath, testFile)
err = framework.IssueSSHCommand(fileDoesntExistCmd, framework.TestContext.Provider, config.node0)
err = issueNodeCommand(config, fileDoesntExistCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Deleting provisioner daemonset")
@ -483,7 +490,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
directoryPath := filepath.Join(config.discoveryDir, "notbindmount")
By("Creating a directory, not bind mounted, in discovery directory")
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", directoryPath)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0)
err := issueNodeCommand(config, mkdirCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Starting a provisioner daemonset")
@ -504,7 +511,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
})
It("should discover dynamicly created local persistent volume mountpoint in discovery directory", func() {
It("should discover dynamically created local persistent volume mountpoint in discovery directory", func() {
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
@ -524,17 +531,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
})
})
Context("StatefulSet with pod anti-affinity", func() {
Context("StatefulSet with pod affinity [Slow]", func() {
var testVols map[string][]*localTestVolume
const (
ssReplicas = 3
volsPerNode = 2
volsPerNode = 6
)
BeforeEach(func() {
if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
}
setupStorageClass(config, &waitMode)
testVols = map[string][]*localTestVolume{}
@ -553,10 +557,34 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
cleanupStorageClass(config)
})
It("should use volumes spread across nodes", func() {
It("should use volumes spread across nodes when pod has anti-affinity", func() {
if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
}
By("Creating a StatefulSet with pod anti-affinity on nodes")
ss := createStatefulSet(config, ssReplicas, volsPerNode)
validateStatefulSet(config, ss)
ss := createStatefulSet(config, ssReplicas, volsPerNode, true, false)
validateStatefulSet(config, ss, true)
})
It("should use volumes on one node when pod has affinity", func() {
By("Creating a StatefulSet with pod affinity on nodes")
ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false)
validateStatefulSet(config, ss, false)
})
It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() {
if len(config.nodes) < ssReplicas {
framework.Skipf("Runs only when number of nodes >= %v", ssReplicas)
}
By("Creating a StatefulSet with pod anti-affinity on nodes")
ss := createStatefulSet(config, ssReplicas, 1, true, true)
validateStatefulSet(config, ss, true)
})
It("should use volumes on one node when pod management is parallel and pod has affinity", func() {
By("Creating a StatefulSet with pod affinity on nodes")
ss := createStatefulSet(config, ssReplicas, 1, false, true)
validateStatefulSet(config, ss, false)
})
})
@ -725,8 +753,6 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
err = framework.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace)
Expect(err).NotTo(HaveOccurred())
cleanupLocalVolumes(config, []*localTestVolume{testVol})
}
type eventPatterns struct {
@ -760,7 +786,12 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
// testFileContent was written during setupLocalVolume
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
// testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Creating pod2 to read from the PV")
@ -768,16 +799,16 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
Expect(pod2Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod2, config.node0.Name)
// testFileContent was written during setupLocalVolume
// testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)
writeCmd = createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
By("Writing in pod2")
podRWCmdExec(pod2, writeCmd)
By("Reading in pod2")
testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2, testVol.localVolumeType)
By("Reading in pod1")
testReadFileContent(volumeDir, testFile, testVol.hostDir, pod1, testVol.localVolumeType)
By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
@ -792,14 +823,14 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
// testFileContent was written during setupLocalVolume
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
By("Writing in pod1")
podRWCmdExec(pod1, writeCmd)
// testFileContent was written after creating pod1
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
@ -809,7 +840,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
verifyLocalPod(config, testVol, pod2, config.node0.Name)
By("Reading in pod2")
testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2, testVol.localVolumeType)
testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
By("Deleting pod2")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
@ -879,11 +910,13 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
}
}
func setupWriteTestFile(hostDir string, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node) *localTestVolume {
writeCmd, _ := createWriteAndReadCmds(hostDir, testFile, testFileContent, localVolumeType)
By(fmt.Sprintf("Creating test file on node %q in path %q", node.Name, hostDir))
err := framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
func generateLocalTestVolume(hostDir string, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node) *localTestVolume {
if localVolumeType != BlockLocalVolumeType && localVolumeType != BlockFsWithoutFormatLocalVolumeType {
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
err := issueNodeCommand(config, mkdirCmd, node)
Expect(err).NotTo(HaveOccurred())
}
return &localTestVolume{
node: node,
hostDir: hostDir,
@ -895,94 +928,171 @@ func setupLocalVolumeTmpfs(config *localTestConfig, node *v1.Node) *localTestVol
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMountTmpfsLocalVolume(config, hostDir, node)
// populate volume with testFile containing testFileContent
return setupWriteTestFile(hostDir, config, TmpfsLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, TmpfsLocalVolumeType, node)
}
func setupLocalVolumeGCELocalSSD(config *localTestConfig, node *v1.Node) *localTestVolume {
res, err := framework.IssueSSHCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", framework.TestContext.Provider, node)
res, err := issueNodeCommandWithResult(config, "ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
Expect(err).NotTo(HaveOccurred())
dirName := strings.Fields(res.Stdout)[0]
dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, GCELocalSSDVolumeType, node)
// GCE local SSD does not need a directory to be created
return &localTestVolume{
node: node,
hostDir: hostDir,
localVolumeType: GCELocalSSDVolumeType,
}
}
func setupLocalVolumeDirectory(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryLocalVolumeType, node)
}
// launchNodeExecPodForLocalPV launches a hostexec pod for local PV and waits
// until it's Running.
func launchNodeExecPodForLocalPV(client clientset.Interface, ns, node string) *v1.Pod {
hostExecPod := framework.NewHostExecPodSpec(ns, fmt.Sprintf("hostexec-%s", node))
hostExecPod.Spec.NodeName = node
hostExecPod.Spec.Volumes = []v1.Volume{
{
// Required to enter into host mount namespace via nsenter.
Name: "rootfs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
ReadOnly: true,
},
}
hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: func(privileged bool) *bool {
return &privileged
}(true),
}
pod, err := client.CoreV1().Pods(ns).Create(hostExecPod)
framework.ExpectNoError(err)
err = framework.WaitForPodRunningInNamespace(client, pod)
framework.ExpectNoError(err)
return pod
}
// issueNodeCommandWithResult issues a command on the given node and returns stdout.
func issueNodeCommandWithResult(config *localTestConfig, cmd string, node *v1.Node) (string, error) {
var pod *v1.Pod
pod, ok := config.nodeExecPods[node.Name]
if !ok {
pod = launchNodeExecPodForLocalPV(config.client, config.ns, node.Name)
if pod == nil {
return "", fmt.Errorf("failed to create hostexec pod for node %q", node)
}
config.nodeExecPods[node.Name] = pod
}
args := []string{
"exec",
fmt.Sprintf("--namespace=%v", pod.Namespace),
pod.Name,
"--",
"nsenter",
"--mount=/rootfs/proc/1/ns/mnt",
"--",
"sh",
"-c",
cmd,
}
return framework.RunKubectl(args...)
}
// issueNodeCommand works like issueNodeCommandWithResult, but discards the result.
func issueNodeCommand(config *localTestConfig, cmd string, node *v1.Node) error {
_, err := issueNodeCommandWithResult(config, cmd, node)
return err
}
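// Illustrative sketch (hypothetical pod and namespace names): for a node
// whose hostexec pod is "hostexec-node1" in namespace "e2e-ns", the args
// assembled above amount to the following kubectl invocation:
//
//	kubectl exec --namespace=e2e-ns hostexec-node1 -- \
//	    nsenter --mount=/rootfs/proc/1/ns/mnt -- sh -c '<cmd>'
//
// nsenter joins the host's mount namespace through the read-only /rootfs
// hostPath volume mounted by launchNodeExecPodForLocalPV, so <cmd> runs as if
// executed directly on the node.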
func setupLocalVolumeDirectoryLink(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
cmd := fmt.Sprintf("mkdir %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLinkLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryLinkLocalVolumeType, node)
}
func setupLocalVolumeDirectoryBindMounted(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryBindMountedLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryBindMountedLocalVolumeType, node)
}
func setupLocalVolumeDirectoryLinkBindMounted(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && ln -s %s %s",
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && sudo ln -s %s %s",
hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLinkBindMountedLocalVolumeType, node)
return generateLocalTestVolume(hostDir, config, DirectoryLinkBindMountedLocalVolumeType, node)
}
func setupLocalVolumeBlock(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(hostDir, node)
// Populate block volume with testFile containing testFileContent.
volume := setupWriteTestFile(loopDev, config, BlockLocalVolumeType, node)
loopDev := getBlockLoopDev(config, hostDir, node)
volume := generateLocalTestVolume(loopDev, config, BlockLocalVolumeType, node)
volume.hostDir = loopDev
volume.loopDevDir = hostDir
return volume
}
func setupLocalVolumeBlockFs(config *localTestConfig, node *v1.Node) *localTestVolume {
func setupLocalVolumeBlockFsWithFormat(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(hostDir, node)
loopDev := getBlockLoopDev(config, hostDir, node)
// format and mount at hostDir
// give others rwx for read/write testing
cmd := fmt.Sprintf("sudo mkfs -t ext4 %s && sudo mount -t ext4 %s %s && sudo chmod o+rwx %s", loopDev, loopDev, hostDir, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
_, err := issueNodeCommandWithResult(config, cmd, node)
Expect(err).NotTo(HaveOccurred())
// Populate block volume with testFile containing testFileContent.
volume := setupWriteTestFile(hostDir, config, BlockFsLocalVolumeType, node)
volume := generateLocalTestVolume(hostDir, config, BlockFsWithFormatLocalVolumeType, node)
volume.hostDir = hostDir
volume.loopDevDir = loopDev
return volume
}
func setupLocalVolumeBlockFsWithoutFormat(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(config, hostDir, node)
volume := generateLocalTestVolume(loopDev, config, BlockFsWithoutFormatLocalVolumeType, node)
// Set the block device path directly as the local PV spec path
// to test the local volume plugin's Filesystem mode on a block device.
volume.hostDir = loopDev
volume.loopDevDir = hostDir
return volume
}
// Determine the /dev/loopXXX device associated with this test, via its hostDir.
func getBlockLoopDev(hostDir string, node *v1.Node) string {
func getBlockLoopDev(config *localTestConfig, hostDir string, node *v1.Node) string {
loopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", hostDir)
loopDevResult, err := framework.IssueSSHCommandWithResult(loopDevCmd, framework.TestContext.Provider, node)
loopDevResult, err := issueNodeCommandWithResult(config, loopDevCmd, node)
Expect(err).NotTo(HaveOccurred())
return strings.TrimSpace(loopDevResult.Stdout)
return strings.TrimSpace(loopDevResult)
}
func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
@ -999,8 +1109,9 @@ func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Po
// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory.
func cleanupLocalVolumeGCELocalSSD(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm %s", volume.hostDir+"/"+testFile)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
file := volume.hostDir + "/" + testFile
removeCmd := fmt.Sprintf("if [ -f %s ]; then rm %s; fi", file, file)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1010,7 +1121,7 @@ func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1018,7 +1129,7 @@ func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
func cleanupLocalVolumeDirectory(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1027,8 +1138,8 @@ func cleanupLocalVolumeDirectoryLink(config *localTestConfig, volume *localTestV
By("Removing the test directory")
hostDir := volume.hostDir
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1037,7 +1148,7 @@ func cleanupLocalVolumeDirectoryBindMounted(config *localTestConfig, volume *loc
By("Removing the test directory")
hostDir := volume.hostDir
removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1046,8 +1157,8 @@ func cleanupLocalVolumeDirectoryLinkBindMounted(config *localTestConfig, volume
By("Removing the test directory")
hostDir := volume.hostDir
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1057,20 +1168,29 @@ func cleanupLocalVolumeBlock(config *localTestConfig, volume *localTestVolume) {
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV and removes the test directory holding the block file.
func cleanupLocalVolumeBlockFs(config *localTestConfig, volume *localTestVolume) {
func cleanupLocalVolumeBlockFsWithFormat(config *localTestConfig, volume *localTestVolume) {
// umount first
By("Umount blockfs mountpoint")
umountCmd := fmt.Sprintf("sudo umount %s", volume.hostDir)
err := framework.IssueSSHCommand(umountCmd, framework.TestContext.Provider, volume.node)
err := issueNodeCommand(config, umountCmd, volume.node)
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err = framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
err = issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
func cleanupLocalVolumeBlockFsWithoutFormat(config *localTestConfig, volume *localTestVolume) {
volume.hostDir = volume.loopDevDir
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err := issueNodeCommand(config, removeCmd, volume.node)
Expect(err).NotTo(HaveOccurred())
}
@ -1166,8 +1286,8 @@ func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string)
return pod
}
if volume.localVolumeType == BlockLocalVolumeType {
// Block e2e tests require utilities for writing to block devices (e.g. dd), and nginx has this utilites.
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
// Block e2e tests require utilities for writing to block devices (e.g. dd), and nginx has these utilities.
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
}
return pod
}
@ -1236,13 +1356,13 @@ func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *i
func createAndMountTmpfsLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, dir))
err := framework.IssueSSHCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=1m tmpfs-%q %q", dir, dir, dir), framework.TestContext.Provider, node)
err := issueNodeCommand(config, fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=1m tmpfs-%q %q", dir, dir, dir), node)
Expect(err).NotTo(HaveOccurred())
}
func unmountTmpfsLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", node.Name, dir))
err := framework.IssueSSHCommand(fmt.Sprintf("sudo umount %q", dir), framework.TestContext.Provider, node)
err := issueNodeCommand(config, fmt.Sprintf("sudo umount %q", dir), node)
Expect(err).NotTo(HaveOccurred())
}
@ -1251,28 +1371,19 @@ func createAndMapBlockLocalVolume(config *localTestConfig, dir string, node *v1.
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
// Create a 10MB file that will serve as the backing for the block device.
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=512 count=20480", dir)
losetupLoopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup -f) && echo ${E2E_LOOP_DEV}")
losetupCmd := fmt.Sprintf("sudo losetup ${E2E_LOOP_DEV} %s/file", dir)
err := framework.IssueSSHCommand(fmt.Sprintf("%s && %s && %s && %s", mkdirCmd, ddCmd, losetupLoopDevCmd, losetupCmd), framework.TestContext.Provider, node)
losetupCmd := fmt.Sprintf("sudo losetup -f %s/file", dir)
err := issueNodeCommand(config, fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
Expect(err).NotTo(HaveOccurred())
}
func unmapBlockLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
loopDev := getBlockLoopDev(dir, node)
loopDev := getBlockLoopDev(config, dir, node)
By(fmt.Sprintf("Unmap block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
err := framework.IssueSSHCommand(losetupDeleteCmd, framework.TestContext.Provider, node)
err := issueNodeCommand(config, losetupDeleteCmd, node)
Expect(err).NotTo(HaveOccurred())
}
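// For reference, the loop-device lifecycle driven by the helpers above looks
// roughly like this on the node (hypothetical directory substituted):
//
//	mkdir -p /tmp/local-volume-test-1234
//	dd if=/dev/zero of=/tmp/local-volume-test-1234/file bs=512 count=20480  # 10MB backing file
//	sudo losetup -f /tmp/local-volume-test-1234/file                        # attach first free /dev/loopN
//	sudo losetup | grep /tmp/local-volume-test-1234/file                    # find the device (getBlockLoopDev)
//	sudo losetup -d /dev/loopN                                              # detach (unmapBlockLocalVolume)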
// Create corresponding write and read commands
// to be executed via SSH on the node with the local PV
func createWriteAndReadCmds(testFileDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) (writeCmd string, readCmd string) {
writeCmd = createWriteCmd(testFileDir, testFile, writeTestFileContent, volumeType)
readCmd = createReadCmd(testFileDir, testFile, volumeType)
return writeCmd, readCmd
}
func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {
if volumeType == BlockLocalVolumeType {
// testDir is the block device.
@ -1306,13 +1417,13 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy
// Read testFile and evaluate whether it contains the testFileContent
func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
readCmd := createReadCmd(volumeDir, testFile, volumeType)
readCmd := createReadCmd(testFileDir, testFile, volumeType)
readOut := podRWCmdExec(pod, readCmd)
Expect(readOut).To(ContainSubstring(testFileContent))
}
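// For illustration, createFileDoesntExistCmd (defined below) returns a shell
// test such as `[ ! -e /mnt/volume1/test-file ]` for hypothetical arguments
// ("/mnt/volume1", "test-file"); the command exits non-zero, and therefore
// fails podRWCmdExec, whenever the file still exists.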
// Create command to verify that the file doesn't exist
// to be executed via SSH on the node with the local PV
// to be executed via hostexec Pod on the node with the local PV
func createFileDoesntExistCmd(testFileDir string, testFile string) string {
testFilePath := filepath.Join(testFileDir, testFile)
return fmt.Sprintf("[ ! -e %s ]", testFilePath)
@ -1322,7 +1433,7 @@ func createFileDoesntExistCmd(testFileDir string, testFile string) string {
// Fail on error
func podRWCmdExec(pod *v1.Pod, cmd string) string {
out, err := utils.PodExec(pod, cmd)
framework.Logf("podRWCmdExec out: %q err: %q", out, err)
framework.Logf("podRWCmdExec out: %q err: %v", out, err)
Expect(err).NotTo(HaveOccurred())
return out
}
@ -1349,12 +1460,13 @@ func setupLocalVolumeProvisioner(config *localTestConfig) {
By("Bootstrapping local volume provisioner")
createServiceAccount(config)
createProvisionerClusterRoleBinding(config)
utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, false /* teardown */, []string{testServiceAccount})
createVolumeConfigMap(config)
for _, node := range config.nodes {
By(fmt.Sprintf("Initializing local volume discovery base path on node %v", node.Name))
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", config.discoveryDir)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, &node)
err := issueNodeCommand(config, mkdirCmd, &node)
Expect(err).NotTo(HaveOccurred())
}
}
@ -1362,11 +1474,12 @@ func setupLocalVolumeProvisioner(config *localTestConfig) {
func cleanupLocalVolumeProvisioner(config *localTestConfig) {
By("Cleaning up cluster role binding")
deleteClusterRoleBinding(config)
utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, true /* teardown */, []string{testServiceAccount})
for _, node := range config.nodes {
By(fmt.Sprintf("Removing the test discovery directory on node %v", node.Name))
removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", config.discoveryDir, config.discoveryDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, &node)
err := issueNodeCommand(config, removeCmd, &node)
Expect(err).NotTo(HaveOccurred())
}
}
@ -1374,24 +1487,24 @@ func cleanupLocalVolumeProvisioner(config *localTestConfig) {
func setupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
By(fmt.Sprintf("Creating local directory at path %q", volumePath))
mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, node)
err := issueNodeCommand(config, mkdirCmd, node)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Mounting local directory at path %q", volumePath))
mntCmd := fmt.Sprintf("sudo mount --bind %v %v", volumePath, volumePath)
err = framework.IssueSSHCommand(mntCmd, framework.TestContext.Provider, node)
err = issueNodeCommand(config, mntCmd, node)
Expect(err).NotTo(HaveOccurred())
}
func cleanupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
By(fmt.Sprintf("Unmounting the test mount point from %q", volumePath))
umountCmd := fmt.Sprintf("[ ! -e %v ] || sudo umount %v", volumePath, volumePath)
err := framework.IssueSSHCommand(umountCmd, framework.TestContext.Provider, node)
err := issueNodeCommand(config, umountCmd, node)
Expect(err).NotTo(HaveOccurred())
By("Removing the test mount point")
removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", volumePath, volumePath)
err = framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, node)
err = issueNodeCommand(config, removeCmd, node)
Expect(err).NotTo(HaveOccurred())
By("Cleaning up persistent volume")
@ -1736,7 +1849,7 @@ func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.Pe
return nil, nil
}
func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int) *appsv1.StatefulSet {
func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int, anti, parallel bool) *appsv1.StatefulSet {
mounts := []v1.VolumeMount{}
claims := []v1.PersistentVolumeClaim{}
for i := 0; i < volumeCount; i++ {
@ -1746,25 +1859,32 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
claims = append(claims, *pvc)
}
affinity := v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "app",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"local-volume-test"},
},
},
podAffinityTerms := []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "app",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"local-volume-test"},
},
TopologyKey: "kubernetes.io/hostname",
},
},
TopologyKey: "kubernetes.io/hostname",
},
}
affinity := v1.Affinity{}
if anti {
affinity.PodAntiAffinity = &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
}
} else {
affinity.PodAffinity = &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
}
}
labels := map[string]string{"app": "local-volume-test"}
spec := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
@ -1784,7 +1904,7 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
VolumeMounts: mounts,
},
},
@ -1796,6 +1916,10 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
},
}
if parallel {
spec.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
}
ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(spec)
Expect(err).NotTo(HaveOccurred())
@ -1803,16 +1927,21 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
return ss
}
func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet) {
func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) {
pods := config.ssTester.GetPodList(ss)
// Verify that each pod is on a different node
nodes := sets.NewString()
for _, pod := range pods.Items {
nodes.Insert(pod.Spec.NodeName)
}
Expect(nodes.Len()).To(Equal(len(pods.Items)))
if anti {
// Verify that each pod is on a different node
Expect(nodes.Len()).To(Equal(len(pods.Items)))
} else {
// Verify that all pods are on the same node.
Expect(nodes.Len()).To(Equal(1))
}
// Validate all PVCs are bound
for _, pod := range pods.Items {
@ -1829,11 +1958,11 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet) {
// SkipUnlessLocalSSDExists takes in an ssdInterface (scsi/nvme) and a filesystemType (fs/block)
// and skips if a disk of that type does not exist on the node
func SkipUnlessLocalSSDExists(ssdInterface, filesystemType string, node *v1.Node) {
func SkipUnlessLocalSSDExists(config *localTestConfig, ssdInterface, filesystemType string, node *v1.Node) {
ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
res, err := framework.IssueSSHCommandWithResult(ssdCmd, framework.TestContext.Provider, node)
res, err := issueNodeCommandWithResult(config, ssdCmd, node)
Expect(err).NotTo(HaveOccurred())
num, err := strconv.Atoi(strings.TrimSpace(res.Stdout))
num, err := strconv.Atoi(strings.TrimSpace(res))
Expect(err).NotTo(HaveOccurred())
if num < 1 {
framework.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)

View File

@ -23,6 +23,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -30,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
@ -281,6 +283,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
By("Deleting the claim")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
By("Re-mounting the volume.")
@ -296,8 +299,126 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
framework.Logf("Pod exited without failure; the volume has been recycled.")
})
})
})
Describe("Default StorageClass", func() {
Context("pods that use multiple volumes", func() {
AfterEach(func() {
framework.DeleteAllStatefulSets(c, ns)
})
It("should be reschedulable [Slow]", func() {
// Only run on providers with default storageclass
framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")
numVols := 4
ssTester := framework.NewStatefulSetTester(c)
By("Creating a StatefulSet pod to initialize data")
writeCmd := "true"
for i := 0; i < numVols; i++ {
writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
}
writeCmd += "&& sleep 10000"
probe := &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
// Check that the last file got created
Command: []string{"test", "-f", getVolumeFile(numVols - 1)},
},
},
InitialDelaySeconds: 1,
PeriodSeconds: 1,
}
mounts := []v1.VolumeMount{}
claims := []v1.PersistentVolumeClaim{}
for i := 0; i < numVols; i++ {
pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
pvc.Name = getVolName(i)
mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
claims = append(claims, *pvc)
}
spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
ss, err := c.AppsV1().StatefulSets(ns).Create(spec)
Expect(err).NotTo(HaveOccurred())
ssTester.WaitForRunningAndReady(1, ss)
By("Deleting the StatefulSet but not the volumes")
// Scale down to 0 first so that the Delete is quick
ss, err = ssTester.Scale(ss, 0)
Expect(err).NotTo(HaveOccurred())
ssTester.WaitForStatusReplicas(ss, 0)
err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating a new Statefulset and validating the data")
validateCmd := "true"
for i := 0; i < numVols; i++ {
validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))
}
validateCmd += "&& sleep 10000"
spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
ss, err = c.AppsV1().StatefulSets(ns).Create(spec)
Expect(err).NotTo(HaveOccurred())
ssTester.WaitForRunningAndReady(1, ss)
})
})
})
})
func getVolName(i int) string {
return fmt.Sprintf("vol%v", i)
}
func getMountPath(i int) string {
return fmt.Sprintf("/mnt/%v", getVolName(i))
}
func getVolumeFile(i int) string {
return fmt.Sprintf("%v/data%v", getMountPath(i), i)
}
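// As a worked example of the helpers above with numVols = 4, index 0 maps to
// volume "vol0" mounted at "/mnt/vol0" with data file "/mnt/vol0/data0", and
// the readiness probe's "test -f /mnt/vol3/data3" only succeeds after the
// write command has touched the file on every volume.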
func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v1.PersistentVolumeClaim, readyProbe *v1.Probe) *appsv1.StatefulSet {
ssReplicas := int32(1)
labels := map[string]string{"app": "many-volumes-test"}
return &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "many-volumes-test",
Namespace: ns,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "many-volumes-test"},
},
Replicas: &ssReplicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
Command: []string{"/bin/sh"},
Args: []string{"-c", cmd},
VolumeMounts: mounts,
ReadinessProbe: readyProbe,
},
},
},
},
VolumeClaimTemplates: claims,
},
}
}

View File

@ -17,6 +17,8 @@ limitations under the License.
package storage
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -77,6 +79,9 @@ var _ = utils.SIGDescribe("PV Protection", func() {
pv, err = client.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred(), "Error creating PV")
By("Waiting for PV to enter phase Available")
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))
By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While getting PV status")

View File

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -47,8 +48,8 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
By("Creating a PVC")
suffix := "pvc-protection"
defaultSC := getDefaultStorageClassName(client)
testStorageClass := storageClassTest{
claimSize: "1Gi",
testStorageClass := testsuites.StorageClassTest{
ClaimSize: "1Gi",
}
pvc = newClaim(testStorageClass, nameSpace, suffix)
pvc.Spec.StorageClassName = &defaultSC

View File

@ -24,24 +24,33 @@ import (
"strings"
"time"
"encoding/json"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/apis"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
pvDeletionTimeout = 3 * time.Minute
statefulSetReadyTimeout = 3 * time.Minute
taintKeyPrefix = "zoneTaint_"
)
var _ = utils.SIGDescribe("Regional PD", func() {
@ -64,10 +73,23 @@ var _ = utils.SIGDescribe("Regional PD", func() {
testVolumeProvisioning(c, ns)
})
It("should provision storage with delayed binding [Slow]", func() {
testRegionalDelayedBinding(c, ns, 1 /* pvcCount */)
testRegionalDelayedBinding(c, ns, 3 /* pvcCount */)
})
It("should provision storage in the allowedTopologies [Slow]", func() {
testRegionalAllowedTopologies(c, ns)
})
It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() {
testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 1 /* pvcCount */)
testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 3 /* pvcCount */)
})
It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() {
testZonalFailover(c, ns)
})
})
})
@ -76,19 +98,19 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []storageClassTest{
tests := []testsuites.StorageClassTest{
{
name: "HDD Regional PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
Name: "HDD Regional PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(cloudZones, ","),
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
@ -97,16 +119,16 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
},
},
{
name: "HDD Regional PD with auto zone selection on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
Name: "HDD Regional PD with auto zone selection on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
@ -124,14 +146,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
class := newStorageClass(test, ns, "" /* suffix */)
claim := newClaim(test, ns, "" /* suffix */)
claim.Spec.StorageClassName = &class.Name
testDynamicProvisioning(test, c, claim, class)
testsuites.TestDynamicProvisioning(test, c, claim, class)
}
}
func testZonalFailover(c clientset.Interface, ns string) {
nodes := framework.GetReadySchedulableNodesOrDie(c)
nodeCount := len(nodes.Items)
cloudZones := getTwoRandomZones(c)
class := newRegionalStorageClass(ns, cloudZones)
claimTemplate := newClaimTemplate(ns)
@ -188,41 +207,40 @@ func testZonalFailover(c clientset.Interface, ns string) {
Expect(err).ToNot(HaveOccurred())
podZone := node.Labels[apis.LabelZoneFailureDomain]
// TODO (verult) Consider using node taints to simulate zonal failure instead.
By("deleting instance group belonging to pod's zone")
// Asynchronously detect a pod reschedule is triggered during/after instance group deletion.
waitStatus := make(chan error)
go func() {
waitStatus <- waitForStatefulSetReplicasNotReady(statefulSet.Name, ns, c)
}()
cloud, err := framework.GetGCECloud()
if err != nil {
Expect(err).NotTo(HaveOccurred())
}
instanceGroupName := framework.TestContext.CloudConfig.NodeInstanceGroup
instanceGroup, err := cloud.GetInstanceGroup(instanceGroupName, podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group %s in zone %s", instanceGroupName, podZone)
templateName, err := framework.GetManagedInstanceGroupTemplateName(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group template in zone %s", podZone)
err = framework.DeleteManagedInstanceGroup(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error deleting instance group in zone %s", podZone)
By("tainting nodes in the zone the pod is scheduled in")
selector := labels.SelectorFromSet(labels.Set(map[string]string{apis.LabelZoneFailureDomain: podZone}))
nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()})
Expect(err).ToNot(HaveOccurred())
removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)
defer func() {
framework.Logf("recreating instance group %s", instanceGroup.Name)
framework.ExpectNoError(framework.CreateManagedInstanceGroup(instanceGroup.Size, podZone, templateName),
"Error recreating instance group %s in zone %s", instanceGroup.Name, podZone)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, framework.RestartNodeReadyAgainTimeout),
"Error waiting for nodes from the new instance group to become ready.")
framework.Logf("removing previously added node taints")
removeTaintFunc()
}()
err = <-waitStatus
Expect(err).ToNot(HaveOccurred(), "Error waiting for replica to be deleted during failover: %v", err)
By("deleting StatefulSet pod")
err = c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{})
// Verify the pod is scheduled in the other zone.
By("verifying the pod is scheduled in a different zone.")
var otherZone string
if cloudZones[0] == podZone {
otherZone = cloudZones[1]
} else {
otherZone = cloudZones[0]
}
err = wait.PollImmediate(framework.Poll, statefulSetReadyTimeout, func() (bool, error) {
framework.Logf("checking whether new pod is scheduled in zone %q", otherZone)
pod = getPod(c, ns, regionalPDLabels)
nodeName = pod.Spec.NodeName
node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, nil
}
newPodZone := node.Labels[apis.LabelZoneFailureDomain]
return newPodZone == otherZone, nil
})
Expect(err).NotTo(HaveOccurred(), "Error waiting for pod to be scheduled in a different zone (%q): %v", otherZone, err)
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
if err != nil {
@ -237,7 +255,6 @@ func testZonalFailover(c clientset.Interface, ns string) {
"The same PVC should be used after failover.")
By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
pod = getPod(c, ns, regionalPDLabels)
logs, err := framework.GetPodLogs(c, ns, pod.Name, "")
Expect(err).NotTo(HaveOccurred(),
"Error getting logs from pod %s in namespace %s", pod.Name, ns)
@ -246,21 +263,140 @@ func testZonalFailover(c clientset.Interface, ns string) {
Expect(lineCount).To(Equal(expectedLineCount),
"Line count of the written file should be %d.", expectedLineCount)
// Verify the pod is scheduled in the other zone.
By("verifying the pod is scheduled in a different zone.")
var otherZone string
if cloudZones[0] == podZone {
otherZone = cloudZones[1]
} else {
otherZone = cloudZones[0]
}
nodeName = pod.Spec.NodeName
node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
newPodZone := node.Labels[apis.LabelZoneFailureDomain]
Expect(newPodZone).To(Equal(otherZone),
"The pod should be scheduled in zone %s after all nodes in zone %s have been deleted", otherZone, podZone)
}
func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) (removeTaint func()) {
reversePatches := make(map[string][]byte)
for _, node := range nodes {
oldData, err := json.Marshal(node)
Expect(err).NotTo(HaveOccurred())
node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
Key: taintKeyPrefix + ns,
Value: podZone,
Effect: v1.TaintEffectNoSchedule,
})
newData, err := json.Marshal(node)
Expect(err).NotTo(HaveOccurred())
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
Expect(err).NotTo(HaveOccurred())
reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
Expect(err).NotTo(HaveOccurred())
reversePatches[node.Name] = reversePatchBytes
_, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
Expect(err).ToNot(HaveOccurred())
}
return func() {
for nodeName, reversePatch := range reversePatches {
_, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch)
Expect(err).ToNot(HaveOccurred())
}
}
}
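// Sketch of the patch round-trip above (values hypothetical): for namespace
// "e2e-ns" and zone "us-central1-a", the forward patch produced by
// CreateTwoWayMergePatch(oldData, newData, v1.Node{}) is roughly
//
//	{"spec":{"taints":[{"key":"zoneTaint_e2e-ns","value":"us-central1-a","effect":"NoSchedule"}]}}
//
// while the reverse patch restores spec.taints to its previous contents, so
// the returned closure can undo the taint without tracking resourceVersions.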
func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
ClaimSize: "2Gi",
DelayBinding: true,
}
suffix := "delayed-regional"
class := newStorageClass(test, ns, suffix)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testBindingWaitForFirstConsumerMultiPVC(c, claims, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
for _, pv := range pvs {
checkZoneFromLabelAndAffinity(pv, zone, false)
}
}
func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
test := testsuites.StorageClassTest{
Name: "Regional PD storage class with allowedTopologies test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
}
suffix := "topo-regional"
class := newStorageClass(test, ns, suffix)
zones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, zones)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
}
func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
ClaimSize: "2Gi",
DelayBinding: true,
}
suffix := "topo-delayed-regional"
class := newStorageClass(test, ns, suffix)
topoZones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, topoZones)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testBindingWaitForFirstConsumerMultiPVC(c, claims, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
nodeZone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
zoneFound := false
for _, zone := range topoZones {
if zone == nodeZone {
zoneFound = true
break
}
}
if !zoneFound {
framework.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone)
}
for _, pv := range pvs {
checkZonesFromLabelAndAffinity(pv, sets.NewString(topoZones...), true)
}
}
func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim {
@ -283,6 +419,18 @@ func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.P
return &podList.Items[0]
}
func addAllowedTopologiesToStorageClass(c clientset.Interface, sc *storage.StorageClass, zones []string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: kubeletapis.LabelZoneFailureDomain,
Values: zones,
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
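// Assuming the usual zone label key ("failure-domain.beta.kubernetes.io/zone"
// for kubeletapis.LabelZoneFailureDomain) and a hypothetical zone pair, the
// helper above appends the equivalent of this StorageClass stanza:
//
//	allowedTopologies:
//	- matchLabelExpressions:
//	  - key: failure-domain.beta.kubernetes.io/zone
//	    values:
//	    - us-central1-a
//	    - us-central1-b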
// Generates the spec of a StatefulSet with 1 replica that mounts a Regional PD.
func newStatefulSet(claimTemplate *v1.PersistentVolumeClaim, ns string) (sts *appsv1.StatefulSet, svc *v1.Service, labels map[string]string) {
var replicas int32 = 1
@ -334,7 +482,7 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
// and prints the entire file to stdout.
{
Name: "busybox",
Image: "k8s.gcr.io/busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c"},
Args: []string{
"echo ${POD_NAME} >> /mnt/data/regional-pd/pods.txt;" +

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["testpattern.go"],
importpath = "k8s.io/kubernetes/test/e2e/storage/testpatterns",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,168 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testpatterns
import (
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// MinFileSize represents minimum file size (1 MiB) for testing
MinFileSize = 1 * framework.MiB
// FileSizeSmall represents small file size (1 MiB) for testing
FileSizeSmall = 1 * framework.MiB
// FileSizeMedium represents medium file size (100 MiB) for testing
FileSizeMedium = 100 * framework.MiB
// FileSizeLarge represents large file size (1 GiB) for testing
FileSizeLarge = 1 * framework.GiB
)
// TestVolType represents a volume type to be tested in a TestSuite
type TestVolType string
var (
// InlineVolume represents a volume type that is used inline in volumeSource
InlineVolume TestVolType = "InlineVolume"
// PreprovisionedPV represents a volume type for pre-provisioned Persistent Volume
PreprovisionedPV TestVolType = "PreprovisionedPV"
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
DynamicPV TestVolType = "DynamicPV"
)
// TestPattern represents a combination of parameters to be tested in a TestSuite
type TestPattern struct {
Name string // Name of TestPattern
FeatureTag string // featureTag for the TestSuite
VolType TestVolType // Volume type of the volume
FsType string // Fstype of the volume
VolMode v1.PersistentVolumeMode // PersistentVolumeMode of the volume
}
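// For illustration, a suite-specific pattern (hypothetical, not one of the
// canned definitions below) is declared the same way:
//
//	var xfsBlockDynamicPV = TestPattern{
//		Name:    "Dynamic PV (xfs, block volmode)",
//		VolType: DynamicPV,
//		FsType:  "xfs",
//		VolMode: v1.PersistentVolumeBlock,
//	}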
var (
// Definitions for default fsType
// DefaultFsInlineVolume is TestPattern for "Inline-volume (default fs)"
DefaultFsInlineVolume = TestPattern{
Name: "Inline-volume (default fs)",
VolType: InlineVolume,
}
// DefaultFsPreprovisionedPV is TestPattern for "Pre-provisioned PV (default fs)"
DefaultFsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (default fs)",
VolType: PreprovisionedPV,
}
// DefaultFsDynamicPV is TestPattern for "Dynamic PV (default fs)"
DefaultFsDynamicPV = TestPattern{
Name: "Dynamic PV (default fs)",
VolType: DynamicPV,
}
// Definitions for ext3
// Ext3InlineVolume is TestPattern for "Inline-volume (ext3)"
Ext3InlineVolume = TestPattern{
Name: "Inline-volume (ext3)",
VolType: InlineVolume,
FsType: "ext3",
}
// Ext3PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext3)"
Ext3PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext3)",
VolType: PreprovisionedPV,
FsType: "ext3",
}
// Ext3DynamicPV is TestPattern for "Dynamic PV (ext3)"
Ext3DynamicPV = TestPattern{
Name: "Dynamic PV (ext3)",
VolType: DynamicPV,
FsType: "ext3",
}
// Definitions for ext4
// Ext4InlineVolume is TestPattern for "Inline-volume (ext4)"
Ext4InlineVolume = TestPattern{
Name: "Inline-volume (ext4)",
VolType: InlineVolume,
FsType: "ext4",
}
// Ext4PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext4)"
Ext4PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext4)",
VolType: PreprovisionedPV,
FsType: "ext4",
}
// Ext4DynamicPV is TestPattern for "Dynamic PV (ext4)"
Ext4DynamicPV = TestPattern{
Name: "Dynamic PV (ext4)",
VolType: DynamicPV,
FsType: "ext4",
}
// Definitions for xfs
// XfsInlineVolume is TestPattern for "Inline-volume (xfs)"
XfsInlineVolume = TestPattern{
Name: "Inline-volume (xfs)",
VolType: InlineVolume,
FsType: "xfs",
}
// XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)"
XfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (xfs)",
VolType: PreprovisionedPV,
FsType: "xfs",
}
// XfsDynamicPV is TestPattern for "Dynamic PV (xfs)"
XfsDynamicPV = TestPattern{
Name: "Dynamic PV (xfs)",
VolType: DynamicPV,
FsType: "xfs",
}
// Definitions for Filesystem volume mode
// FsVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (filesystem)"
FsVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (filesystem volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// FsVolModeDynamicPV is TestPattern for "Dynamic PV (filesystem)"
FsVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (filesystem volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// Definitions for block volume mode
// BlockVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (block)"
BlockVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (block volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeBlock,
}
// BlockVolModeDynamicPV is TestPattern for "Dynamic PV (block)(immediate bind)"
BlockVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (block volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeBlock,
}
)

View File

@ -0,0 +1,48 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"provisioning.go",
"subpath.go",
"volume_io.go",
"volumemode.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/testsuites",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,326 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
getTestSuiteInfo() TestSuiteInfo
// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
skipUnsupportedTest(testpatterns.TestPattern, drivers.TestDriver)
// execTest executes test of the testpattern for the driver
execTest(drivers.TestDriver, testpatterns.TestPattern)
}
// TestSuiteInfo represents a set of parameters for TestSuite
type TestSuiteInfo struct {
name string // name of the TestSuite
featureTag string // featureTag for the TestSuite
testPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
}
// TestResource represents an interface for resources that are used by TestSuite
type TestResource interface {
// setupResource sets up test resources to be used for the tests with the
// combination of TestDriver and TestPattern
setupResource(drivers.TestDriver, testpatterns.TestPattern)
// cleanupResource cleans up the test resources created in setupResource
cleanupResource(drivers.TestDriver, testpatterns.TestPattern)
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
tsInfo := suite.getTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
}
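// For example, pairing the "Dynamic PV (block volmode)" pattern (empty
// FeatureTag) with a hypothetical suite whose TestSuiteInfo is
// {name: "volumeMode", featureTag: " [Feature:BlockVolume]"} yields:
//
//	[Testpattern: Dynamic PV (block volmode)] volumeMode [Feature:BlockVolume]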
// RunTestSuite runs all testpatterns of all testSuites for a driver
func RunTestSuite(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns)
for _, pattern := range patterns {
suite.execTest(driver, pattern)
}
}
}
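// Hedged usage sketch (suite initializers and driver are supplied by the
// caller; names here are illustrative): a per-driver Ginkgo block would wire
// a driver into the suites roughly like
//
//	passthrough := func(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
//		return patterns // run every pattern each suite declares
//	}
//	RunTestSuite(f, config, driver, testSuiteInits, passthrough)
//
// where each suite then filters itself per pattern via skipUnsupportedTest.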
// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by the following steps:
// 1. Check whether volType is supported by the driver from its interface
// 2. Check whether fsType is supported by the driver
// 3. Check with driver specific logic
// 4. Check with testSuite specific logic
func skipUnsupportedTest(suite TestSuite, driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
// 1. Check whether volType is supported by the driver from its interface
var isSupported bool
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(drivers.InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(drivers.PreprovisionedPVTestDriver)
case testpatterns.DynamicPV:
_, isSupported = driver.(drivers.DynamicPVTestDriver)
default:
isSupported = false
}
if !isSupported {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
// 2. Check whether fsType is supported by the driver
if !dInfo.SupportedFsType.Has(pattern.FsType) {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
// 3. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
// 4. Check with testSuite specific logic
suite.skipUnsupportedTest(pattern, driver)
}
// genericVolumeTestResource is a generic implementation of TestResource that will be
// usable in most TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
driver drivers.TestDriver
volType string
volSource *v1.VolumeSource
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
sc *storagev1.StorageClass
driverTestResource interface{}
}
var _ TestResource = &genericVolumeTestResource{}
// setupResource sets up genericVolumeTestResource
func (r *genericVolumeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
r.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
cs := f.ClientSet
fsType := pattern.FsType
volType := pattern.VolType
// Create volume for pre-provisioned volume tests
r.driverTestResource = drivers.CreateVolume(driver, volType)
switch volType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
r.volSource = iDriver.GetVolumeSource(false, fsType, r.driverTestResource)
r.volType = dInfo.Name
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
pvSource := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
if pvSource != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false)
}
r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
}
case testpatterns.DynamicPV:
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
claimSize := "5Gi"
r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
By("creating a StorageClass " + r.sc.Name)
var err error
r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
Expect(err).NotTo(HaveOccurred())
if r.sc != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.sc, false, nil)
}
r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
}
default:
framework.Failf("genericVolumeTestResource doesn't support: %s", volType)
}
if r.volSource == nil {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
}
}
// cleanupResource cleans up genericVolumeTestResource
func (r *genericVolumeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
volType := pattern.VolType
if r.pvc != nil || r.pv != nil {
switch volType {
case testpatterns.PreprovisionedPV:
By("Deleting pv and pvc")
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
case testpatterns.DynamicPV:
By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
r.pv.Name, v1.PersistentVolumeReclaimDelete)
}
err := framework.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete PVC %v", r.pvc.Name)
err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.pv.Name, 5*time.Second, 5*time.Minute)
framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", r.pv.Name)
default:
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
}
}
if r.sc != nil {
By("Deleting sc")
deleteStorageClass(f.ClientSet, r.sc.Name)
}
// Cleanup volume for pre-provisioned volume tests
drivers.DeleteVolume(driver, volType, r.driverTestResource)
}
func createVolumeSourceWithPVCPV(
f *framework.Framework,
name string,
pvSource *v1.PersistentVolumeSource,
readOnly bool,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind")
volSource := &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: readOnly,
},
}
return volSource, pv, pvc
}
func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
f *framework.Framework,
name string,
claimSize string,
sc *storagev1.StorageClass,
readOnly bool,
volMode *v1.PersistentVolumeMode,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
cs := f.ClientSet
ns := f.Namespace.Name
By("creating a claim")
pvc := getClaim(claimSize, ns)
pvc.Spec.StorageClassName = &sc.Name
if volMode != nil {
pvc.Spec.VolumeMode = volMode
}
var err error
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
volSource := &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: readOnly,
},
}
return volSource, pv, pvc
}
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
}
return &claim
}
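// Callers typically tweak the returned claim before creating it; for example
// (a sketch based on the usages in this package):
//
//	pvc := getClaim("5Gi", ns)
//	pvc.Spec.StorageClassName = &sc.Name // bind to a specific StorageClass
//	pvc.Spec.VolumeMode = volMode        // optionally request a block volume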
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(cs clientset.Interface, className string) {
err := cs.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
}

View File

@ -0,0 +1,362 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// StorageClassTest represents parameters to be used by provisioning tests
type StorageClassTest struct {
Name string
CloudProviders []string
Provisioner string
StorageClassName string
Parameters map[string]string
DelayBinding bool
ClaimSize string
ExpectedSize string
PvCheck func(volume *v1.PersistentVolume) error
NodeName string
SkipWriteReadCheck bool
VolumeMode *v1.PersistentVolumeMode
}
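// Most fields are optional; a minimal instance sets only the sizes, e.g.
// (mirroring createProvisioningTestInput below):
//
//	t := StorageClassTest{
//		ClaimSize:    "5Gi",
//		ExpectedSize: "5Gi",
//	}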
type provisioningTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &provisioningTestSuite{}
// InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
func InitProvisioningTestSuite() TestSuite {
return &provisioningTestSuite{
tsInfo: TestSuiteInfo{
name: "provisioning",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsDynamicPV,
},
},
}
}
func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
return p.tsInfo
}
func (p *provisioningTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createProvisioningTestInput(driver drivers.TestDriver, pattern testpatterns.TestPattern) (provisioningTestResource, provisioningTestInput) {
// Setup test resource for driver and testpattern
resource := provisioningTestResource{}
resource.setupResource(driver, pattern)
input := provisioningTestInput{
testCase: StorageClassTest{
ClaimSize: resource.claimSize,
ExpectedSize: resource.claimSize,
},
cs: driver.GetDriverInfo().Framework.ClientSet,
pvc: resource.pvc,
sc: resource.sc,
dInfo: driver.GetDriverInfo(),
}
if driver.GetDriverInfo().Config.ClientNodeName != "" {
input.testCase.NodeName = driver.GetDriverInfo().Config.ClientNodeName
}
return resource, input
}
func (p *provisioningTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(p, pattern), func() {
var (
resource provisioningTestResource
input provisioningTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(p, driver, pattern)
needsCleanup = true
// Create test input
resource, input = createProvisioningTestInput(driver, pattern)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
// to be a single struct and to be passed as a pointer.
// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
testProvisioning(&input)
})
}
type provisioningTestResource struct {
driver drivers.TestDriver
claimSize string
sc *storage.StorageClass
pvc *v1.PersistentVolumeClaim
}
var _ TestResource = &provisioningTestResource{}
func (p *provisioningTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
// Setup provisioningTest resource
switch pattern.VolType {
case testpatterns.DynamicPV:
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
p.sc = dDriver.GetDynamicProvisionStorageClass("")
if p.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
p.driver = driver
p.claimSize = "5Gi"
p.pvc = getClaim(p.claimSize, driver.GetDriverInfo().Framework.Namespace.Name)
p.pvc.Spec.StorageClassName = &p.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", p.sc, p.pvc)
}
default:
framework.Failf("Dynamic Provision test doesn't support: %s", pattern.VolType)
}
}
func (p *provisioningTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
}
type provisioningTestInput struct {
testCase StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
dInfo *drivers.DriverInfo
}
func testProvisioning(input *provisioningTestInput) {
It("should provision storage with defaults", func() {
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
It("should provision storage with mount options", func() {
if input.dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", input.dInfo.Name)
}
input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
It("should create and delete block persistent volumes", func() {
if !input.dInfo.IsBlockSupported {
framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
}
block := v1.PersistentVolumeBlock
input.testCase.VolumeMode = &block
input.testCase.SkipWriteReadCheck = true
input.pvc.Spec.VolumeMode = &block
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
}
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume {
var err error
if class != nil {
By("creating a StorageClass " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
}()
}
By("creating a claim")
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
// typically this claim has already been deleted
err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
}()
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Check sizes
expectedCapacity := resource.MustParse(t.ExpectedSize)
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
requestedCapacity := resource.MustParse(t.ClaimSize)
claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
// Check PV properties
By("checking the PV")
expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
if class == nil {
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
} else {
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
}
if t.VolumeMode != nil {
Expect(pv.Spec.VolumeMode).NotTo(BeNil())
Expect(*pv.Spec.VolumeMode).To(Equal(*t.VolumeMode))
}
// Run the checker
if t.PvCheck != nil {
err = t.PvCheck(pv)
Expect(err).NotTo(HaveOccurred())
}
if !t.SkipWriteReadCheck {
// We start two pods:
// - The first writes 'hello world' to /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
By("checking the created volume is writable and has the PV's mount options")
command := "echo 'hello world' > /mnt/test/data"
// We give the first pod the secondary responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range pv.Spec.MountOptions {
// For each mount entry, take the mount options (the 6th field), replace the surrounding brackets with commas, and match ,<option>, exactly
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
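// For a PV provisioned with the hypothetical mount option "debug", the loop above
// appends the following check (illustrative expansion):
//
//	&& ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,debug, )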
command += " || (mount | grep 'on /mnt/test'; false)"
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command)
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, "grep 'hello world' /mnt/test/data")
}
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and
// it's expected for the caller to do it.) Technically, the first few delete
// attempts may fail, as the volume is still attached to a node because
// kubelet is slowly cleaning up the previous pod, however it should succeed
// in a couple of minutes. Wait 20 minutes to recover from random cloud
// hiccups.
if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
return pv
}
// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
if len(nodeName) != 0 {
pod.Spec.NodeName = nodeName
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
body, err := c.CoreV1().Pods(ns).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else {
framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
}
framework.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}

View File

@ -0,0 +1,755 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"path/filepath"
"regexp"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
probeVolumePath = "/probe-volume"
probeFilePath = probeVolumePath + "/probe-file"
fileName = "test-file"
retryDuration = 20
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
type subPathTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &subPathTestSuite{}
// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
func InitSubPathTestSuite() TestSuite {
return &subPathTestSuite{
tsInfo: TestSuiteInfo{
name: "subPath",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
},
}
}
func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
subPath := f.Namespace.Name
subPathDir := filepath.Join(volumePath, subPath)
return subPathTestInput{
f: f,
subPathDir: subPathDir,
filePathInSubpath: filepath.Join(volumePath, fileName),
filePathInVolume: filepath.Join(subPathDir, fileName),
volType: resource.volType,
pod: resource.pod,
formatPod: resource.formatPod,
volSource: resource.genericVolumeTestResource.volSource,
roVol: resource.roVolSource,
}
}
func (s *subPathTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
var (
resource subPathTestResource
input subPathTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = subPathTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createSubPathTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testSubPath(&input)
})
}
type subPathTestResource struct {
genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
formatPod *v1.Pod
}
var _ TestResource = &subPathTestResource{}
func (s *subPathTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := s.driver.GetDriverInfo()
f := dInfo.Framework
fsType := pattern.FsType
volType := pattern.VolType
// Setup generic test resource
s.genericVolumeTestResource.setupResource(driver, pattern)
// Setup subPath test dependent resource
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.driverTestResource)
}
case testpatterns.PreprovisionedPV:
s.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ReadOnly: true,
},
}
case testpatterns.DynamicPV:
s.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ReadOnly: true,
},
}
default:
framework.Failf("SubPath test doesn't support: %s", volType)
}
subPath := f.Namespace.Name
config := dInfo.Config
s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true)
s.pod.Spec.NodeName = config.ClientNodeName
s.pod.Spec.NodeSelector = config.NodeSelector
s.formatPod = volumeFormatPod(f, s.volSource)
s.formatPod.Spec.NodeName = config.ClientNodeName
s.formatPod.Spec.NodeSelector = config.NodeSelector
}
func (s *subPathTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
// Cleanup subPath test dependent resource
By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
// Cleanup generic test resource
s.genericVolumeTestResource.cleanupResource(driver, pattern)
}
type subPathTestInput struct {
f *framework.Framework
subPathDir string
filePathInSubpath string
filePathInVolume string
volType string
pod *v1.Pod
formatPod *v1.Pod
volSource *v1.VolumeSource
roVol *v1.VolumeSource
}
func testSubPath(input *subPathTestInput) {
It("should support non-existent path", func() {
// Write the file in the subPath from container 0
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
})
It("should support existing directory", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
// Write the file in the subPath from container 0
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
})
It("should support existing single file", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should support file as subpath", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))
TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
})
It("should support restarting containers using directory as subpath [Slow]", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))
testPodContainerRestart(input.f, input.pod)
})
It("should support restarting containers using file as subpath [Slow]", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))
testPodContainerRestart(input.f, input.pod)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(input.f, input.pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if strings.HasPrefix(input.volType, "hostPath") || strings.HasPrefix(input.volType, "csi-hostpath") {
// TODO: This skip should be removed once #61446 is fixed
framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
}
testSubpathReconstruction(input.f, input.pod, true)
})
It("should support readOnly directory specified in the volumeMount", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
// Write the file in the volume from container 1
setWriteCommand(input.filePathInVolume, &input.pod.Spec.Containers[1])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should support readOnly file specified in the volumeMount", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir))
// Write the file in the volume from container 1
setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[1])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, volumePath, input.pod, 0)
})
It("should support existing directories when readOnly specified in the volumeSource", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
}
// Initialize content in the volume while it's writable
initVolumeContent(input.f, input.pod, input.filePathInVolume, input.filePathInSubpath)
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
// TODO: add a test case for the same disk with two partitions
}
// TestBasicSubpath runs basic subpath test
func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) {
TestBasicSubpathFile(f, contents, pod, volumePath)
}
// TestBasicSubpathFile runs basic subpath file test
func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
setReadCommand(filepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}
func generateSuffixForPodName(s string) string {
// Pod name must:
// 1. consist of lower case alphanumeric characters or '-',
// 2. start and end with an alphanumeric character.
// (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')
// Therefore, the suffix is generated by the following steps:
// 1. all characters other than [A-Za-z0-9] are replaced with "-",
// 2. lower case alphanumeric characters are added at the end ('-[a-z0-9]{4}' is appended),
// 3. the entire string is converted to lower case.
re := regexp.MustCompile("[^A-Za-z0-9]")
return strings.ToLower(fmt.Sprintf("%s-%s", re.ReplaceAllString(s, "-"), rand.String(4)))
}
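// generateSuffixForPodName example: volumeType "csi-hostpath-dynamicPV" yields
// something like "csi-hostpath-dynamicpv-x7k2" (the last four characters are random).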
// SubpathTestPod returns a pod spec for subpath tests
func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod {
var (
suffix = generateSuffixForPodName(volumeType)
gracePeriod = int64(1)
probeVolumeName = "liveness-probe-volume"
)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-subpath-test-%s", suffix),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", suffix),
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
},
Containers: []v1.Container{
{
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
SubPath: subpath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
{
Name: fmt.Sprintf("test-container-volume-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *source,
},
{
Name: probeVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
},
}
}
// volumeFormatPod returns a Pod that does nothing but will cause the plugin to format a filesystem
// on first use
func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-ec", "echo nothing"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: "/vol",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *volumeSource,
},
},
},
}
}
func clearSubpathPodCommands(pod *v1.Pod) {
pod.Spec.InitContainers[0].Command = nil
pod.Spec.Containers[0].Args = nil
pod.Spec.Containers[1].Args = nil
}
func setInitCommand(pod *v1.Pod, command string) {
pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command}
}
func setWriteCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file),
fmt.Sprintf("--file_mode=%v", file),
}
}
func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
existingMounts := container.VolumeMounts
container.VolumeMounts = append(existingMounts, volumeMount)
}
func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file1),
fmt.Sprintf("--new_file_0666=%v", file2),
}
}
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
"content of file \"" + file1 + "\": mount-tester new file",
"content of file \"" + file2 + "\": mount-tester new file",
})
}
func setReadCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", file),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
}
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
setReadCommand(file, &pod.Spec.Containers[containerIndex])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("subpath", pod, containerIndex, []string{
"content of file \"" + file + "\": mount-tester new file",
})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
testPodFailSubpathError(f, pod, "subPath")
}
func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
By("Checking for subpath error event")
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": f.Namespace.Name,
"reason": "Failed",
}.AsSelector().String()
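// The selector above is roughly equivalent to (illustrative kubectl query):
//
//	kubectl get events --field-selector involvedObject.kind=Pod,involvedObject.name=<pod>,involvedObject.namespace=<ns>,reason=Failed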
err = framework.WaitTimeoutForPodEvent(f.ClientSet, pod.Name, f.Namespace.Name, selector, errorMsg, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred(), "while waiting for failed event to occur")
}
// Tests that the existing subpath mount is detected when a container restarts
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
// Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", probeFilePath},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
// Start pod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")
// Check that container has restarted
By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
restarts = status.RestartCount
if restarts > 0 {
framework.Logf("Container has restart count: %v", restarts)
return true, nil
}
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart")
// Fix liveness probe
By("Rewriting the file")
writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath)
out, err = podContainerExec(pod, 1, writeCmd)
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")
// Wait for container restarts to stabilize
By("Waiting for container to stop restarting")
stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
if status.RestartCount == restarts {
stableCount++
if stableCount > stableThreshold {
framework.Logf("Container restart has stabilized")
return true, nil
}
} else {
restarts = status.RestartCount
stableCount = 0
framework.Logf("Container has restart count: %v", restarts)
}
break
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize")
}
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
// Change to busybox
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
// If the grace period is too short, then there is not enough time for the volume
// manager to clean up the volumes
gracePeriod := int64(30)
pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred(), "while getting pod")
utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
}
func formatVolume(f *framework.Framework, pod *v1.Pod) {
By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating volume init pod")
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed")
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod")
}
func initVolumeContent(f *framework.Framework, pod *v1.Pod, volumeFilepath, subpathFilepath string) {
setWriteCommand(volumeFilepath, &pod.Spec.Containers[1])
setReadCommand(subpathFilepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod to write volume content %s", pod.Name))
f.TestContainerOutput("subpath", pod, 0, []string{
"content of file \"" + subpathFilepath + "\": mount-tester new file",
})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
// This pod spec is going to be reused; reset all the commands
clearSubpathPodCommands(pod)
}
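// podContainerExec runs a shell command in the given container of the pod via
// kubectl exec. The invocation expands to (illustrative):
//
//	kubectl exec --namespace=<ns> <pod-name> --container <container-name> -- /bin/sh -c '<bashExec>'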
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
}

View File

@ -0,0 +1,361 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files.
*/
package testsuites
import (
"fmt"
"math"
"path/filepath"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If the test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
testpatterns.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
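// A hash can be recomputed offline, assuming generation stays a plain repetition
// of the 1KiB block used in testVolumeIO() (a sketch; size is the file size in bytes):
//
//	blk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB
//	sum := md5.Sum([]byte(strings.Repeat(blk, int(size)/len(blk))))
//	fmt.Printf("%x\n", sum)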
const mountPath = "/opt"
type volumeIOTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeIOTestSuite{}
// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
func InitVolumeIOTestSuite() TestSuite {
return &volumeIOTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeIO",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
},
}
}
func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
fileSizes := createFileSizes(dInfo.MaxFileSize)
volSource := resource.volSource
if volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
if dInfo.IsFsGroupSupported {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumeIOTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
volSource: *volSource,
testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name),
podSec: v1.PodSecurityContext{
FSGroup: fsGroup,
},
fileSizes: fileSizes,
}
}
func (t *volumeIOTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumeIOTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeIOTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
execTestVolumeIO(&input)
})
}
type volumeIOTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
volSource v1.VolumeSource
testFile string
podSec v1.PodSecurityContext
fileSizes []int64
}
func execTestVolumeIO(input *volumeIOTestInput) {
It("should write files of various sizes, verify size, validate content [Slow]", func() {
f := input.f
cs := f.ClientSet
err := testVolumeIO(f, cs, input.config, input.volSource, &input.podSec, input.testFile, input.fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}
func createFileSizes(maxFileSize int64) []int64 {
allFileSizes := []int64{
testpatterns.FileSizeSmall,
testpatterns.FileSizeMedium,
testpatterns.FileSizeLarge,
}
fileSizes := []int64{}
for _, size := range allFileSizes {
if size <= maxFileSize {
fileSizes = append(fileSizes, size)
}
}
return fileSizes
}
// Return the plugin's client pod spec. Use an InitContainer to set up the file I/O test env.
func makePodSpec(config framework.VolumeTestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
var gracePeriod int64 = 1
volName := fmt.Sprintf("io-volume-%s", config.Namespace)
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: mountPath,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: mountPath,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / testpatterns.MinFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
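// writeToFile example: for the smallest size, loopCnt is 1 and writeCmd expands to
// (illustrative; MinFileSize is currently 1MiB, paths abbreviated):
//
//	i=0; while [ $i -lt 1 ]; do dd if=<ddInput> bs=1048576 >><fpath> 2>/dev/null; let i+=1; done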
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in via the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the test file is created under the volume mount path, e.g. "/opt/<file>-<size>".
// Note: nil can be passed for the podSecContext param, in which case it is ignored.
// Note: each value in `fsizes` is enforced to be at least `MinFileSize` and a multiple of `MinFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace))
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
// initContainer command that creates and fills dd's input file, which is currently 1MiB.
// Rather than storing a 1MiB Go value, a loop appends the 1KiB block
// (MinFileSize/1KiB iterations) to build the file in the target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, ddInput)
clientPod := makePodSpec(config, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
deleteFile(clientPod, ddInput)
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be a multiple of `MinFileSize`
if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 {
// round fsize up to the next multiple of MinFileSize
fsize = (fsize/testpatterns.MinFileSize + 1) * testpatterns.MinFileSize
}
fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize))
defer func() {
deleteFile(clientPod, fpath)
}()
if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil {
return err
}
}
return
}

View File

@ -0,0 +1,402 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
noProvisioner = "kubernetes.io/no-provisioner"
pvNamePrefix = "pv"
)
type volumeModeTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeModeTestSuite{}
// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
func InitVolumeModeTestSuite() TestSuite {
return &volumeModeTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeMode",
testPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
},
}
}
func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
return volumeModeTestInput{
f: f,
sc: resource.sc,
pvc: resource.pvc,
pv: resource.pv,
testVolType: pattern.VolType,
nodeName: dInfo.Config.ClientNodeName,
volMode: pattern.VolMode,
isBlockSupported: dInfo.IsBlockSupported,
}
}
func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) {
dInfo := driver.GetDriverInfo()
isBlockSupported := dInfo.IsBlockSupported
volMode := pattern.VolMode
volType := pattern.VolType
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForPreprovisionedPV
}
return testVolumeModeSuccessForPreprovisionedPV
case testpatterns.DynamicPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForDynamicPV
}
return testVolumeModeSuccessForDynamicPV
default:
framework.Failf("Volume mode test doesn't support volType: %v", volType)
}
return nil
}
func (t *volumeModeTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource volumeModeTestResource
input volumeModeTestInput
testFunc func(*volumeModeTestInput)
needsCleanup bool
)
testFunc = getVolumeModeTestFunc(pattern, driver)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = volumeModeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeModeTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testFunc(&input)
})
}
type volumeModeTestResource struct {
driver drivers.TestDriver
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
driverTestResource interface{}
}
var _ TestResource = &volumeModeTestResource{}
func (s *volumeModeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
ns := f.Namespace
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
volMode := pattern.VolMode
volType := pattern.VolType
var (
scName string
pvSource *v1.PersistentVolumeSource
)
// Create volume for pre-provisioned volume tests
s.driverTestResource = drivers.CreateVolume(driver, volType)
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
} else if volMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
pvSource = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource)
s.sc = sc
s.pv = framework.MakePersistentVolume(pvConfig)
s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
s.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
if s.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
s.sc.VolumeBindingMode = &volBindMode
claimSize := "5Gi"
s.pvc = getClaim(claimSize, ns.Name)
s.pvc.Spec.StorageClassName = &s.sc.Name
s.pvc.Spec.VolumeMode = &volMode
}
default:
framework.Failf("Volume mode test doesn't support: %s", volType)
}
}
func (s *volumeModeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
cs := f.ClientSet
ns := f.Namespace
volType := pattern.VolType
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
By("Deleting sc")
if s.sc != nil {
deleteStorageClass(cs, s.sc.Name)
}
// Cleanup volume for pre-provisioned volume tests
drivers.DeleteVolume(driver, volType, s.driverTestResource)
}
type volumeModeTestInput struct {
f *framework.Framework
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
testVolType testpatterns.TestVolType
nodeName string
volMode v1.PersistentVolumeMode
isBlockSupported bool
}
func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) {
It("should fail to create pod by failing to mount volume", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) {
It("should fail in binding dynamic provisioned PV to PVC", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: scName,
},
Provisioner: noProvisioner,
VolumeBindingMode: &volBindMode,
}
// PV
pvConfig := framework.PersistentVolumeConfig{
PVSource: pvSource,
NamePrefix: pvNamePrefix,
StorageClassName: scName,
VolumeMode: &volMode,
}
// PVC
pvcConfig := framework.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &scName,
VolumeMode: &volMode,
}
return scConfig, pvConfig, pvcConfig
}
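// A minimal usage sketch (not wired into the suite): this mirrors how
// setupResource consumes the three configs for the volume mode tests.
// The namespace and PersistentVolumeSource arguments are placeholders.
func examplePreprovisionedPVSetup(ns string, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
volMode := v1.PersistentVolumeBlock
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(
ns+"-example-sc", storagev1.VolumeBindingImmediate, volMode, pvSource)
// MakePersistentVolume/MakePersistentVolumeClaim turn the configs into
// API objects, exactly as setupResource does above.
pv := framework.MakePersistentVolume(pvConfig)
pvc := framework.MakePersistentVolumeClaim(pvcConfig, ns)
return sc, pv, pvc
}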

View File

@ -0,0 +1,160 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This test checks that various VolumeSources are working.
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
type volumesTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumesTestSuite{}
// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
func InitVolumesTestSuite() TestSuite {
return &volumesTestSuite{
tsInfo: TestSuiteInfo{
name: "volumes",
testPatterns: []testpatterns.TestPattern{
// Default fsType
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
// ext3
testpatterns.Ext3InlineVolume,
testpatterns.Ext3PreprovisionedPV,
testpatterns.Ext3DynamicPV,
// ext4
testpatterns.Ext4InlineVolume,
testpatterns.Ext4PreprovisionedPV,
testpatterns.Ext4DynamicPV,
// xfs
testpatterns.XfsInlineVolume,
testpatterns.XfsPreprovisionedPV,
testpatterns.XfsDynamicPV,
},
},
}
}
func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.IsPersistent {
framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
}
}
func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
volSource := resource.volSource
if volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
if dInfo.IsFsGroupSupported {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumesTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
fsGroup: fsGroup,
tests: []framework.VolumeTest{
{
Volume: *volSource,
File: "index.html",
// Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
dInfo.Name, f.Namespace.Name),
},
},
}
}
func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumesTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumesTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testVolumes(&input)
})
}
type volumesTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
fsGroup *int64
tests []framework.VolumeTest
}
func testVolumes(input *volumesTestInput) {
It("should be mountable", func() {
f := input.f
cs := f.ClientSet
defer framework.VolumeTestCleanup(f, input.config)
volumeTest := input.tests
framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests)
})
}

View File

@ -8,17 +8,25 @@ load(
go_library(
name = "go_default_library",
srcs = [
"deployment.go",
"framework.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@ -0,0 +1,147 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"path"
"strings"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
// PatchCSIDeployment modifies the CSI driver deployment:
// - replaces the provisioner name
// - forces pods onto a specific host
//
// All of that is optional, see PatchCSIOptions. Just beware
// that not renaming the CSI driver deployment can be problematic:
// - when multiple tests deploy the driver, they need
// to run sequentially
// - might conflict with manual deployments
//
// This function is written so that it works for CSI driver deployments
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the provisioner binary accepts a --provisioner parameter
// - the paths inside the container are either fixed
// and don't need to be patched (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
//
// Driver deployments that are different will have to do the patching
// without this function, or skip patching entirely.
//
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
// settings are ignored. We could patch the image definitions or deprecate
// those options.
func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
o.OldDriverName != o.NewDriverName
patchVolumes := func(volumes []v1.Volume) {
if !rename {
return
}
for i := range volumes {
volume := &volumes[i]
if volume.HostPath != nil {
// Update paths like /var/lib/kubelet/plugins/<provisioner>.
p := &volume.HostPath.Path
dir, file := path.Split(*p)
if file == o.OldDriverName {
*p = path.Join(dir, o.NewDriverName)
}
}
}
}
patchContainers := func(containers []v1.Container) {
for i := range containers {
container := &containers[i]
if rename {
for e := range container.Args {
// Inject test-specific provider name into paths like this one:
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
container.Args[e] = strings.Replace(container.Args[e], "/"+o.OldDriverName+"/", "/"+o.NewDriverName+"/", 1)
}
}
// Overwrite the driver name or, respectively, the
// provisioner name by appending a parameter with
// the right value.
switch container.Name {
case o.DriverContainerName:
container.Args = append(container.Args, "--drivername="+o.NewDriverName)
case o.ProvisionerContainerName:
// Driver name is expected to be the same
// as the provisioner here.
container.Args = append(container.Args, "--provisioner="+o.NewDriverName)
}
}
}
patchPodSpec := func(spec *v1.PodSpec) {
patchContainers(spec.Containers)
patchVolumes(spec.Volumes)
if o.NodeName != "" {
spec.NodeName = o.NodeName
}
}
switch object := object.(type) {
case *appsv1.ReplicaSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.DaemonSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.StatefulSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.Deployment:
patchPodSpec(&object.Spec.Template.Spec)
case *storagev1.StorageClass:
if o.NewDriverName != "" {
// Driver name is expected to be the same
// as the provisioner name here.
object.Provisioner = o.NewDriverName
}
}
return nil
}
// PatchCSIOptions controls how PatchCSIDeployment patches the objects.
type PatchCSIOptions struct {
// The original driver name.
OldDriverName string
// The driver name that replaces the original name.
// Can be empty (not used at all) or equal to OldDriverName
// (then it will be added where appropriate without renaming
// in existing fields).
NewDriverName string
// The name of the container which has the CSI driver binary.
// If non-empty, --drivername with the new name will be
// appended to the argument list.
DriverContainerName string
// The name of the container which has the provisioner binary.
// If non-empty, --provisioner with the new name will be appended
// to the argument list.
ProvisionerContainerName string
// If non-empty, all pods are forced to run on this node.
NodeName string
}
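// A hedged usage sketch: patch a hostpath CSI Deployment so each test run
// gets a unique driver name pinned to one node. The "csi-hostpath" driver
// name and the container names are illustrative assumptions; deployments
// that differ must adapt the options or skip patching entirely.
func examplePatchCSIDeployment(f *framework.Framework, deployment *appsv1.Deployment, nodeName string) error {
o := PatchCSIOptions{
OldDriverName:            "csi-hostpath",
NewDriverName:            "csi-hostpath-" + f.Namespace.Name, // per-test unique suffix
DriverContainerName:      "hostpath",        // assumed container name
ProvisionerContainerName: "csi-provisioner", // assumed container name
NodeName:                 nodeName,
}
return PatchCSIDeployment(f, o, deployment)
}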

View File

@ -24,9 +24,14 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
type KubeletOpt string
@ -38,11 +43,51 @@ const (
KRestart KubeletOpt = "restart"
)
const (
// ClusterRole name for the e2e test privileged Pod Security Policy user
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec wraps RunKubectl to execute a bash command in the target pod
func PodExec(pod *v1.Pod, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}
// VerifyExecInPodSucceed verifies that a bash command in the target pod succeeds
func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
_, err := PodExec(pod, bashExec)
if err != nil {
if err, ok := err.(uexec.CodeExitError); ok {
exitCode := err.ExitStatus()
Expect(err).NotTo(HaveOccurred(),
"%q should succeed, but failed with exit code %d and error message %q",
bashExec, exitCode, err)
} else {
Expect(err).NotTo(HaveOccurred(),
"%q should succeed, but failed with error message %q",
bashExec, err)
}
}
}
// VerifyExecInPodFail verifies that a bash command in the target pod fails with the given exit code
func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
_, err := PodExec(pod, bashExec)
if err != nil {
if err, ok := err.(uexec.CodeExitError); ok {
actualExitCode := err.ExitStatus()
Expect(actualExitCode).To(Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q",
bashExec, exitCode, actualExitCode, err)
} else {
Expect(err).NotTo(HaveOccurred(),
"%q should fail with exit code %d, but failed with error message %q",
bashExec, exitCode, err)
}
}
Expect(err).To(HaveOccurred(), "%q should fail with exit code %d, but exit without error", bashExec, exitCode)
}
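// Illustrative only: the Verify helpers above wrap PodExec for assertions.
// For example, write a file, assert on its content, and use grep's exit
// code 1 (no match) as the expected failure.
func exampleWriteAndGrep(pod *v1.Pod) {
VerifyExecInPodSucceed(pod, "echo 'Hello world.' > /tmp/example.txt")
VerifyExecInPodSucceed(pod, "grep 'Hello world.' /tmp/example.txt")
VerifyExecInPodFail(pod, "grep 'Goodbye' /tmp/example.txt", 1)
}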
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
@ -175,13 +220,13 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
// This command is to make sure the kubelet is started after the test finishes, no matter whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
defer func() {
if err != nil {
KubeletCommand(KStart, c, clientPod)
}
}()
By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
@ -244,7 +289,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
@ -276,3 +321,165 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "external-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: "quay.io/kubernetes_incubator/nfs-provisioner:v2.2.0-k8s1.12",
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
},
},
Args: []string{
"-provisioner=" + externalPluginName,
"-grace-period=0",
},
Ports: []v1.ContainerPort{
{Name: "nfs", ContainerPort: 2049},
{Name: "mountd", ContainerPort: 20048},
{Name: "rpcbind", ContainerPort: 111},
{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: []v1.VolumeMount{
{
Name: "export-volume",
MountPath: "/export",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "export-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
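// A hedged sketch of how the provisioner pod pairs with a StorageClass:
// dynamic provisioning is triggered for claims whose class names a
// Provisioner equal to the externalPluginName passed above. Illustrative
// only; assumes storagev1 = "k8s.io/api/storage/v1" is imported.
func exampleExternalProvisionerClass(ns, externalPluginName string) *storagev1.StorageClass {
return &storagev1.StorageClass{
ObjectMeta:  metav1.ObjectMeta{Name: ns + "-external"},
Provisioner: externalPluginName,
}
}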
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
namespace string,
teardown bool,
saNames []string) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
roleBindingClient := client.RbacV1().RoleBindings(namespace)
for _, saName := range saNames {
By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName))
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "psp-" + saName,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: podSecurityPolicyPrivilegedClusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
_, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
continue
}
_, err = roleBindingClient.Create(binding)
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
func CheckVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
}
}
func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
// Check that writing to directory as block volume fails
VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
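// A brief sketch of the call sequence used by the volume mode tests above:
// first confirm the type of the mounted path, then exercise read/write.
// "/mnt/volume1" is the path those tests check for the first claim.
func exampleCheckBlockVolume(pod *v1.Pod) {
CheckVolumeModeOfPath(pod, v1.PersistentVolumeBlock, "/mnt/volume1")
CheckReadWriteToPath(pod, v1.PersistentVolumeBlock, "/mnt/volume1")
}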

View File

@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -54,9 +55,9 @@ var _ = utils.SIGDescribe("Volume expand [Slow]", func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
test := storageClassTest{
name: "default",
claimSize: "2Gi",
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
resizableSc, err = createResizableStorageClass(test, ns, "resizing", c)
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
@ -133,7 +134,7 @@ var _ = utils.SIGDescribe("Volume expand [Slow]", func() {
})
})
func createResizableStorageClass(t storageClassTest, ns string, suffix string, c clientset.Interface) (*storage.StorageClass, error) {
func createResizableStorageClass(t testsuites.StorageClassTest, ns string, suffix string, c clientset.Interface) (*storage.StorageClass, error) {
stKlass := newStorageClass(t, ns, suffix)
allowExpansion := true
stKlass.AllowVolumeExpansion = &allowExpansion

View File

@ -1,434 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files. Note that the plugin is defined inline in
* the pod spec, not via a persistent volume and claim.
*
* These tests work only when privileged containers are allowed; exporting
* filesystems (NFS, GlusterFS, ...) usually needs some mounting or other privileged
* magic in the server pod. Note that the server containers are for testing purposes
* only and should not be used in production.
*/
package storage
import (
"fmt"
"math"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
minFileSize = 1 * framework.MiB
fileSizeSmall = 1 * framework.MiB
fileSizeMedium = 100 * framework.MiB
fileSizeLarge = 1 * framework.GiB
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO().
// If the test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
fileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
fileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
fileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
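// A sketch (not executed by the suite) of recomputing these hashes if the
// generation algorithm changes: the content is fully deterministic, a 1 KiB
// alphabet block repeated to minFileSize by the init container, then repeated
// fsize/minFileSize times by dd (see testVolumeIO and writeToFile below).
// Assumes "crypto/md5" and "io" are imported.
func exampleRecomputeMD5(fsize int64) string {
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1 KiB block
ddInput := strings.Repeat(writeBlk, int(minFileSize)/len(writeBlk)) // dd's 1 MiB input
h := md5.New()
for i := int64(0); i < fsize/minFileSize; i++ {
io.WriteString(h, ddInput)
}
return fmt.Sprintf("%x", h.Sum(nil))
}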
// Return the plugin's client pod spec. Use an InitContainer to set up the file i/o test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `dd_input` file.
func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / minFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", e.g. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext param, in which case it is ignored.
// Note: each `fsizes` value is enforced to be at least `minFileSize` and a multiple of `minFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
dir := path.Join("/opt", config.Prefix, config.Namespace)
dd_input := path.Join(dir, "dd_if")
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := minFileSize / int64(len(writeBlk))
// initContainer cmd to create and fill dd's input file, which is currently 1MiB.
// Rather than storing a 1MiB Go value, a loop is used to create the file in the
// target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, dd_input)
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
// note the test dir will be removed when the kubelet unmounts it
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be a multiple of `minFileSize`; round up to the next multiple
if math.Mod(float64(fsize), float64(minFileSize)) != 0 {
fsize = (fsize/minFileSize + 1) * minFileSize
}
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
if err = writeToFile(clientPod, fpath, dd_input, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, dd_input); err != nil {
return err
}
deleteFile(clientPod, fpath)
}
return
}
// These tests need privileged containers, which are disabled by default.
// TODO: support all of the plugins tested in storage/volumes.go
var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
f := framework.NewDefaultFramework("volume-io")
var (
config framework.VolumeTestConfig
cs clientset.Interface
ns string
serverIP string
serverPod *v1.Pod
volSource v1.VolumeSource
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
testFile := "nfs_io_test"
// client pod uses selinux
podSec := v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
}
BeforeEach(func() {
config, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})
volSource = v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium, fileSizeLarge}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
var name string
testFile := "gluster_io_test"
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci")
// create gluster server and endpoints
config, serverPod, serverIP = framework.NewGlusterfsServer(cs, ns)
name = config.Prefix + "-server"
volSource = v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Gluster endpoints %q...", name)
epErr := cs.CoreV1().Endpoints(ns).Delete(name, nil)
framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if epErr != nil || err != nil {
if epErr != nil {
framework.Logf("AfterEach: Gluster delete endpoints failed: %v", err)
}
if err != nil {
framework.Logf("AfterEach: Gluster server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
err := testVolumeIO(f, cs, config, volSource, nil /*no secContext*/, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
////////////////////////////////////////////////////////////////////////
Describe("iSCSI [Feature:Volumes]", func() {
testFile := "iscsi_io_test"
BeforeEach(func() {
config, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
volSource = v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph-RBD [Feature:Volumes]", func() {
var (
secret *v1.Secret
)
testFile := "ceph-rbd_io_test"
BeforeEach(func() {
config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
volSource = v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
})

View File

@ -0,0 +1,63 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Volume limits", func() {
var (
c clientset.Interface
)
f := framework.NewDefaultFramework("volume-limits-on-node")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce", "gke")
c = f.ClientSet
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
})
It("should verify that all nodes have volume limits", func() {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
for _, node := range nodeList.Items {
volumeLimits := getVolumeLimit(&node)
if len(volumeLimits) == 0 {
framework.Failf("Expected volume limits to be set")
}
}
})
})
func getVolumeLimit(node *v1.Node) map[v1.ResourceName]int64 {
volumeLimits := map[v1.ResourceName]int64{}
nodeAllocatables := node.Status.Allocatable
for k, v := range nodeAllocatables {
if v1helper.IsAttachableVolumeResourceName(k) {
volumeLimits[k] = v.Value()
}
}
return volumeLimits
}
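// Illustrative only: attachable volume limits appear in node allocatable
// under resource names with the "attachable-volumes-" prefix; for example,
// GCE nodes report "attachable-volumes-gce-pd". Other providers report
// their own attachable-volumes-* resources.
func exampleGCEPDLimit(node *v1.Node) int64 {
limits := getVolumeLimit(node)
return limits[v1.ResourceName("attachable-volumes-gce-pd")]
}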

View File

@ -31,6 +31,7 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -52,9 +53,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
defaultScName := getDefaultStorageClassName(c)
verifyDefaultStorageClass(c, defaultScName, true)
test := storageClassTest{
name: "default",
claimSize: "2Gi",
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
pvc = newClaim(test, ns, "default")
@ -167,6 +168,127 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
It("should create metrics for total time taken in volume operations in P/V Controller", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
}
metricKey := "volume_operation_total_seconds_count"
dimensions := []string{"operation_name", "plugin_name"}
valid := hasValidMetrics(metrics.Metrics(controllerMetrics), metricKey, dimensions...)
Expect(valid).To(BeTrue(), "Invalid metric in P/V Controller metrics: %q", metricKey)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
It("should create volume metrics in Volume Manager", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
kubeMetrics, err := metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred())
// Metrics should have dimensions plugin_name and state available
totalVolumesKey := "volume_manager_total_volumes"
dimensions := []string{"state", "plugin_name"}
valid := hasValidMetrics(metrics.Metrics(kubeMetrics), totalVolumesKey, dimensions...)
Expect(valid).To(BeTrue(), "Invalid metric in Volume Manager metrics: %q", totalVolumesKey)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
It("should create metrics for total number of volumes in A/D Controller", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
// Get metrics
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
}
// Create pod
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get updated metrics
updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
}
// Forced detach metric should be present
forceDetachKey := "attachdetach_controller_forced_detaches"
_, ok := updatedControllerMetrics[forceDetachKey]
Expect(ok).To(BeTrue(), "Key %q not found in A/D Controller metrics", forceDetachKey)
// Wait and validate
totalVolumesKey := "attachdetach_controller_total_volumes"
states := []string{"actual_state_of_world", "desired_state_of_world"}
dimensions := []string{"state", "plugin_name"}
waitForADControllerStatesMetrics(metricsGrabber, totalVolumesKey, dimensions, states)
// The total number of volumes in both the ActualStateOfWorld and DesiredStateOfWorld
// states should be greater than or equal to what it was before
oldStates := getStatesMetrics(totalVolumesKey, metrics.Metrics(controllerMetrics))
updatedStates := getStatesMetrics(totalVolumesKey, metrics.Metrics(updatedControllerMetrics))
for _, stateName := range states {
if _, ok := oldStates[stateName]; !ok {
continue
}
for pluginName, numVolumes := range updatedStates[stateName] {
oldNumVolumes := oldStates[stateName][pluginName]
Expect(numVolumes).To(BeNumerically(">=", oldNumVolumes),
"Wrong number of volumes in state %q, plugin %q: wanted >=%d, got %d",
stateName, pluginName, oldNumVolumes, numVolumes)
}
}
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
// Tests for PV controller metrics, specifically the bound/unbound PV/PVC counts.
Describe("PVController", func() {
const (
@ -454,3 +576,61 @@ func calculateRelativeValues(originValues, updatedValues map[string]int64) map[s
}
return relativeValues
}
func hasValidMetrics(metrics metrics.Metrics, metricKey string, dimensions ...string) bool {
var errCount int
framework.Logf("Looking for sample in metric %q", metricKey)
samples, ok := metrics[metricKey]
if !ok {
framework.Logf("Key %q was not found in metrics", metricKey)
return false
}
for _, sample := range samples {
framework.Logf("Found sample %q", sample.String())
for _, d := range dimensions {
if _, ok := sample.Metric[model.LabelName(d)]; !ok {
framework.Logf("Error getting dimension %q for metric %q, sample %q", d, metricKey, sample.String())
errCount++
}
}
}
return errCount == 0
}
func getStatesMetrics(metricKey string, givenMetrics metrics.Metrics) map[string]map[string]int64 {
states := make(map[string]map[string]int64)
for _, sample := range givenMetrics[metricKey] {
framework.Logf("Found sample %q", sample.String())
state := string(sample.Metric["state"])
pluginName := string(sample.Metric["plugin_name"])
// Accumulate per plugin; assigning a fresh map here would drop
// earlier plugins recorded for the same state.
if states[state] == nil {
states[state] = make(map[string]int64)
}
states[state][pluginName] = int64(sample.Value)
}
return states
}
func waitForADControllerStatesMetrics(metricsGrabber *metrics.MetricsGrabber, metricName string, dimensions []string, stateNames []string) {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
Steps: 21,
}
verifyMetricFunc := func() (bool, error) {
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Skipf("Could not get controller-manager metrics - skipping")
return false, err
}
if !hasValidMetrics(metrics.Metrics(updatedMetrics), metricName, dimensions...) {
return false, fmt.Errorf("could not get valid metrics for %q", metricName)
}
states := getStatesMetrics(metricName, metrics.Metrics(updatedMetrics))
for _, name := range stateNames {
if _, ok := states[name]; !ok {
return false, fmt.Errorf("could not get state %q from A/D Controller metrics", name)
}
}
return true, nil
}
waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc)
Expect(waitErr).NotTo(HaveOccurred(), "Timeout error fetching A/D controller metrics : %v", waitErr)
}

File diff suppressed because it is too large

View File

@ -14,72 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting a simple 'index.html' file.
* Then it uses the appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
// This test is the volumes test for ConfigMap.
package storage
import (
"os/exec"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)
func DeleteCinderVolume(name string) error {
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", name)
return nil
} else {
framework.Logf("Failed to delete volume %s: %v", name, err)
}
}
framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err
}
// These tests need privileged containers, which are disabled by default.
var _ = utils.SIGDescribe("Volumes", func() {
f := framework.NewDefaultFramework("volume")
@ -94,277 +40,6 @@ var _ = utils.SIGDescribe("Volumes", func() {
namespace = f.Namespace
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{})
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/nfs/index.html
ExpectedContent: "Hello from NFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
It("should be mountable", func() {
// TODO(copejon): GFS is not supported on the debian image.
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
// create gluster server and endpoints
config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name)
name := config.Prefix + "-server"
defer func() {
framework.VolumeTestCleanup(f, config)
err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
ExpectedContent: "Hello from GlusterFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
////////////////////////////////////////////////////////////////////////
// The test needs privileged containers, which are disabled by default.
// Also, make sure that iscsiadm utility and iscsi target kernel modules
// are installed on all nodes!
// Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"
Describe("iSCSI [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
ExpectedContent: "Hello from iSCSI",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph RBD [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/rbd/create_block.sh
ExpectedContent: "Hello from RBD",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph
////////////////////////////////////////////////////////////////////////
Describe("CephFS [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: secret.Name},
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/ceph/index.html
ExpectedContent: "Hello Ceph!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// OpenStack Cinder
////////////////////////////////////////////////////////////////////////
// This test assumes that OpenStack client tools are installed
// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
// and that the usual OpenStack authentication env. variables are set
// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
Describe("Cinder [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("openstack")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cinder",
}
// We assume that namespace.Name is a random string
volumeName := namespace.Name
By("creating a test Cinder volume")
output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
outputString := string(output)
framework.Logf("cinder output:\n%s", outputString)
Expect(err).NotTo(HaveOccurred())
defer DeleteCinderVolume(volumeName)
// Parse 'id' from stdout. Expected format:
// | attachments | [] |
// | availability_zone | nova |
// ...
// | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
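// For the 'id' row above, strings.Fields yields exactly five tokens:
//   []string{"|", "id", "|", "1d6ff08f-5d1c-41a4-ad72-4ef872cae685", "|"}
// which is why the loop below keys on len(fields) == 5 and reads
// fields[1] (the key) and fields[3] (the value).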
volumeID := ""
for _, line := range strings.Split(outputString, "\n") {
fields := strings.Fields(line)
if len(fields) != 5 {
continue
}
if fields[1] != "id" {
continue
}
volumeID = fields[3]
break
}
framework.Logf("Volume ID: %s", volumeID)
Expect(volumeID).NotTo(Equal(""))
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Cinder from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// GCE PD
////////////////////////////////////////////////////////////////////////
Describe("PD", func() {
var config framework.VolumeTestConfig
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
config = framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "pd",
// The PD will be created in the framework.TestContext.CloudConfig.Zone
// zone, so pods should also be scheduled there.
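// (kubeletapis.LabelZoneFailureDomain is
// "failure-domain.beta.kubernetes.io/zone" in this release, so this
// selector pins the client pods to a zone such as "us-central1-a".)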
NodeSelector: map[string]string{
kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
},
}
})
It("should be mountable with ext3", func() {
testGCEPD(f, config, cs, "ext3")
})
It("should be mountable with ext4", func() {
testGCEPD(f, config, cs, "ext4")
})
It("should be mountable with xfs", func() {
// XFS is not supported on GCI and is not
// installed by default on Debian.
framework.SkipUnlessNodeOSDistroIs("ubuntu")
testGCEPD(f, config, cs, "xfs")
})
})
////////////////////////////////////////////////////////////////////////
// ConfigMap
////////////////////////////////////////////////////////////////////////
Describe("ConfigMap", func() {
It("should be mountable", func() {
config := framework.VolumeTestConfig{
@ -434,139 +109,4 @@ var _ = utils.SIGDescribe("Volumes", func() {
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// vSphere
////////////////////////////////////////////////////////////////////////
Describe("vsphere [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
var volumePath string
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "vsphere",
}
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Azure Disk
////////////////////////////////////////////////////////////////////////
Describe("Azure Disk [Feature:Volumes]", func() {
It("should be mountable [Slow]", func() {
framework.SkipUnlessProviderIs("azure")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "azure",
}
By("creating a test azure disk volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePDWithRetry(volumeName)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
fsType := "ext4"
readOnly := false
diskName := volumeName[(strings.LastIndex(volumeName, "/") + 1):]
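// e.g. a volumeName of the form
// "/subscriptions/.../providers/Microsoft.Compute/disks/test-disk" (illustrative)
// yields diskName "test-disk".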
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: volumeName,
FSType: &fsType,
ReadOnly: &readOnly,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Azure from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
})
func testGCEPD(f *framework.Framework, config framework.VolumeTestConfig, cs clientset.Interface, fs string) {
By("creating a test gce pd volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
// - Get NodeName from the pod spec to which the volume is mounted.
// - Force detach and delete.
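// (A PD cannot be deleted while it is still attached to a node, so
// detaching first keeps the cleanup robust even if the test failed
// before the volume was unmounted.)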
pod, err := f.PodClient().Get(config.Prefix+"-client", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed getting pod %q.", config.Prefix+"-client")
detachAndDeletePDs(volumeName, []types.NodeName{types.NodeName(pod.Spec.NodeName)})
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: fs,
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from GCE from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
}

View File

@ -39,9 +39,20 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
@ -53,16 +64,7 @@ go_library(
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -22,10 +22,10 @@ import (
neturl "net/url"
"sync"
"github.com/golang/glog"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
"k8s.io/klog"
)
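// Note: this is part of the broader glog -> klog migration; klog is the
// Kubernetes-maintained fork of glog with the same logging API, so only
// the import path changes here.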
const (
@ -46,7 +46,7 @@ func Connect(ctx context.Context, vs *VSphere) error {
if vs.Client == nil {
vs.Client, err = NewClient(ctx, vs)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
klog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
@ -54,17 +54,17 @@ func Connect(ctx context.Context, vs *VSphere) error {
manager := session.NewManager(vs.Client.Client)
userSession, err := manager.UserSession(ctx)
if err != nil {
glog.Errorf("Error while obtaining user session. err: %+v", err)
klog.Errorf("Error while obtaining user session. err: %+v", err)
return err
}
if userSession != nil {
return nil
}
glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
klog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
vs.Client.Logout(ctx)
vs.Client, err = NewClient(ctx, vs)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
klog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
@ -74,13 +74,13 @@ func Connect(ctx context.Context, vs *VSphere) error {
func NewClient(ctx context.Context, vs *VSphere) (*govmomi.Client, error) {
url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", vs.Config.Hostname, vs.Config.Port))
if err != nil {
glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
klog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
return nil, err
}
url.User = neturl.UserPassword(vs.Config.Username, vs.Config.Password)
client, err := govmomi.NewClient(ctx, url, true)
if err != nil {
glog.Errorf("Failed to create new client. err: %+v", err)
klog.Errorf("Failed to create new client. err: %+v", err)
return nil, err
}
if vs.Config.RoundTripperCount == 0 {

View File

@ -59,6 +59,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
*/
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
c = f.ClientSet
ns = f.Namespace.Name
clientPod = nil

View File

@ -19,31 +19,31 @@ package vsphere
import (
"context"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/klog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@ -288,7 +288,7 @@ func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]stri
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
@ -353,7 +353,7 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: commands,
VolumeMounts: volumeMounts,
},
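// Note: imageutils.GetE2EImage(imageutils.BusyBox) resolves busybox to a
// fully qualified, version-pinned image reference instead of the bare
// "busybox" tag; the exact registry and tag depend on the test images
// configuration.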
@ -478,7 +478,7 @@ func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPa
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
if err != nil {
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
@ -617,7 +617,7 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
_, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
}
@ -629,7 +629,7 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
framework.Logf("Powering on node VM %s", nodeName)
vm.PowerOn(ctx)
err := vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
}
@ -747,7 +747,6 @@ func GetReadySchedulableNodeInfos() []*NodeInfo {
// and its associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()
rand.Seed(time.Now().Unix())
Expect(nodesInfo).NotTo(BeEmpty())
return nodesInfo[rand.Int()%len(nodesInfo)]
}
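// Note: with k8s.io/apimachinery/pkg/util/rand the explicit
// rand.Seed(time.Now().Unix()) call is unnecessary; that package seeds
// its own source at init time.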

View File

@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
pods = append(pods, pod)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
}
@ -119,7 +119,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
volumePath := volumePaths[i]
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Deleting pod on node %s", nodeName))

View File

@ -94,10 +94,12 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployement")
podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get pod from the deployement with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
node1 := pod.Spec.NodeName