vendor update for CSI 0.3.0

This commit is contained in:
gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@ -1,26 +1,24 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"csi_hostpath.go",
"csi_objects.go",
"csi_volumes.go",
"empty_dir_wrapper.go",
"ephemeral_volume.go",
"flexvolume.go",
"generic_persistent_volume-disruptive.go",
"mounted_volume_resize.go",
"nfs_persistent_volume-disruptive.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"persistent_volumes-gce.go",
"persistent_volumes-local.go",
"pv_protection.go",
"pvc_protection.go",
"regional_pd.go",
"subpath.go",
"volume_expand.go",
"volume_io.go",
"volume_metrics.go",
@ -28,10 +26,9 @@ go_library(
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/apis:go_default_library",
@ -42,6 +39,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
@ -70,6 +68,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@ -77,6 +76,7 @@ go_library(
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
@ -95,4 +95,5 @@ filegroup(
"//test/e2e/storage/vsphere:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,199 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0"
)
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiExternalProvisionerImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiDriverRegistrarImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiExternalAttacherImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiHostPathPluginImage,
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}

View File

@ -0,0 +1,415 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"fmt"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
)
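// Default image tags for the hostpath plugin and its sidecar containers; used by
// csiContainerImage when no explicit CSI image version is configured for the test run.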
var csiImageVersions = map[string]string{
"hostpathplugin": "v0.2.0",
"csi-attacher": "v0.2.0",
"csi-provisioner": "v0.2.1",
"driver-registrar": "v0.2.0",
}
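// csiContainerImage builds the full image reference for a CSI container from the configured
// registry and either the configured version or the default from csiImageVersions.
// For example (assuming the registry flag keeps its usual default, e.g. quay.io/k8scsi, and no
// explicit version is set), csiContainerImage("csi-attacher") yields "quay.io/k8scsi/csi-attacher:v0.2.0".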
func csiContainerImage(image string) string {
var fullName string
fullName += framework.TestContext.CSIImageRegistry + "/" + image + ":"
if framework.TestContext.CSIImageVersion != "" {
fullName += framework.TestContext.CSIImageVersion
} else {
fullName += csiImageVersions[image]
}
return fullName
}
// Create the driver registrar cluster role if it doesn't exist; there is no teardown so that tests
// are parallelizable. This role is shared by many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
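// csiServiceAccount deletes any existing ServiceAccount for the given CSI component and,
// unless tearing down, creates a fresh one.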
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
creatingString := "Creating"
if teardown {
creatingString = "Deleting"
}
By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
}
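// csiHostPathPod deletes any existing hostpath driver pod and, unless tearing down, creates a
// new one running the hostpath CSI plugin alongside the external-provisioner, driver-registrar
// and external-attacher sidecars, then waits for it to become Running.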
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiContainerImage("csi-provisioner"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiContainerImage("driver-registrar"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiContainerImage("csi-attacher"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiContainerImage("hostpathplugin"),
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}
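// deployGCEPDCSIDriver loads the GCE PD CSI driver manifests (controller Service, controller
// StatefulSet and node DaemonSet), deletes any existing copies, and, unless tearing down,
// recreates them with the provided service accounts.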
func deployGCEPDCSIDriver(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
nodeSA *v1.ServiceAccount,
controllerSA *v1.ServiceAccount,
) {
// Get API Objects from manifests
nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()
controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()
controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
framework.ExpectNoError(err, "Failed to create Service from manifest")
// Got all objects from manifests; now try to delete any existing objects
err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
}
}
err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
}
}
err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
}
}
if teardown {
return
}
// Create new API Objects through client
_, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)
_, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)
_, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -17,164 +17,40 @@ limitations under the License.
package storage
import (
"fmt"
"math/rand"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0"
csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0"
csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0"
csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner"
csiExternalAttacherClusterRoleName string = "system:csi-external-attacher"
csiDriverRegistrarClusterRoleName string = "csi-driver-registrar"
)
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
) *v1.ServiceAccount {
serviceAccountName := config.Prefix + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
type csiTestDriver interface {
createCSIDriver()
cleanupCSIDriver()
createStorageClassTest(node v1.Node) storageClassTest
}
func csiClusterRole(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
) *rbacv1.ClusterRole {
clusterRoleClient := client.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-cluster-role",
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumes"},
Verbs: []string{"create", "delete", "get", "list", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"persistentvolumeclaims"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"volumeattachments"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"storageclasses"},
Verbs: []string{"get", "list", "watch"},
},
},
}
clusterRoleClient.Delete(role.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleClient.Get(role.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
"hostPath": initCSIHostpath,
// Feature tag to skip test in CI, pending fix of #62237
"[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD,
}
func csiClusterRoleBinding(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRole *rbacv1.ClusterRole,
) *rbacv1.ClusterRoleBinding {
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRole.GetName(),
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
return ret
}
var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
var _ = utils.SIGDescribe("CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-mock-plugin")
var (
@ -196,47 +72,154 @@ var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
ServerNodeName: node.Name,
WaitForCompletion: true,
}
csiDriverRegistrarClusterRole(config)
})
// Create one of these for each of the drivers to be tested
// CSI hostPath driver test
Describe("Sanity CSI plugin test using hostPath CSI driver", func() {
for driverName, initCSIDriver := range csiTestDrivers {
curDriverName := driverName
curInitCSIDriver := initCSIDriver
var (
clusterRole *rbacv1.ClusterRole
serviceAccount *v1.ServiceAccount
)
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() {
var (
driver csiTestDriver
)
BeforeEach(func() {
By("deploying csi hostpath driver")
clusterRole = csiClusterRole(cs, config, false)
serviceAccount = csiServiceAccount(cs, config, false)
csiClusterRoleBinding(cs, config, false, serviceAccount, clusterRole)
csiHostPathPod(cs, config, false, f, serviceAccount)
BeforeEach(func() {
driver = curInitCSIDriver(f, config)
driver.createCSIDriver()
})
AfterEach(func() {
driver.cleanupCSIDriver()
})
It("should provision storage", func() {
t := driver.createStorageClassTest(node)
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
AfterEach(func() {
By("uninstalling csi hostpath driver")
csiHostPathPod(cs, config, true, f, serviceAccount)
csiClusterRoleBinding(cs, config, true, serviceAccount, clusterRole)
serviceAccount = csiServiceAccount(cs, config, true)
clusterRole = csiClusterRole(cs, config, true)
})
It("should provision storage with a hostPath CSI driver", func() {
t := storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
}
})
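// hostpathCSIDriver implements csiTestDriver for the CSI hostpath plugin.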
type hostpathCSIDriver struct {
combinedClusterRoleNames []string
serviceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
return &hostpathCSIDriver{
combinedClusterRoleNames: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
},
f: f,
config: config,
}
}
func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
return storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
}
func (h *hostpathCSIDriver) createCSIDriver() {
By("deploying csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, h.serviceAccount)
}
func (h *hostpathCSIDriver) cleanupCSIDriver() {
By("uninstalling csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
csiHostPathPod(cs, config, true, f, h.serviceAccount)
csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
}
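// gcePDCSIDriver implements csiTestDriver for the GCE PD CSI driver.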
type gcePDCSIDriver struct {
controllerClusterRoles []string
nodeClusterRoles []string
controllerServiceAccount *v1.ServiceAccount
nodeServiceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
cs := f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa"
// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
return &gcePDCSIDriver{
nodeClusterRoles: []string{
csiDriverRegistrarClusterRoleName,
},
controllerClusterRoles: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
},
f: f,
config: config,
}
}
func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
return storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
}
}
func (g *gcePDCSIDriver) createCSIDriver() {
By("deploying gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
g.controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false /* teardown */)
g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
}
func (g *gcePDCSIDriver) cleanupCSIDriver() {
By("uninstalling gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
csiServiceAccount(cs, config, "gce-node", true /* teardown */)
}

View File

@ -374,7 +374,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
defer func() {
err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
framework.ExpectNoError(err)
}()

View File

@ -0,0 +1,138 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = utils.SIGDescribe("Ephemeralstorage", func() {
var (
c clientset.Interface
)
f := framework.NewDefaultFramework("pv")
BeforeEach(func() {
c = f.ClientSet
})
Describe("When pod refers to non-existent ephemeral storage", func() {
for _, testSource := range invalidEphemeralSource("pod-ephm-test") {
It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() {
pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source)
pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
// Let the pod sit for 30 seconds before deleting it
time.Sleep(30 * time.Second)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
}
})
})
type ephemeralTestInfo struct {
volumeType string
source *v1.VolumeSource
}
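// testEphemeralVolumePod returns a pod spec that mounts the given volume source at the shared
// test volumePath using the test mount image.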
func testEphemeralVolumePod(f *framework.Framework, volumeType string, source *v1.VolumeSource) *v1.Pod {
var (
suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4)))
)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-ephm-test-%s", suffix),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *source,
},
},
},
}
}
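// invalidEphemeralSource returns secret, configmap and projected volume sources that reference
// objects which do not exist, so pods using them can never mount their volumes.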
func invalidEphemeralSource(suffix string) []ephemeralTestInfo {
testInfo := []ephemeralTestInfo{
{
volumeType: "secret",
source: &v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: fmt.Sprintf("secert-%s", suffix),
},
},
},
{
volumeType: "configmap",
source: &v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: fmt.Sprintf("configmap-%s", suffix),
},
},
},
},
{
volumeType: "projected",
source: &v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: fmt.Sprintf("secret-%s", suffix),
},
},
},
},
},
},
},
}
return testInfo
}

View File

@ -0,0 +1,103 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
f := framework.NewDefaultFramework("generic-disruptive-pv")
var (
c clientset.Interface
ns string
)
BeforeEach(func() {
// Skip tests unless the number of nodes is at least 2
framework.SkipUnlessNodeCountIsAtLeast(2)
framework.SkipIfProviderIs("local")
c = f.ClientSet
ns = f.Namespace.Name
})
disruptiveTestTable := []disruptiveTest{
{
testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.",
runTest: utils.TestKubeletRestartsAndRestoresMount,
},
{
testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromDeletedPod,
},
{
testItStmt: "Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromForceDeletedPod,
},
}
Context("When kubelet restarts", func() {
// Test table housing the It() title string and test spec. runTest is type testBody, defined
// elsewhere in this package (nfs_persistent_volume-disruptive.go). To add tests, define a function mirroring the testBody signature and assign
// to runTest.
var (
clientPod *v1.Pod
pvc *v1.PersistentVolumeClaim
)
BeforeEach(func() {
framework.Logf("Initializing pod and pvcs for test")
clientPod, pvc = createPodPVCFromSC(f, c, ns)
})
for _, test := range disruptiveTestTable {
func(t disruptiveTest) {
It(t.testItStmt, func() {
By("Executing Spec")
t.runTest(c, f, clientPod)
})
}(test)
}
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, nil)
pvc, clientPod = nil, nil
})
})
})
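// createPodPVCFromSC creates a PVC (relying on the cluster's default StorageClass for dynamic
// provisioning), waits for it to bind, and returns an nginx pod that mounts the claim together
// with the claim itself.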
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) {
var err error
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
pvc := newClaim(test, ns, "default")
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test")
return pod, pvc
}

View File

@ -21,8 +21,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() {
var _ = utils.SIGDescribe("Mounted volume expand[Slow]", func() {
var (
c clientset.Interface
ns string
@ -113,7 +113,8 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume
By("Creating a deployment with the provisioned volume")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
defer c.ExtensionsV1beta1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
@ -152,7 +153,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume
})
})
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *extensions.Deployment) (v1.Pod, error) {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)

View File

@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
)
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume)
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
type disruptiveTest struct {
testItStmt string
runTest testBody
@ -41,7 +41,7 @@ const (
MinNodes = 2
)
var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
f := framework.NewDefaultFramework("disruptive-pv")
var (
@ -78,11 +78,10 @@ var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
},
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
Selector: selector,
Selector: selector,
StorageClassName: &emptyStorageClass,
}
// Get the first ready node IP that is not hosting the NFS pod.
if clientNodeIP == "" {
@ -234,7 +233,7 @@ var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
func(t disruptiveTest) {
It(t.testItStmt, func() {
By("Executing Spec")
t.runTest(c, f, clientPod, pvc, pv)
t.runTest(c, f, clientPod)
})
}(test)
}
@ -280,5 +279,7 @@ func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string,
// Ignore deletion errors. Failing on them will interrupt test cleanup.
framework.DeletePodWithWait(f, c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)
if pv != nil {
framework.DeletePersistentVolume(c, pv.Name)
}
}

View File

@ -38,7 +38,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -70,6 +69,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.SkipIfMultizone(cs)
podClient = cs.CoreV1().Pods(ns)
nodeClient = cs.CoreV1().Nodes()
nodes = framework.GetReadySchedulableNodesOrDie(cs)
@ -539,7 +540,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "pd-test-" + string(uuid.NewUUID()),

View File

@ -93,11 +93,10 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
Selector: selector,
Selector: selector,
StorageClassName: &emptyStorageClass,
}
clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, pvcConfig, false)
node = types.NodeName(clientPod.Spec.NodeName)

View File

@ -63,26 +63,44 @@ type localVolumeType string
const (
// default local volume type, aka a directory
DirectoryLocalVolumeType localVolumeType = "dir"
// like DirectoryLocalVolumeType but it's a symbolic link to a directory
DirectoryLinkLocalVolumeType localVolumeType = "dir-link"
// like DirectoryLocalVolumeType but bind mounted
DirectoryBindMountedLocalVolumeType localVolumeType = "dir-bindmounted"
// like DirectoryLocalVolumeType but it's a symbolic link to a self bind-mounted directory
// Note that bind mounting at a symbolic link actually mounts at the directory it
// links to.
DirectoryLinkBindMountedLocalVolumeType localVolumeType = "dir-link-bindmounted"
// creates a tmpfs and mounts it
TmpfsLocalVolumeType localVolumeType = "tmpfs"
// tests based on local ssd at /mnt/disks/by-uuid/
GCELocalSSDVolumeType localVolumeType = "gce-localssd-scsi-fs"
// Creates a local file, formats it, and maps it as a block device.
BlockLocalVolumeType localVolumeType = "block"
// Creates a local file, formats it, and mounts it to use as local volume.
BlockFsLocalVolumeType localVolumeType = "blockfs"
)
var setupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *v1.Node) *localTestVolume{
GCELocalSSDVolumeType: setupLocalVolumeGCELocalSSD,
TmpfsLocalVolumeType: setupLocalVolumeTmpfs,
DirectoryLocalVolumeType: setupLocalVolumeDirectory,
BlockLocalVolumeType: setupLocalVolumeBlock,
GCELocalSSDVolumeType: setupLocalVolumeGCELocalSSD,
TmpfsLocalVolumeType: setupLocalVolumeTmpfs,
DirectoryLocalVolumeType: setupLocalVolumeDirectory,
DirectoryLinkLocalVolumeType: setupLocalVolumeDirectoryLink,
DirectoryBindMountedLocalVolumeType: setupLocalVolumeDirectoryBindMounted,
DirectoryLinkBindMountedLocalVolumeType: setupLocalVolumeDirectoryLinkBindMounted,
BlockLocalVolumeType: setupLocalVolumeBlock,
BlockFsLocalVolumeType: setupLocalVolumeBlockFs,
}
var cleanupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *localTestVolume){
GCELocalSSDVolumeType: cleanupLocalVolumeGCELocalSSD,
TmpfsLocalVolumeType: cleanupLocalVolumeTmpfs,
DirectoryLocalVolumeType: cleanupLocalVolumeDirectory,
BlockLocalVolumeType: cleanupLocalVolumeBlock,
GCELocalSSDVolumeType: cleanupLocalVolumeGCELocalSSD,
TmpfsLocalVolumeType: cleanupLocalVolumeTmpfs,
DirectoryLocalVolumeType: cleanupLocalVolumeDirectory,
DirectoryLinkLocalVolumeType: cleanupLocalVolumeDirectoryLink,
DirectoryBindMountedLocalVolumeType: cleanupLocalVolumeDirectoryBindMounted,
DirectoryLinkBindMountedLocalVolumeType: cleanupLocalVolumeDirectoryLinkBindMounted,
BlockLocalVolumeType: cleanupLocalVolumeBlock,
BlockFsLocalVolumeType: cleanupLocalVolumeBlockFs,
}
type localTestVolume struct {
@ -103,8 +121,7 @@ type localTestVolume struct {
const (
// TODO: This may not be available/writable on all images.
hostBase = "/tmp"
containerBase = "/myvol"
hostBase = "/tmp"
// Path to the first volume in the test containers
// created via createLocalPod or makeLocalPod
// leveraging pv_util.MakePod
@ -122,7 +139,7 @@ const (
// volumeConfigName is the configmap passed to bootstrapper and provisioner
volumeConfigName = "local-volume-config"
// provisioner image used for e2e tests
provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.0.0"
provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.1.0"
// provisioner daemonSetName name
daemonSetName = "local-volume-provisioner"
// provisioner default mount point folder
@ -135,6 +152,9 @@ const (
// A sample request size
testRequestSize = "10Mi"
// Max number of nodes to use for testing
maxNodes = 5
)
var (
@ -156,9 +176,18 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
)
BeforeEach(func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
// Get all the schedulable nodes
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
// Cap max number of nodes
maxLen := len(nodes.Items)
if maxLen > maxNodes {
maxLen = maxNodes
}
scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
// Choose the first node
node0 := &nodes.Items[0]
@ -167,7 +196,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
config = &localTestConfig{
ns: f.Namespace.Name,
client: f.ClientSet,
nodes: nodes.Items,
nodes: nodes.Items[:maxLen],
node0: node0,
scName: scName,
ssTester: ssTester,
@ -175,8 +204,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}
})
localVolumeTypes := []localVolumeType{DirectoryLocalVolumeType, TmpfsLocalVolumeType, GCELocalSSDVolumeType, BlockLocalVolumeType}
for _, tempTestVolType := range localVolumeTypes {
for tempTestVolType := range setupLocalVolumeMap {
// New variable required for ginkgo test closures
testVolType := tempTestVolType
@ -216,7 +244,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
BeforeEach(func() {
By("Creating pod1")
pod1, pod1Err = createLocalPod(config, testVol)
pod1, pod1Err = createLocalPod(config, testVol, nil)
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
})
@ -254,19 +282,76 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
})
})
Context("Set fsGroup for local volume", func() {
BeforeEach(func() {
if testVolType == BlockLocalVolumeType {
framework.Skipf("We don't set fsGroup on block device, skipped.")
}
})
It("should set fsGroup for one pod", func() {
By("Checking fsGroup is set")
pod := createPodWithFsGroupTest(config, testVol, 1234, 1234)
By("Deleting pod")
framework.DeletePodOrFail(config.client, config.ns, pod.Name)
})
It("should set same fsGroup for two pods simultaneously", func() {
fsGroup := int64(1234)
By("Create first pod and check fsGroup is set")
pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup)
By("Create second pod with same fsGroup and check fsGroup is correct")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup)
By("Deleting first pod")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
})
It("should set different fsGroup for second pod if first pod is deleted", func() {
fsGroup1, fsGroup2 := int64(1234), int64(4321)
By("Create first pod and check fsGroup is set")
pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1)
By("Deleting first pod")
err := framework.DeletePodWithWait(f, config.client, pod1)
Expect(err).NotTo(HaveOccurred(), "while deleting first pod")
By("Create second pod and check fsGroup is the new one")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2)
By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
})
It("should not set different fsGroups for two pods simultaneously", func() {
fsGroup1, fsGroup2 := int64(1234), int64(4321)
By("Create first pod and check fsGroup is set")
pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1)
By("Create second pod and check fsGroup is still the old one")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup1)
ep := &eventPatterns{
reason: "AlreadyMountedVolume",
pattern: make([]string, 2),
}
ep.pattern = append(ep.pattern, fmt.Sprintf("The requested fsGroup is %d", fsGroup2))
ep.pattern = append(ep.pattern, "The volume may not be shareable.")
checkPodEvents(config, pod2.Name, ep)
By("Deleting first pod")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
})
})
})
}
Context("Local volume that cannot be mounted [Slow]", func() {
// TODO:
// - make the pod create timeout shorter
// - check for these errors in unit tests instead
It("should fail due to non-existent path", func() {
ep := &eventPatterns{
reason: "FailedMount",
pattern: make([]string, 2)}
ep.pattern = append(ep.pattern, "MountVolume.SetUp failed")
ep.pattern = append(ep.pattern, "does not exist")
testVol := &localTestVolume{
node: config.node0,
@ -275,7 +360,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}
By("Creating local PVC and PV")
createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
pod, err := createLocalPod(config, testVol)
pod, err := createLocalPod(config, testVol, nil)
Expect(err).To(HaveOccurred())
checkPodEvents(config, pod.Name, ep)
cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
@ -299,7 +384,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodNameRunningInNamespace(config.client, pod.Name, pod.Namespace)
err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
Expect(err).To(HaveOccurred())
checkPodEvents(config, pod.Name, ep)
@ -393,6 +478,50 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
})
It("should not create local persistent volume for filesystem volume that was not bind mounted", func() {
directoryPath := filepath.Join(config.discoveryDir, "notbindmount")
By("Creating a directory, not bind mounted, in discovery directory")
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", directoryPath)
err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
By("Allowing provisioner to run for 30s and discover potential local PVs")
time.Sleep(30 * time.Second)
By("Examining provisioner logs for not an actual mountpoint message")
provisionerPodName := findProvisionerDaemonsetPodName(config)
logs, err := framework.GetPodLogs(config.client, config.ns, provisionerPodName, "" /*containerName*/)
Expect(err).NotTo(HaveOccurred(),
"Error getting logs from pod %s in namespace %s", provisionerPodName, config.ns)
expectedLogMessage := "Path \"/mnt/local-storage/notbindmount\" is not an actual mountpoint"
Expect(strings.Contains(logs, expectedLogMessage)).To(BeTrue())
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
})
It("should discover dynamicly created local persistent volume mountpoint in discovery directory", func() {
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
By("Creating a volume in discovery directory")
dynamicVolumePath := path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
setupLocalVolumeProvisionerMountPoint(config, dynamicVolumePath, config.node0)
By("Waiting for the PersistentVolume to be created")
_, err := waitForLocalPersistentVolume(config.client, dynamicVolumePath)
Expect(err).NotTo(HaveOccurred())
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
By("Deleting volume in discovery directory")
cleanupLocalVolumeProvisionerMountPoint(config, dynamicVolumePath, config.node0)
})
})
Context("StatefulSet with pod anti-affinity", func() {
@ -512,7 +641,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pvcs = append(pvcs, pvc)
}
pod := framework.MakeSecPod(config.ns, pvcs, false, "sleep 1", false, false, selinuxLabel)
pod := framework.MakeSecPod(config.ns, pvcs, false, "sleep 1", false, false, selinuxLabel, nil)
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
pods[pod.Name] = pod
@ -627,7 +756,7 @@ func checkPodEvents(config *localTestConfig, podName string, ep *eventPatterns)
// Test two pods at the same time, write from pod1, and read from pod2
func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
By("Creating pod1 to write to the PV")
pod1, pod1Err := createLocalPod(config, testVol)
pod1, pod1Err := createLocalPod(config, testVol, nil)
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
@ -635,7 +764,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
By("Creating pod2 to read from the PV")
pod2, pod2Err := createLocalPod(config, testVol)
pod2, pod2Err := createLocalPod(config, testVol, nil)
Expect(pod2Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod2, config.node0.Name)
@ -659,7 +788,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
// Test two pods one after other, write from pod1, and read from pod2
func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
By("Creating pod1")
pod1, pod1Err := createLocalPod(config, testVol)
pod1, pod1Err := createLocalPod(config, testVol, nil)
Expect(pod1Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod1, config.node0.Name)
@ -675,7 +804,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
By("Creating pod2")
pod2, pod2Err := createLocalPod(config, testVol)
pod2, pod2Err := createLocalPod(config, testVol, nil)
Expect(pod2Err).NotTo(HaveOccurred())
verifyLocalPod(config, testVol, pod2, config.node0.Name)
@ -686,6 +815,15 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
}
// Test creating pod with fsGroup, and check fsGroup is expected fsGroup.
func createPodWithFsGroupTest(config *localTestConfig, testVol *localTestVolume, fsGroup int64, expectedFsGroup int64) *v1.Pod {
pod, err := createLocalPod(config, testVol, &fsGroup)
framework.ExpectNoError(err)
_, err = framework.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3)
Expect(err).NotTo(HaveOccurred(), "failed to get expected fsGroup %d on directory %s in pod %s", fsGroup, volumeDir, pod.Name)
return pod
}
func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMode) {
sc := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@ -777,6 +915,39 @@ func setupLocalVolumeDirectory(config *localTestConfig, node *v1.Node) *localTes
return setupWriteTestFile(hostDir, config, DirectoryLocalVolumeType, node)
}
func setupLocalVolumeDirectoryLink(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLinkLocalVolumeType, node)
}
func setupLocalVolumeDirectoryBindMounted(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryBindMountedLocalVolumeType, node)
}
func setupLocalVolumeDirectoryLinkBindMounted(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && ln -s %s %s",
hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
// Populate volume with testFile containing testFileContent.
return setupWriteTestFile(hostDir, config, DirectoryLinkBindMountedLocalVolumeType, node)
}
func setupLocalVolumeBlock(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
@ -789,6 +960,23 @@ func setupLocalVolumeBlock(config *localTestConfig, node *v1.Node) *localTestVol
return volume
}
func setupLocalVolumeBlockFs(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
hostDir := filepath.Join(hostBase, testDirName)
createAndMapBlockLocalVolume(config, hostDir, node)
loopDev := getBlockLoopDev(hostDir, node)
// format and mount at hostDir
// give others rwx for read/write testing
cmd := fmt.Sprintf("sudo mkfs -t ext4 %s && sudo mount -t ext4 %s %s && sudo chmod o+rwx %s", loopDev, loopDev, hostDir, hostDir)
_, err := framework.IssueSSHCommandWithResult(cmd, framework.TestContext.Provider, node)
Expect(err).NotTo(HaveOccurred())
// Populate block volume with testFile containing testFileContent.
volume := setupWriteTestFile(hostDir, config, BlockFsLocalVolumeType, node)
volume.hostDir = hostDir
volume.loopDevDir = loopDev
return volume
}
// Determine the /dev/loopXXX device associated with this test, via its hostDir.
func getBlockLoopDev(hostDir string, node *v1.Node) string {
loopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", hostDir)
@ -834,6 +1022,35 @@ func cleanupLocalVolumeDirectory(config *localTestConfig, volume *localTestVolum
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV, and removes the symlink and its backing test directory on the node via SSH.
func cleanupLocalVolumeDirectoryLink(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
hostDir := volume.hostDir
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV, unmounts the bind mount, and removes the test directory on the node via SSH.
func cleanupLocalVolumeDirectoryBindMounted(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
hostDir := volume.hostDir
removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV, removes the symlink, unmounts the bind mount, and removes the backing test directory on the node via SSH.
func cleanupLocalVolumeDirectoryLinkBindMounted(config *localTestConfig, volume *localTestVolume) {
By("Removing the test directory")
hostDir := volume.hostDir
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV and removes the test directory holding the block file.
func cleanupLocalVolumeBlock(config *localTestConfig, volume *localTestVolume) {
volume.hostDir = volume.loopDevDir
@ -844,6 +1061,19 @@ func cleanupLocalVolumeBlock(config *localTestConfig, volume *localTestVolume) {
Expect(err).NotTo(HaveOccurred())
}
// Deletes the PVC/PV and removes the test directory holding the block file.
func cleanupLocalVolumeBlockFs(config *localTestConfig, volume *localTestVolume) {
// Unmount first.
By("Unmounting the blockfs mountpoint")
umountCmd := fmt.Sprintf("sudo umount %s", volume.hostDir)
err := framework.IssueSSHCommand(umountCmd, framework.TestContext.Provider, volume.node)
Expect(err).NotTo(HaveOccurred())
unmapBlockLocalVolume(config, volume.hostDir, volume.node)
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
err = framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
Expect(err).NotTo(HaveOccurred())
}
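// makeLocalPVCConfig returns the PersistentVolumeClaimConfig used to create PVCs for the given local volume type.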
func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) framework.PersistentVolumeClaimConfig {
pvcConfig := framework.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
@ -931,7 +1161,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
}
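// makeLocalPod builds a pod that mounts the volume's PVC and runs the given command under the test SELinux label.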
func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string) *v1.Pod {
pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel)
pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel, nil)
if pod == nil {
return pod
}
@ -943,7 +1173,7 @@ func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string)
}
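// makeLocalPodWithNodeAffinity builds a pod for the volume's PVC and pins it to the given node via node affinity.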
func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel)
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
if pod == nil {
return
}
@ -969,7 +1199,7 @@ func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolu
}
func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel)
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
if pod == nil {
return
}
@ -981,7 +1211,7 @@ func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolu
}
func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel)
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
if pod == nil {
return
}
@ -991,7 +1221,7 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
// createSecPod should be used when Pod requires non default SELinux labels
func createSecPod(config *localTestConfig, volume *localTestVolume, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
pod, err := framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", hostIPC, hostPID, seLinuxLabel)
pod, err := framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", hostIPC, hostPID, seLinuxLabel, nil, framework.PodStartShortTimeout)
podNodeName, podNodeNameErr := podNodeName(config, pod)
Expect(podNodeNameErr).NotTo(HaveOccurred())
framework.Logf("Security Context POD %q created on Node %q", pod.Name, podNodeName)
@ -999,9 +1229,9 @@ func createSecPod(config *localTestConfig, volume *localTestVolume, hostIPC bool
return pod, err
}
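// createLocalPod creates a pod that mounts the volume's PVC, optionally applying an fsGroup.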
func createLocalPod(config *localTestConfig, volume *localTestVolume) (*v1.Pod, error) {
func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
By("Creating a pod")
return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel)
return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
}
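// createAndMountTmpfsLocalVolume mounts a tmpfs at the given directory on the node.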
func createAndMountTmpfsLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
@ -1282,6 +1512,8 @@ func createVolumeConfigMap(config *localTestConfig) {
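// createProvisionerDaemonset deploys the local volume provisioner as a DaemonSet in the test namespace and waits for its pods to be running.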
func createProvisionerDaemonset(config *localTestConfig) {
provisionerPrivileged := true
mountProp := v1.MountPropagationHostToContainer
provisioner := &extv1beta1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
@ -1332,8 +1564,9 @@ func createProvisionerDaemonset(config *localTestConfig) {
MountPath: "/etc/provisioner/config/",
},
{
Name: "local-disks",
MountPath: provisionerDefaultMountRoot,
Name: "local-disks",
MountPath: provisionerDefaultMountRoot,
MountPropagation: &mountProp,
},
},
},
@ -1369,6 +1602,22 @@ func createProvisionerDaemonset(config *localTestConfig) {
framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind)
}
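// findProvisionerDaemonsetPodName returns the name of the provisioner DaemonSet pod running on node0, failing the test if none is found.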
func findProvisionerDaemonsetPodName(config *localTestConfig) string {
podList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
if err != nil {
framework.Failf("could not get the pod list: %v", err)
return ""
}
pods := podList.Items
for _, pod := range pods {
if strings.HasPrefix(pod.Name, daemonSetName) && pod.Spec.NodeName == config.node0.Name {
return pod.Name
}
}
framework.Failf("Unable to find provisioner daemonset pod on node0")
return ""
}
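// deleteProvisionerDaemonset removes the provisioner DaemonSet created for the test.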
func deleteProvisionerDaemonset(config *localTestConfig) {
ds, err := config.client.ExtensionsV1beta1().DaemonSets(config.ns).Get(daemonSetName, metav1.GetOptions{})
if ds == nil {

View File

@ -132,11 +132,10 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
},
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
Selector: selector,
Selector: selector,
StorageClassName: &emptyStorageClass,
}
})

View File

@ -64,11 +64,10 @@ var _ = utils.SIGDescribe("PV Protection", func() {
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
Selector: selector,
Selector: selector,
StorageClassName: &emptyStorageClass,
}
By("Creating a PV")

View File

@ -21,6 +21,9 @@ import (
. "github.com/onsi/gomega"
"fmt"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
@ -34,8 +37,6 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"strings"
"time"
)
const (
@ -43,7 +44,7 @@ const (
statefulSetReadyTimeout = 3 * time.Minute
)
var _ = utils.SIGDescribe("Regional PD [Feature:RegionalPD]", func() {
var _ = utils.SIGDescribe("Regional PD", func() {
f := framework.NewDefaultFramework("regional-pd")
// filled in BeforeEach
@ -204,6 +205,9 @@ func testZonalFailover(c clientset.Interface, ns string) {
instanceGroup, err := cloud.GetInstanceGroup(instanceGroupName, podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group %s in zone %s", instanceGroupName, podZone)
templateName, err := framework.GetManagedInstanceGroupTemplateName(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group template in zone %s", podZone)
err = framework.DeleteManagedInstanceGroup(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error deleting instance group in zone %s", podZone)
@ -211,9 +215,6 @@ func testZonalFailover(c clientset.Interface, ns string) {
defer func() {
framework.Logf("recreating instance group %s", instanceGroup.Name)
// HACK improve this when Managed Instance Groups are available through the cloud provider API
templateName := strings.Replace(instanceGroupName, "group", "template", 1 /* n */)
framework.ExpectNoError(framework.CreateManagedInstanceGroup(instanceGroup.Size, podZone, templateName),
"Error recreating instance group %s in zone %s", instanceGroup.Name, podZone)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, framework.RestartNodeReadyAgainTimeout),
@ -333,7 +334,7 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
// and prints the entire file to stdout.
{
Name: "busybox",
Image: "gcr.io/google_containers/busybox",
Image: "k8s.gcr.io/busybox",
Command: []string{"sh", "-c"},
Args: []string{
"echo ${POD_NAME} >> /mnt/data/regional-pd/pods.txt;" +

vendor/k8s.io/kubernetes/test/e2e/storage/subpath.go (generated, vendored, new file, 1078 lines added): diff suppressed because it is too large

View File

@ -17,7 +17,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],

View File

@ -24,7 +24,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -138,7 +137,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
By("Writing to the volume.")
file := "/mnt/_SUCCESS"
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
@ -156,18 +155,26 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcelly deleted.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDelete bool) {
// forceDelete indicates whether the pod is forcefully deleted.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
By("Expecting the volume subpath mount to be found.")
result, err := framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
defer func() {
@ -182,11 +189,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
}
Expect(err).NotTo(HaveOccurred())
By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = f.WaitForPodTerminated(clientPod.Name, "")
if !apierrs.IsNotFound(err) && err != nil {
Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
if err != nil {
Expect(err).NotTo(HaveOccurred(), "Expected pod to be not found.")
}
if forceDelete {
@ -194,22 +202,32 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
// so wait some time to finish
time.Sleep(30 * time.Second)
}
By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
if checkSubpath {
By("Expecting the volume subpath mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, false)
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
}
// TestVolumeUnmountsFromFoceDeletedPod tests that a volume unmounts if the client pod was forcelly deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, true)
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.

View File

@ -39,7 +39,7 @@ const (
totalResizeWaitPeriod = 20 * time.Minute
)
var _ = utils.SIGDescribe("Volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() {
var _ = utils.SIGDescribe("Volume expand [Slow]", func() {
var (
c clientset.Interface
ns string

View File

@ -33,6 +33,7 @@ import (
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -64,6 +65,7 @@ var md5hashes = map[int64]string{
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -110,7 +112,8 @@ func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc
},
},
},
SecurityContext: podSecContext,
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
@ -209,6 +212,9 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
@ -379,33 +385,11 @@ var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
Describe("Ceph-RBD [Feature:Volumes]", func() {
var (
secret *v1.Secret
name string
)
testFile := "ceph-rbd_io_test"
BeforeEach(func() {
config, serverPod, serverIP = framework.NewRBDServer(cs, ns)
name = config.Prefix + "-server"
// create server secret
secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
// from test/images/volumes-tester/rbd/keyring
"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
},
Type: "kubernetes.io/rbd",
}
var err error
secret, err = cs.CoreV1().Secrets(ns).Create(secret)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("BeforeEach: failed to create secret %q for Ceph-RBD: %v", name, err))
config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
volSource = v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
@ -413,22 +397,22 @@ var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: name,
Name: secret.Name,
},
FSType: "ext2",
ReadOnly: true,
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", name)
secErr := cs.CoreV1().Secrets(ns).Delete(name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", err)
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)

View File

@ -30,6 +30,7 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/api/core/v1"
@ -41,7 +42,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -56,6 +56,8 @@ type storageClassTest struct {
expectedSize string
pvCheck func(volume *v1.PersistentVolume) error
nodeName string
attach bool
volumeMode *v1.PersistentVolumeMode
}
const (
@ -119,6 +121,10 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
}
if t.volumeMode != nil {
Expect(pv.Spec.VolumeMode).NotTo(BeNil())
Expect(*pv.Spec.VolumeMode).To(Equal(*t.volumeMode))
}
// Run the checker
if t.pvCheck != nil {
@ -126,24 +132,25 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
Expect(err).NotTo(HaveOccurred())
}
// We start two pods:
// - The first writes 'hello world' to the /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
By("checking the created volume is writable and has the PV's mount options")
command := "echo 'hello world' > /mnt/test/data"
// We give the first pod the secondary responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range pv.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
if t.attach {
// We start two pods:
// - The first writes 'hello world' to the /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
By("checking the created volume is writable and has the PV's mount options")
command := "echo 'hello world' > /mnt/test/data"
// We give the first pod the secondary responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range pv.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command)
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data")
}
runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command)
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data")
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
@ -249,6 +256,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []storageClassTest{
// GCE/GKE
{
name: "SSD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
@ -376,6 +384,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
expectedSize: "1.5Gi",
pvCheck: nil,
},
// Azure
{
name: "Azure disk volume with empty sku and location",
cloudProviders: []string{"azure"},
@ -409,7 +418,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
testDynamicProvisioning(test, c, claim, class)
}
// Run the last test with storage.k8s.io/v1beta1 and beta annotation on pvc
// Run the last test with storage.k8s.io/v1beta1 on pvc
if betaTest != nil {
By("Testing " + betaTest.name + " with beta volume provisioning")
class := newBetaStorageClass(*betaTest, "beta")
@ -419,9 +428,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
defer deleteStorageClass(c, class.Name)
claim := newClaim(*betaTest, ns, "beta")
claim.Annotations = map[string]string{
v1.BetaStorageClassAnnotation: class.Name,
}
claim.Spec.StorageClassName = &(class.Name)
testDynamicProvisioning(*betaTest, c, claim, nil)
}
})
@ -483,9 +490,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
testDynamicProvisioning(test, c, claim, class)
})
// NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout)
// when there is no regression.
It("should not provision a volume in an unmanaged GCE zone. [Slow]", func() {
It("should not provision a volume in an unmanaged GCE zone.", func() {
framework.SkipUnlessProviderIs("gce", "gke")
var suffix string = "unmanaged"
@ -538,7 +543,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}()
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
})
@ -591,6 +596,80 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}
framework.Logf("0 PersistentVolumes remain.")
})
It("deletion should be idempotent", func() {
// This test ensures that deletion of a volume is idempotent.
// It creates a PV with Retain policy, deletes underlying AWS / GCE
// volume and changes the reclaim policy to Delete.
// PV controller should delete the PV even though the underlying volume
// is already deleted.
framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err)
By("creating PV")
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "volume-idempotent-delete-",
},
Spec: v1.PersistentVolumeSpec{
// Use Retain to keep the PV, the test will change it to Delete
// when the time comes.
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
// PV is bound to a non-existing PVC, so its reclaim policy is
// executed immediately
ClaimRef: &v1.ObjectReference{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
UID: types.UID("01234567890"),
Namespace: ns,
Name: "dummy-claim-name",
},
},
}
switch framework.TestContext.Provider {
case "aws":
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
},
}
case "gce", "gke":
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
},
}
}
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
framework.ExpectNoError(err)
By("waiting for the PV to get Released")
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout)
framework.ExpectNoError(err)
By("deleting the PD")
err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource)
framework.ExpectNoError(err)
By("changing the PV reclaim policy")
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
pv, err = c.CoreV1().PersistentVolumes().Update(pv)
framework.ExpectNoError(err)
By("waiting for the PV to get deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout)
Expect(err).NotTo(HaveOccurred())
})
})
Describe("DynamicProvisioner External", func() {
@ -617,14 +696,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
expectedSize: "1500Mi",
}
class := newStorageClass(test, ns, "external")
className := class.Name
claim := newClaim(test, ns, "external")
// the external provisioner understands Beta only right now, see
// https://github.com/kubernetes-incubator/external-storage/issues/37
// claim.Spec.StorageClassName = &className
claim.Annotations = map[string]string{
v1.BetaStorageClassAnnotation: className,
}
claim.Spec.StorageClassName = &(class.Name)
By("creating a claim with a external provisioning annotation")
testDynamicProvisioning(test, c, claim, class)
@ -654,7 +727,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
})
// Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by changing the default annotation[Slow] [Serial] [Disruptive]", func() {
It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName := getDefaultStorageClassName(c)
test := storageClassTest{
@ -676,7 +749,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}()
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
@ -685,7 +758,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
})
// Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by removing the default annotation[Slow] [Serial] [Disruptive]", func() {
It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName := getDefaultStorageClassName(c)
test := storageClassTest{
@ -707,7 +780,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}()
// The claim should timeout phase:Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
@ -715,6 +788,69 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
})
})
framework.KubeDescribe("GlusterDynamicProvisioner", func() {
It("should create and delete persistent volumes [fast]", func() {
By("creating a Gluster DP server Pod")
pod := startGlusterDpServerPod(c, ns)
serverUrl := "https://" + pod.Status.PodIP + ":8081"
By("creating a StorageClass")
test := storageClassTest{
name: "Gluster Dynamic provisioner test",
provisioner: "kubernetes.io/glusterfs",
claimSize: "2Gi",
expectedSize: "2Gi",
parameters: map[string]string{"resturl": serverUrl},
attach: false,
}
// GCE/GKE
if getDefaultPluginName() == "kubernetes.io/gce-pd" {
// Keep an extra condition here based on the following facts:
// *) gce-pd rounds up to the next GB.
// *) the GlusterFS provisioner rounds the size up to GiB and sends that number to the backend,
//    which computes the final size as 'number from provisioner' * 1024 * 1024 * 1024.
test.claimSize = "2Gi"
test.expectedSize = "3G"
}
suffix := "glusterdptest"
class := newStorageClass(test, ns, suffix)
By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := newClaim(test, ns, suffix)
testDynamicProvisioning(test, c, claim, class)
})
})
Describe("Block volume provisioning [Feature:BlockVolume]", func() {
It("should create and delete block persistent volumes", func() {
// TODO: add openstack once Cinder volume plugin supports block volumes
framework.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
By("creating a claim with default class")
block := v1.PersistentVolumeBlock
test := storageClassTest{
name: "default",
claimSize: "2Gi",
expectedSize: "2Gi",
volumeMode: &block,
}
// gce or gke
if getDefaultPluginName() == "kubernetes.io/gce-pd" {
// Use GB rather than GiB as the e2e test unit, since gce-pd returns GB;
// otherwise expectedSize may be greater than claimSize.
test.claimSize = "2G"
test.expectedSize = "2G"
}
claim := newClaim(test, ns, "default")
claim.Spec.VolumeMode = &block
testDynamicProvisioning(test, c, claim, nil)
})
})
})
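// getDefaultStorageClassName returns the name of the cluster's default StorageClass.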
func getDefaultStorageClassName(c clientset.Interface) string {
@ -878,8 +1014,7 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
}
}
// TODO: remove when storage.k8s.io/v1beta1 and beta storage class annotations
// are removed.
// TODO: remove when storage.k8s.io/v1beta1 is removed.
func newBetaStorageClass(t storageClassTest, suffix string) *storagebeta.StorageClass {
pluginName := t.provisioner
@ -902,6 +1037,55 @@ func newBetaStorageClass(t storageClassTest, suffix string) *storagebeta.Storage
}
}
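// startGlusterDpServerPod starts a Gluster dynamic provisioner pod (heketi-style REST endpoint on port 8081) in the given namespace, waits for it to be running, and returns it.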
func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "glusterdynamic-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "glusterdynamic-provisioner",
Image: "docker.io/humblec/glusterdynamic-provisioner:v1.0",
Args: []string{
"-config=" + "/etc/heketi/heketi.json",
},
Ports: []v1.ContainerPort{
{Name: "heketi", ContainerPort: 8081},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
@ -988,7 +1172,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
return true, err
}
for _, pv := range allPVs.Items {
if v1helper.GetPersistentVolumeClass(&pv) == scName {
if pv.Spec.StorageClassName == scName {
remainingPVs = append(remainingPVs, &pv)
}
}

View File

@ -57,7 +57,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func DeleteCinderVolume(name string) error {
@ -200,34 +199,9 @@ var _ = utils.SIGDescribe("Volumes", func() {
Describe("Ceph RBD [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewRBDServer(cs, namespace.Name)
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
// create secrets for the server
secret := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-secret",
},
Data: map[string][]byte{
// from test/images/volumes-tester/rbd/keyring
"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
},
Type: "kubernetes.io/rbd",
}
secClient := cs.CoreV1().Secrets(config.Namespace)
defer func() {
secClient.Delete(config.Prefix+"-secret", nil)
}()
if _, err := secClient.Create(&secret); err != nil {
framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
}
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
@ -238,7 +212,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: config.Prefix + "-secret",
Name: secret.Name,
},
FSType: "ext2",
},
@ -258,45 +232,9 @@ var _ = utils.SIGDescribe("Volumes", func() {
////////////////////////////////////////////////////////////////////////
Describe("CephFS [Feature:Volumes]", func() {
It("should be mountable", func() {
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cephfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeCephServer),
ServerPorts: []int{6789},
}
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
_, serverIP := framework.CreateStorageServer(cs, config)
By("sleeping a bit to give ceph server time to initialize")
time.Sleep(framework.VolumeServerPodStartupSleep)
// create ceph secret
secret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-secret",
},
// Must use the ceph keyring at contrib/for-tests/volumes-ceph/ceph/init.sh
// and encode in base64
Data: map[string][]byte{
"key": []byte("AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ=="),
},
Type: "kubernetes.io/cephfs",
}
defer func() {
if err := cs.CoreV1().Secrets(namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
}()
var err error
if secret, err = cs.CoreV1().Secrets(namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
@ -304,7 +242,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: config.Prefix + "-secret"},
SecretRef: &v1.LocalObjectReference{Name: secret.Name},
ReadOnly: true,
},
},

View File

@ -33,6 +33,7 @@ go_library(
"vsphere_volume_ops_storm.go",
"vsphere_volume_perf.go",
"vsphere_volume_placement.go",
"vsphere_volume_vpxd_restart.go",
"vsphere_volume_vsan_policy.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
@ -51,10 +52,9 @@ go_library(
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",

View File

@ -17,6 +17,7 @@ limitations under the License.
package vsphere
import (
"context"
"fmt"
neturl "net/url"
"sync"
@ -25,7 +26,6 @@ import (
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
"golang.org/x/net/context"
)
const (

View File

@ -17,14 +17,15 @@ limitations under the License.
package vsphere
import (
"context"
"errors"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"strings"
"sync"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
type NodeMapper struct {

View File

@ -87,11 +87,10 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
Selector: selector,
Selector: selector,
StorageClassName: &emptyStorageClass,
}
}
By("Creating the PV and PVC")
@ -178,7 +177,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
3. Verify that written file is accessible after kubelet restart
*/
It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv)
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
})
/*
@ -193,7 +192,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
5. Verify that volume mount not to be found.
*/
It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
})
/*

View File

@ -17,18 +17,19 @@ limitations under the License.
package vsphere
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/kubernetes/test/e2e/framework"
"path/filepath"
"strconv"
"strings"
"time"
)
const (
@ -57,7 +58,7 @@ type VolumeOptions struct {
// GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) {
Connect(ctx, vs)
finder := find.NewFinder(vs.Client.Client, true)
finder := find.NewFinder(vs.Client.Client, false)
return finder.Datacenter(ctx, datacenterPath)
}
@ -70,7 +71,7 @@ func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc obje
// GetAllDatacenter returns all the DataCenter Objects
func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) {
Connect(ctx, vs)
finder := find.NewFinder(vs.Client.Client, true)
finder := find.NewFinder(vs.Client.Client, false)
return finder.DatacenterList(ctx, "*")
}
@ -88,7 +89,7 @@ func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Ref
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
finder := find.NewFinder(datacenter.Client(), true)
finder := find.NewFinder(datacenter.Client(), false)
finder.SetDatacenter(datacenter)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
@ -113,7 +114,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
return "", fmt.Errorf("datacenter is nil")
}
vs.initVolumeOptions(volumeOptions)
finder := find.NewFinder(datacenter.Client(), true)
finder := find.NewFinder(datacenter.Client(), false)
finder.SetDatacenter(datacenter)
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
if err != nil {

View File

@ -192,7 +192,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
for i := 0; i < volumesPerPod; i++ {
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", sc[index%len(sc)]))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
Expect(err).NotTo(HaveOccurred())
pvclaims[i] = pvclaim
}

View File

@ -128,7 +128,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "1Gi", sc))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)

View File

@ -17,19 +17,23 @@ limitations under the License.
package vsphere
import (
"context"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@ -40,11 +44,6 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/vmware/govmomi/find"
vimtypes "github.com/vmware/govmomi/vim25/types"
"regexp"
"strings"
)
const (
@ -254,15 +253,11 @@ func getVSphereStorageClassSpec(name string, scParameters map[string]string) *st
return sc
}
func getVSphereClaimSpecWithStorageClassAnnotation(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
scAnnotation := make(map[string]string)
scAnnotation[v1.BetaStorageClassAnnotation] = storageclass.Name
func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
Annotations: scAnnotation,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
@ -273,6 +268,7 @@ func getVSphereClaimSpecWithStorageClassAnnotation(ns string, diskSize string, s
v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
},
},
StorageClassName: &(storageclass.Name),
},
}
return claim
@ -373,7 +369,7 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st
return pod
}
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths []string) {
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
@ -657,7 +653,7 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, true)
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
Expect(err).NotTo(HaveOccurred())
@ -755,3 +751,72 @@ func GetReadySchedulableRandomNodeInfo() *NodeInfo {
Expect(nodesInfo).NotTo(BeEmpty())
return nodesInfo[rand.Int()%len(nodesInfo)]
}
// invokeVCenterServiceControl invokes the given command for the given service
// via service-control on the given vCenter host over SSH.
func invokeVCenterServiceControl(command, service, host string) error {
sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
result, err := framework.SSH(sshCmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
framework.LogSSHResult(result)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
}
return nil
}
// expectVolumeToBeAttached checks if the given Volume is attached to the given
// Node, else fails.
func expectVolumeToBeAttached(nodeName, volumePath string) {
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
}
// expectVolumesToBeAttached checks if the given Volumes are attached to the
// corresponding set of Nodes, else fails.
func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) {
for i, pod := range pods {
nodeName := pod.Spec.NodeName
volumePath := volumePaths[i]
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
}
}
// expectFilesToBeAccessible checks if the given files are accessible on the
// corresponding set of Nodes, else fails.
func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) {
for i, pod := range pods {
podName := pod.Name
filePath := filePaths[i]
By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
}
}
// writeContentToPodFile writes the given content to the specified file.
func writeContentToPodFile(namespace, podName, filePath, content string) error {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
return err
}
// expectFileContentToMatch checks if a given file contains the specified
// content, else fails.
func expectFileContentToMatch(namespace, podName, filePath, content string) {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
}
// expectFileContentsToMatch checks if the given contents match the ones present
// in corresponding files on respective Pods, else fails.
func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
for i, pod := range pods {
podName := pod.Name
filePath := filePaths[i]
By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
expectFileContentToMatch(namespace, podName, filePath, contents[i])
}
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package vsphere
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -98,9 +97,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
nodeName := pod.Spec.NodeName
By("Verifying volume is attached")
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node: %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
By("Deleting pod")
err = framework.DeletePodWithWait(f, client, pod)

View File

@ -84,7 +84,7 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string,
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)

View File

@ -17,13 +17,14 @@ limitations under the License.
package vsphere
import (
"context"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -112,7 +113,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass)
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
Expect(err).NotTo(HaveOccurred())

View File

@ -82,7 +82,7 @@ func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string,
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, diskSize, storageclass))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)

View File

@ -151,7 +151,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
var pvclaims []*v1.PersistentVolumeClaim

View File

@ -103,10 +103,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
expectVolumeToBeAttached(nodeName, volumePath)
}
By("Restarting kubelet on master node")
@ -121,10 +118,9 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
for i, pod := range pods {
volumePath := volumePaths[i]
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
isAttached, err := diskIsAttached(volumePaths[i], nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod)

View File

@ -17,12 +17,12 @@ limitations under the License.
package vsphere
import (
"context"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"golang.org/x/net/context"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"

View File

@ -17,17 +17,17 @@ limitations under the License.
package vsphere
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"golang.org/x/net/context"
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, "1Gi", storageclass)
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create PVC with err: %v", err))
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
@ -111,7 +111,8 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
ctx, _ := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
defer vm.PowerOn(ctx)
@ -139,7 +140,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
})
// Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode string) (string, error) {
func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
var (
err error
newNode string
@ -174,7 +175,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep
}
// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) {
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err

View File

@ -93,7 +93,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
By("Creating PVCs using the Storage Class")
count := 0
for count < volume_ops_scale {
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
count++
}

View File

@ -170,7 +170,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
var pvclaims []*v1.PersistentVolumeClaim
for j := 0; j < volumesPerPod; j++ {
currsc := sc[((i*numPods)+j)%len(sc)]
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", currsc))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc))
Expect(err).NotTo(HaveOccurred())
pvclaims = append(pvclaims, pvclaim)
}

View File

@ -320,9 +320,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
// Verify newly and previously created files present on the volume mounted on the pod
By("Verify newly Created file and previously created files present on volume mounted on pod-A")
verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles)
verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
By("Verify newly Created file and previously created files present on volume mounted on pod-B")
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles)
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)
By("Deleting pod-A")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name)
@ -378,7 +378,7 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile
// Verify newly and previously created files present on the volume mounted on the pod
By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname))
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck)
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
}
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
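The trailing ... added at the call sites above implies verifyFilesExistOnVSphereVolume now takes a variadic parameter rather than a slice. The presumed signature change (a sketch; the real helper, defined in the vSphere e2e utilities, checks each path inside the pod — this sketch only logs the paths it would check):

// before: func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths []string)
// after (presumed):
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
	for _, filePath := range filePaths {
		// callers may now pass an expanded slice (files...) or individual paths
		framework.Logf("verifying %s exists in pod %s/%s", filePath, namespace, podName)
	}
}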

View File

@ -0,0 +1,176 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to verify that a volume remains attached through vpxd restart.
For each of the schedulable nodes:
1. Create a Volume with default options.
2. Create a Pod with the created Volume.
3. Verify that the Volume is attached.
4. Create a file with random contents under the Volume's mount point on the Pod.
5. Stop the vpxd service on the vCenter host.
6. Verify that the file is accessible on the Pod and that its contents match.
7. Start the vpxd service on the vCenter host.
8. Verify that the Volume remains attached, the file is accessible on the Pod, and that its contents match.
9. Delete the Pod and wait for the Volume to be detached.
10. Delete the Volume.
*/
var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vsphere][Serial][Disruptive]", func() {
f := framework.NewDefaultFramework("restart-vpxd")
type node struct {
name string
kvLabels map[string]string
nodeInfo *NodeInfo
}
const (
labelKey = "vsphere_e2e_label_vpxd_restart"
vpxdServiceName = "vmware-vpxd"
)
var (
client clientset.Interface
namespace string
vcNodesMap map[string][]node
)
BeforeEach(func() {
// Requires SSH access to vCenter.
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(client)
numNodes := len(nodes.Items)
Expect(numNodes).NotTo(BeZero(), "No nodes are available for testing volume access through vpxd restart")
vcNodesMap = make(map[string][]node)
for i := 0; i < numNodes; i++ {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[i].Name)
nodeName := nodes.Items[i].Name
nodeLabel := "vsphere_e2e_" + string(uuid.NewUUID())
framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
vcHost := nodeInfo.VSphere.Config.Hostname
vcNodesMap[vcHost] = append(vcNodesMap[vcHost], node{
name: nodeName,
kvLabels: map[string]string{labelKey: nodeLabel},
nodeInfo: nodeInfo,
})
}
})
It("verify volume remains attached through vpxd restart", func() {
for vcHost, nodes := range vcNodesMap {
var (
volumePaths []string
filePaths []string
fileContents []string
pods []*v1.Pod
)
framework.Logf("Testing for nodes on vCenter host: %s", vcHost)
for i, node := range nodes {
By(fmt.Sprintf("Creating test vsphere volume %d", i))
volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, node.name))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for pod %d to be ready", i))
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pods = append(pods, pod)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i))
filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10))
randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10))
err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent)
Expect(err).NotTo(HaveOccurred())
filePaths = append(filePaths, filePath)
fileContents = append(fileContents, randomContent)
}
By("Stopping vpxd on the vCenter host")
vcAddress := vcHost + ":22"
err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to stop vpxd on the vCenter host")
expectFilesToBeAccessible(namespace, pods, filePaths)
expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
By("Starting vpxd on the vCenter host")
err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to start vpxd on the vCenter host")
expectVolumesToBeAttached(pods, volumePaths)
expectFilesToBeAccessible(namespace, pods, filePaths)
expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
for i, node := range nodes {
pod := pods[i]
nodeName := pod.Spec.NodeName
volumePath := volumePaths[i]
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath))
err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
}
})
})
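The new test relies on helpers defined elsewhere in this package (writeContentToPodFile, expectFilesToBeAccessible, expectFileContentsToMatch, expectVolumesToBeAttached, invokeVCenterServiceControl). A rough sketch of what the content check presumably amounts to — reading each file back inside its pod and comparing — assuming the framework's LookForStringInPodExec helper:

// Rough sketch only; the real helper is defined in the vSphere e2e utilities.
func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
	for i, pod := range pods {
		// Exec `cat <filePath>` in the pod and wait for the expected content to appear.
		_, err := framework.LookForStringInPodExec(namespace, pod.Name,
			[]string{"cat", filePaths[i]}, contents[i], 30*time.Second)
		Expect(err).NotTo(HaveOccurred(),
			fmt.Sprintf("file %s on pod %s does not contain the expected content", filePaths[i], pod.Name))
	}
}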

View File

@ -278,7 +278,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
@ -310,7 +310,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
@ -329,7 +329,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
var pvclaims []*v1.PersistentVolumeClaim