vendor cleanup: remove unused, non-go and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions


@@ -1,99 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"csi_objects.go",
"csi_volumes.go",
"empty_dir_wrapper.go",
"ephemeral_volume.go",
"flexvolume.go",
"generic_persistent_volume-disruptive.go",
"mounted_volume_resize.go",
"nfs_persistent_volume-disruptive.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-gce.go",
"persistent_volumes-local.go",
"pv_protection.go",
"pvc_protection.go",
"regional_pd.go",
"subpath.go",
"volume_expand.go",
"volume_io.go",
"volume_metrics.go",
"volume_provisioning.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/storage/utils:all-srcs",
"//test/e2e/storage/vsphere:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,18 +0,0 @@
approvers:
- saad-ali
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
reviewers:
- saad-ali
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
- jeffvance
- copejon
- verult
- davidz627


@@ -1,415 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"fmt"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
)
var csiImageVersions = map[string]string{
"hostpathplugin": "v0.2.0",
"csi-attacher": "v0.2.0",
"csi-provisioner": "v0.2.1",
"driver-registrar": "v0.2.0",
}
func csiContainerImage(image string) string {
var fullName string
fullName += framework.TestContext.CSIImageRegistry + "/" + image + ":"
if framework.TestContext.CSIImageVersion != "" {
fullName += framework.TestContext.CSIImageVersion
} else {
fullName += csiImageVersions[image]
}
return fullName
}
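// For illustration, assuming framework.TestContext.CSIImageRegistry is set to
// "quay.io/k8scsi" (an assumed registry value) and CSIImageVersion is left empty:
//
//	csiContainerImage("csi-provisioner") // "quay.io/k8scsi/csi-provisioner:v0.2.1"
//	csiContainerImage("hostpathplugin")  // "quay.io/k8scsi/hostpathplugin:v0.2.0"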
// Create the driver registrar cluster role if it doesn't exist; there is no teardown so that tests
// are parallelizable. This role will be shared by many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
creatingString := "Creating"
if teardown {
creatingString = "Deleting"
}
By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
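// For illustration, with the VolumeTestConfig these tests use (Prefix: "csi"):
//
//	sa := csiServiceAccount(cs, config, "hostpath", false) // creates "csi-hostpath-service-account"
//	csiServiceAccount(cs, config, "hostpath", true)        // deletes it again and returns nil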
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
}
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiContainerImage("csi-provisioner"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiContainerImage("driver-registrar"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiContainerImage("csi-attacher"),
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiContainerImage("hostpathplugin"),
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}
func deployGCEPDCSIDriver(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
nodeSA *v1.ServiceAccount,
controllerSA *v1.ServiceAccount,
) {
// Get API Objects from manifests
nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()
controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()
controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
framework.ExpectNoError(err, "Failed to create Service from manifest")
// Got all objects from the manifests; now try to delete any existing objects
err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
}
}
err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
}
}
err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
}
}
if teardown {
return
}
// Create new API Objects through client
_, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)
_, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)
_, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}


@@ -1,225 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"math/rand"
"time"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner"
csiExternalAttacherClusterRoleName string = "system:csi-external-attacher"
csiDriverRegistrarClusterRoleName string = "csi-driver-registrar"
)
type csiTestDriver interface {
createCSIDriver()
cleanupCSIDriver()
createStorageClassTest(node v1.Node) storageClassTest
}
var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
"hostPath": initCSIHostpath,
// Feature tag to skip test in CI, pending fix of #62237
"[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD,
}
var _ = utils.SIGDescribe("CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-mock-plugin")
var (
cs clientset.Interface
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "csi",
ClientNodeName: node.Name,
ServerNodeName: node.Name,
WaitForCompletion: true,
}
csiDriverRegistrarClusterRole(config)
})
for driverName, initCSIDriver := range csiTestDrivers {
curDriverName := driverName
curInitCSIDriver := initCSIDriver
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() {
var (
driver csiTestDriver
)
BeforeEach(func() {
driver = curInitCSIDriver(f, config)
driver.createCSIDriver()
})
AfterEach(func() {
driver.cleanupCSIDriver()
})
It("should provision storage", func() {
t := driver.createStorageClassTest(node)
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
}
})
type hostpathCSIDriver struct {
combinedClusterRoleNames []string
serviceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
return &hostpathCSIDriver{
combinedClusterRoleNames: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
},
f: f,
config: config,
}
}
func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
return storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
}
func (h *hostpathCSIDriver) createCSIDriver() {
By("deploying csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, h.serviceAccount)
}
func (h *hostpathCSIDriver) cleanupCSIDriver() {
By("uninstalling csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
csiHostPathPod(cs, config, true, f, h.serviceAccount)
csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
}
type gcePDCSIDriver struct {
controllerClusterRoles []string
nodeClusterRoles []string
controllerServiceAccount *v1.ServiceAccount
nodeServiceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
cs := f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa"
// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
return &gcePDCSIDriver{
nodeClusterRoles: []string{
csiDriverRegistrarClusterRoleName,
},
controllerClusterRoles: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
},
f: f,
config: config,
}
}
func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
return storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
}
}
func (g *gcePDCSIDriver) createCSIDriver() {
By("deploying gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
g.controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false /* teardown */)
g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
}
func (g *gcePDCSIDriver) cleanupCSIDriver() {
By("uninstalling gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
csiServiceAccount(cs, config, "gce-node", true /* teardown */)
}


@@ -1,394 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
// These numbers are obtained empirically.
// If you make them too low, you'll get flaky
// tests instead of failing ones if the race bug reappears.
// If you make volume counts or pod counts too high,
// the tests may fail because mounting configmap/git_repo
// volumes is not very fast and the tests may time out
// waiting for pods to become Running.
// And of course, the higher the numbers, the slower the tests.
wrappedVolumeRaceConfigMapVolumeCount = 50
wrappedVolumeRaceConfigMapPodCount = 5
wrappedVolumeRaceConfigMapIterationCount = 3
wrappedVolumeRaceGitRepoVolumeCount = 50
wrappedVolumeRaceGitRepoPodCount = 5
wrappedVolumeRaceGitRepoIterationCount = 3
wrappedVolumeRaceRCNamePrefix = "wrapped-volume-race-"
)
var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
f := framework.NewDefaultFramework("emptydir-wrapper")
It("should not conflict", func() {
name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
volumeName := "secret-volume"
volumeMountPath := "/etc/secret-volume"
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string][]byte{
"data-1": []byte("value-1\n"),
},
}
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
gitVolumeName := "git-volume"
gitVolumeMountPath := "/etc/git-volume"
gitURL, gitRepo, gitCleanup := createGitServer(f)
defer gitCleanup()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: gitVolumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-test",
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: gitVolumeName,
MountPath: gitVolumeMountPath,
},
},
},
},
},
}
pod = f.PodClient().CreateSync(pod)
defer func() {
By("Cleaning up the secret")
if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
By("Cleaning up the git vol pod")
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
}
}()
})
// The following two tests check for the problem fixed in #29641.
// In order to reproduce it you need to revert the fix, e.g. via
// git revert -n df1e925143daf34199b55ffb91d0598244888cce
// or
// curl -sL https://github.com/kubernetes/kubernetes/pull/29641.patch | patch -p1 -R
//
// After that these tests will fail because some of the pods
// they create never enter Running state.
//
// They need to be [Serial] and [Slow] because they try to induce
// the race by creating pods with many volumes and container volume mounts,
// which takes considerable time and may interfere with other tests.
//
// Probably should also try making tests for secrets and downwardapi,
// but these cases are harder because tmpfs-based emptyDir
// appears to be less prone to the race problem.
It("should not cause race condition when used for configmaps [Serial] [Slow]", func() {
configMapNames := createConfigmapsForRace(f)
defer deleteConfigMaps(f, configMapNames)
volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
}
})
It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
gitURL, gitRepo, cleanup := createGitServer(f)
defer cleanup()
volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
}
})
})
func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
var err error
gitServerPodName := "git-server-" + string(uuid.NewUUID())
containerPort := 8000
labels := map[string]string{"name": gitServerPodName}
gitServerPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: gitServerPodName,
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "git-repo",
Image: imageutils.GetE2EImage(imageutils.Fakegitserver),
ImagePullPolicy: "IfNotPresent",
Ports: []v1.ContainerPort{
{ContainerPort: int32(containerPort)},
},
},
},
},
}
f.PodClient().CreateSync(gitServerPod)
// Portal IP and port
httpPort := 2345
gitServerSvc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "git-server-svc",
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{
{
Name: "http-portal",
Port: int32(httpPort),
TargetPort: intstr.FromInt(containerPort),
},
},
},
}
if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
}
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
By("Cleaning up the git server pod")
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
}
By("Cleaning up the git server svc")
if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
}
}
}
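// For illustration, the returned gitURL has the form "http://<service ClusterIP>:2345",
// the repository directory is always "test", and the returned cleanup func deletes
// both the git server pod and its service.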
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
volumeName := fmt.Sprintf("racey-git-repo-%d", i)
volumes = append(volumes, v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
},
},
})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
})
}
return
}
func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
configMapName := fmt.Sprintf("racey-configmap-%d", i)
configMapNames = append(configMapNames, configMapName)
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: configMapName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
framework.ExpectNoError(err)
}
return
}
func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
By("Cleaning up the configMaps")
for _, configMapName := range configMapNames {
err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
Expect(err).NotTo(HaveOccurred(), "unable to delete configMap %v", configMapName)
}
}
func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i, configMapName := range configMapNames {
volumeName := fmt.Sprintf("racey-configmap-%d", i)
volumes = append(volumes, v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "data-1",
},
},
},
},
})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/etc/config-%d", i),
})
}
return
}
func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
targetNode := nodeList.Items[0]
By("Creating RC which spawns configmap-volume pods")
affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/hostname",
Operator: v1.NodeSelectorOpIn,
Values: []string{targetNode.Name},
},
},
},
},
},
},
}
rc := &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: rcName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &podCount,
Selector: map[string]string{
"name": rcName,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": rcName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container",
Image: "busybox",
Command: []string{"sleep", "10000"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
},
},
VolumeMounts: volumeMounts,
},
},
Affinity: affinity,
DNSPolicy: v1.DNSDefault,
Volumes: volumes,
},
},
},
}
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
defer func() {
err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
framework.ExpectNoError(err)
}()
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
By("Ensuring each pod is running")
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
err = f.WaitForPodRunning(pod.Name)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for pod %s to enter running state", pod.Name)
}
}


@@ -1,138 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = utils.SIGDescribe("Ephemeralstorage", func() {
var (
c clientset.Interface
)
f := framework.NewDefaultFramework("pv")
BeforeEach(func() {
c = f.ClientSet
})
Describe("When pod refers to non-existent ephemeral storage", func() {
for _, testSource := range invalidEphemeralSource("pod-ephm-test") {
It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() {
pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source)
pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
// Allow it to sleep for 30 seconds
time.Sleep(30 * time.Second)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
}
})
})
type ephemeralTestInfo struct {
volumeType string
source *v1.VolumeSource
}
func testEphemeralVolumePod(f *framework.Framework, volumeType string, source *v1.VolumeSource) *v1.Pod {
var (
suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4)))
)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-ephm-test-%s", suffix),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *source,
},
},
},
}
}
func invalidEphemeralSource(suffix string) []ephemeralTestInfo {
testInfo := []ephemeralTestInfo{
{
volumeType: "secret",
source: &v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: fmt.Sprintf("secert-%s", suffix),
},
},
},
{
volumeType: "configmap",
source: &v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: fmt.Sprintf("configmap-%s", suffix),
},
},
},
},
{
volumeType: "projected",
source: &v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: fmt.Sprintf("secret-%s", suffix),
},
},
},
},
},
},
},
}
return testInfo
}


@@ -1,266 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"math/rand"
"net"
"path"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/version"
clientset "k8s.io/client-go/kubernetes"
versionutil "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
sshPort = "22"
driverDir = "test/e2e/testing-manifests/flexvolume/"
defaultVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec"
// TODO: change this and config-test.sh when default flex volume install path is changed for GCI
// On gci, root is read-only and controller-manager containerized. Assume
// controller-manager has started with --flex-volume-plugin-dir equal to this
// (see cluster/gce/config-test.sh)
gciVolumePluginDir = "/home/kubernetes/flexvolume"
gciVolumePluginDirLegacy = "/etc/srv/kubernetes/kubelet-plugins/volume/exec"
gciVolumePluginDirVersion = "1.10.0"
)
// testFlexVolume tests that a client pod using a given flexvolume driver
// successfully mounts it and runs
func testFlexVolume(driver string, cs clientset.Interface, config framework.VolumeTestConfig, f *framework.Framework) {
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{
Driver: "k8s/" + driver,
},
},
File: "index.html",
// Must match content of examples/volumes/flexvolume/dummy(-attachable) domount
ExpectedContent: "Hello from flexvolume!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
framework.VolumeTestCleanup(f, config)
}
// installFlex installs the driver found at filePath on the node, and restarts
// kubelet if 'restart' is true. If node is nil, installs on the master, and restarts
// controller-manager if 'restart' is true.
func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string, restart bool) {
flexDir := getFlexDir(c, node, vendor, driver)
flexFile := path.Join(flexDir, driver)
host := ""
if node != nil {
host = framework.GetNodeExternalIP(node)
} else {
host = net.JoinHostPort(framework.GetMasterHost(), sshPort)
}
cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
sshAndLog(cmd, host)
data := generated.ReadOrDie(filePath)
cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
sshAndLog(cmd, host)
cmd = fmt.Sprintf("sudo chmod +x %s", flexFile)
sshAndLog(cmd, host)
if !restart {
return
}
if node != nil {
err := framework.RestartKubelet(host)
framework.ExpectNoError(err)
err = framework.WaitForKubeletUp(host)
framework.ExpectNoError(err)
} else {
err := framework.RestartControllerManager()
framework.ExpectNoError(err)
err = framework.WaitForControllerManagerUp()
framework.ExpectNoError(err)
}
}
func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) {
flexDir := getFlexDir(c, node, vendor, driver)
host := ""
if node != nil {
host = framework.GetNodeExternalIP(node)
} else {
host = net.JoinHostPort(framework.GetMasterHost(), sshPort)
}
cmd := fmt.Sprintf("sudo rm -r %s", flexDir)
sshAndLog(cmd, host)
}
func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) string {
volumePluginDir := defaultVolumePluginDir
if framework.ProviderIs("gce") {
if node == nil && framework.MasterOSDistroIs("gci") {
v, err := getMasterVersion(c)
if err != nil {
framework.Failf("Error getting master version: %v", err)
}
if v.AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) {
volumePluginDir = gciVolumePluginDir
} else {
volumePluginDir = gciVolumePluginDirLegacy
}
} else if node != nil && framework.NodeOSDistroIs("gci") {
if getNodeVersion(node).AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) {
volumePluginDir = gciVolumePluginDir
} else {
volumePluginDir = gciVolumePluginDirLegacy
}
}
}
flexDir := path.Join(volumePluginDir, fmt.Sprintf("/%s~%s/", vendor, driver))
return flexDir
}
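// For illustration, on a GCE cluster with a GCI node running kubelet v1.10.0 or newer:
//
//	getFlexDir(c, node, "k8s", "dummy") // "/home/kubernetes/flexvolume/k8s~dummy"
//
// while older GCI nodes fall back to the legacy directory
// "/etc/srv/kubernetes/kubelet-plugins/volume/exec/k8s~dummy".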
func sshAndLog(cmd, host string) {
result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
}
}
func getMasterVersion(c clientset.Interface) (*versionutil.Version, error) {
var err error
var v *version.Info
waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
v, err = c.Discovery().ServerVersion()
return err == nil, nil
})
if waitErr != nil {
return nil, fmt.Errorf("Could not get the master version: %v", waitErr)
}
return versionutil.MustParseSemantic(v.GitVersion), nil
}
func getNodeVersion(node *v1.Node) *versionutil.Version {
return versionutil.MustParseSemantic(node.Status.NodeInfo.KubeletVersion)
}
var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
f := framework.NewDefaultFramework("flexvolume")
// note that namespace deletion is handled by delete-namespace flag
var cs clientset.Interface
var ns *v1.Namespace
var node v1.Node
var config framework.VolumeTestConfig
var suffix string
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessMasterOSDistroIs("gci")
framework.SkipUnlessNodeOSDistroIs("debian", "gci")
framework.SkipUnlessSSHKeyPresent()
cs = f.ClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "flex",
ClientNodeName: node.Name,
}
suffix = ns.Name
})
It("should be mountable when non-attachable", func() {
driver := "dummy"
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
})
It("should be mountable when attachable", func() {
driver := "dummy-attachable"
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})
It("should install plugin without kubelet restart", func() {
driver := "dummy"
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), false /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
By("waiting for flex client pod to terminate")
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(cs, &node, "k8s", driverInstallAs)
})
})


@@ -1,103 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
f := framework.NewDefaultFramework("generic-disruptive-pv")
var (
c clientset.Interface
ns string
)
BeforeEach(func() {
// Skip tests unless the cluster has at least 2 nodes
framework.SkipUnlessNodeCountIsAtLeast(2)
framework.SkipIfProviderIs("local")
c = f.ClientSet
ns = f.Namespace.Name
})
disruptiveTestTable := []disruptiveTest{
{
testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.",
runTest: utils.TestKubeletRestartsAndRestoresMount,
},
{
testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromDeletedPod,
},
{
testItStmt: "Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromForceDeletedPod,
},
}
Context("When kubelet restarts", func() {
// Test table housing the It() title string and test spec. runTest is type testBody, defined at
// the start of this file. To add tests, define a function mirroring the testBody signature and assign
// to runTest.
var (
clientPod *v1.Pod
pvc *v1.PersistentVolumeClaim
)
BeforeEach(func() {
framework.Logf("Initializing pod and pvcs for test")
clientPod, pvc = createPodPVCFromSC(f, c, ns)
})
for _, test := range disruptiveTestTable {
func(t disruptiveTest) {
It(t.testItStmt, func() {
By("Executing Spec")
t.runTest(c, f, clientPod)
})
}(test)
}
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, nil)
pvc, clientPod = nil, nil
})
})
})
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) {
var err error
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
pvc := newClaim(test, ns, "default")
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test")
return pod, pvc
}


@@ -1,173 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Mounted volume expand[Slow]", func() {
var (
c clientset.Interface
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
)
f := framework.NewDefaultFramework("mounted-volume-expand")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
nodeKey = "mounted_volume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
resizableSc, err = createResizableStorageClass(test, ns, "resizing", c)
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())
pvc = newClaim(test, ns, "default")
pvc.Spec.StorageClassName = &resizableSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
})
It("Should verify mounted devices can be resized", func() {
By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a deployment with the provisioned volume")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
Expect(err).NotTo(HaveOccurred(), "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
Expect(pvc).NotTo(BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name)
}
By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
By("Getting a pod from deployment")
podList, err := framework.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
By("Deleting the pod from deployment")
err = framework.DeletePodWithWait(f, c, &pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
By("Waiting for deployment to create new pod")
pod, err = waitForDeploymentToRecreatePod(c, deployment)
Expect(err).NotTo(HaveOccurred(), "While waiting for pod to be recreated")
By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
})
})
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
for _, pod := range podList.Items {
switch pod.Status.Phase {
case v1.PodRunning:
runningPod = pod
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
return false, nil
}
return false, err
})
return runningPod, waitErr
}


@@ -1,285 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
type disruptiveTest struct {
testItStmt string
runTest testBody
}
const (
MinNodes = 2
)
var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
f := framework.NewDefaultFramework("disruptive-pv")
var (
c clientset.Interface
ns string
nfsServerPod *v1.Pod
nfsPVconfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
nfsServerIP, clientNodeIP string
clientNode *v1.Node
volLabel labels.Set
selector *metav1.LabelSelector
)
BeforeEach(func() {
// To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
framework.SkipUnlessNodeCountIsAtLeast(MinNodes)
framework.SkipIfProviderIs("local")
c = f.ClientSet
ns = f.Namespace.Name
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod.
_, nfsServerPod, nfsServerIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
nfsPVconfig = framework.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: nfsServerIP,
Path: "/exports",
ReadOnly: false,
},
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
// Get the first ready node IP that is not hosting the NFS pod.
if clientNodeIP == "" {
framework.Logf("Designating test node")
nodes := framework.GetReadySchedulableNodesOrDie(c)
for _, node := range nodes.Items {
if node.Name != nfsServerPod.Spec.NodeName {
clientNode = &node
clientNodeIP = framework.GetNodeExternalIP(clientNode)
break
}
}
Expect(clientNodeIP).NotTo(BeEmpty())
}
})
AfterEach(func() {
framework.DeletePodWithWait(f, c, nfsServerPod)
})
Context("when kube-controller-manager restarts", func() {
var (
diskName1, diskName2 string
err error
pvConfig1, pvConfig2 framework.PersistentVolumeConfig
pv1, pv2 *v1.PersistentVolume
pvSource1, pvSource2 *v1.PersistentVolumeSource
pvc1, pvc2 *v1.PersistentVolumeClaim
clientPod *v1.Pod
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessSSHKeyPresent()
By("Initializing first PD with PVPVC binding")
pvSource1, diskName1 = framework.CreateGCEVolume()
Expect(err).NotTo(HaveOccurred())
pvConfig1 = framework.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volLabel,
PVSource: *pvSource1,
Prebind: nil,
}
pv1, pvc1, err = framework.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1))
By("Initializing second PD with PVPVC binding")
pvSource2, diskName2 = framework.CreateGCEVolume()
Expect(err).NotTo(HaveOccurred())
pvConfig2 = framework.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volLabel,
PVSource: *pvSource2,
Prebind: nil,
}
pv2, pvc2, err = framework.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))
By("Attaching both PVC's to a single pod")
clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
// Delete client/user pod first
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
// Delete PV and PVCs
if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv1, pvc1 = nil, nil
if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv2, pvc2 = nil, nil
// Delete the actual disks
if diskName1 != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName1))
}
if diskName2 != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName2))
}
})
It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() {
By("Deleting PVC for volume 2")
err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
Expect(err).NotTo(HaveOccurred())
pvc2 = nil
By("Restarting the kube-controller-manager")
err = framework.RestartControllerManager()
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForControllerManagerUp()
Expect(err).NotTo(HaveOccurred())
framework.Logf("kube-controller-manager restarted")
By("Observing the kube-controller-manager healthy for at least 2 minutes")
// Continue checking for 2 minutes to make sure kube-controller-manager is healthy
err = framework.CheckForControllerManagerHealthy(2 * time.Minute)
Expect(err).NotTo(HaveOccurred())
})
})
Context("when kubelet restarts", func() {
var (
clientPod *v1.Pod
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
)
BeforeEach(func() {
framework.Logf("Initializing test spec")
clientPod, pv, pvc = initTestCase(f, c, nfsPVconfig, pvcConfig, ns, clientNode.Name)
})
AfterEach(func() {
framework.Logf("Tearing down test spec")
tearDownTestCase(c, f, ns, clientPod, pvc, pv)
pv, pvc, clientPod = nil, nil, nil
})
// Test table housing the It() title string and test spec. runTest is type testBody, defined at
// the start of this file. To add tests, define a function mirroring the testBody signature and assign
// to runTest.
disruptiveTestTable := []disruptiveTest{
{
testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.",
runTest: utils.TestKubeletRestartsAndRestoresMount,
},
{
testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromDeletedPod,
},
{
testItStmt: "Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromForceDeletedPod,
},
}
// Test loop executes each disruptiveTest iteratively.
for _, test := range disruptiveTestTable {
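// Pass the loop variable into an immediately-invoked closure so each It() captures its own copy of the test case.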
func(t disruptiveTest) {
It(t.testItStmt, func() {
By("Executing Spec")
t.runTest(c, f, clientPod)
})
}(test)
}
})
})
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
// by the test.
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
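// If a later step in this function fails, the deferred call below cleans up the PV and PVC created here.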
defer func() {
if err != nil {
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)
}
}()
Expect(err).NotTo(HaveOccurred())
pod := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
pod.Spec.NodeName = nodeName
framework.Logf("Creating NFS client pod.")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
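// Remove the client pod if a subsequent step fails before the caller's teardown runs.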
defer func() {
if err != nil {
framework.DeletePodWithWait(f, c, pod)
}
}()
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
// Return created api objects
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pod, pv, pvc
}
// tearDownTestCase destroys resources created by initTestCase.
func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
// Ignore deletion errors. Failing on them will interrupt test cleanup.
framework.DeletePodWithWait(f, c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
if pv != nil {
framework.DeletePersistentVolume(c, pv.Name)
}
}

View File

@ -1,649 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
mathrand "math/rand"
"strings"
"time"
"google.golang.org/api/googleapi"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
gcePDDetachTimeout = 10 * time.Minute
gcePDDetachPollTime = 10 * time.Second
nodeStatusTimeout = 10 * time.Minute
nodeStatusPollTime = 1 * time.Second
podEvictTimeout = 2 * time.Minute
maxReadRetry = 3
minNodes = 2
)
var _ = utils.SIGDescribe("Pod Disks", func() {
var (
ns string
cs clientset.Interface
podClient v1core.PodInterface
nodeClient v1core.NodeInterface
host0Name types.NodeName
host1Name types.NodeName
nodes *v1.NodeList
)
f := framework.NewDefaultFramework("pod-disks")
BeforeEach(func() {
framework.SkipUnlessNodeCountIsAtLeast(minNodes)
cs = f.ClientSet
ns = f.Namespace.Name
framework.SkipIfMultizone(cs)
podClient = cs.CoreV1().Pods(ns)
nodeClient = cs.CoreV1().Nodes()
nodes = framework.GetReadySchedulableNodesOrDie(cs)
Expect(len(nodes.Items)).To(BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes))
host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
mathrand.Seed(time.Now().UTC().UnixNano())
})
Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() {
const (
podDefaultGrace = "default (30s)"
podImmediateGrace = "immediate (0s)"
)
var readOnlyMap = map[bool]string{
true: "read-only",
false: "RW",
}
type testT struct {
descr string // It description
readOnly bool // true means pd is read-only
deleteOpt *metav1.DeleteOptions // pod delete option
}
tests := []testT{
{
descr: podImmediateGrace,
readOnly: false,
deleteOpt: metav1.NewDeleteOptions(0),
},
{
descr: podDefaultGrace,
readOnly: false,
deleteOpt: &metav1.DeleteOptions{},
},
{
descr: podImmediateGrace,
readOnly: true,
deleteOpt: metav1.NewDeleteOptions(0),
},
{
descr: podDefaultGrace,
readOnly: true,
deleteOpt: &metav1.DeleteOptions{},
},
}
for _, t := range tests {
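// Copy the fields used inside the It() closure so each spec sees its own values rather than the shared loop variable.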
podDelOpt := t.deleteOpt
readOnly := t.readOnly
readOnlyTxt := readOnlyMap[readOnly]
It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
if readOnly {
framework.SkipIfProviderIs("aws")
}
By("creating PD")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
var fmtPod *v1.Pod
if readOnly {
// if all test pods are read-only then a RW pod is needed to format the PD first
By("creating RW fmt Pod to ensure PD is formatted")
fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
_, err = podClient.Create(fmtPod)
framework.ExpectNoError(err, "Failed to create fmtPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name))
By("deleting the fmtPod")
framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
framework.Logf("deleted fmtPod %q", fmtPod.Name)
By("waiting for PD to detach")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
}
// prepare to create two test pods on separate nodes
host0Pod := testPDPod([]string{diskName}, host0Name, readOnly, 1)
host1Pod := testPDPod([]string{diskName}, host1Name, readOnly, 1)
defer func() {
// Teardown should do nothing unless test failed
By("defer: cleaning up PD-RW test environment")
framework.Logf("defer cleanup errors can usually be ignored")
if fmtPod != nil {
podClient.Delete(fmtPod.Name, podDelOpt)
}
podClient.Delete(host0Pod.Name, podDelOpt)
podClient.Delete(host1Pod.Name, podDelOpt)
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
By("creating host0Pod on node0")
_, err = podClient.Create(host0Pod)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
var containerName, testFile, testFileContents string
if !readOnly {
By("writing content to host0Pod on node0")
containerName = "mycontainer"
testFile = "/testpd1/tracker"
testFileContents = fmt.Sprintf("%v", mathrand.Int())
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
By("verifying PD is present in node0's VolumeInUse list")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
By("deleting host0Pod") // delete this pod before creating next pod
framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
framework.Logf("deleted host0Pod %q", host0Pod.Name)
}
By("creating host1Pod on node1")
_, err = podClient.Create(host1Pod)
framework.ExpectNoError(err, "Failed to create host1Pod")
framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name))
framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
if readOnly {
By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
framework.Logf("deleted host0Pod %q", host0Pod.Name)
} else {
By("verifying PD contents in host1Pod")
verifyPDContentsViaContainer(f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
framework.Logf("verified PD contents in pod %q", host1Pod.Name)
By("verifying PD is removed from node0")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
framework.Logf("PD %q removed from node %q's VolumeInUse list", diskName, host1Pod.Name)
}
By("deleting host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
framework.Logf("deleted host1Pod %q", host1Pod.Name)
By("Test completed successfully, waiting for PD to detach from both nodes")
waitForPDDetach(diskName, host0Name)
waitForPDDetach(diskName, host1Name)
})
}
})
Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow]", func() {
type testT struct {
numContainers int
numPDs int
repeatCnt int
}
tests := []testT{
{
numContainers: 4,
numPDs: 1,
repeatCnt: 3,
},
{
numContainers: 1,
numPDs: 2,
repeatCnt: 3,
},
}
for _, t := range tests {
numPDs := t.numPDs
numContainers := t.numContainers
It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
var host0Pod *v1.Pod
var err error
fileAndContentToVerify := make(map[string]string)
diskNames := make([]string, 0, numPDs)
By(fmt.Sprintf("creating %d PD(s)", numPDs))
for i := 0; i < numPDs; i++ {
name, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, fmt.Sprintf("Error creating PD %d", i))
diskNames = append(diskNames, name)
}
defer func() {
// Teardown should do nothing unless test failed.
By("defer: cleaning up PD-RW test environment")
framework.Logf("defer cleanup errors can usually be ignored")
if host0Pod != nil {
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
}
for _, diskName := range diskNames {
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}
}()
for i := 0; i < t.repeatCnt; i++ { // "rapid" repeat loop
framework.Logf("PD Read/Writer Iteration #%v", i)
By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers))
host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
_, err = podClient.Create(host0Pod)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
containerName := "mycontainer"
if numContainers > 1 {
containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
}
for x := 1; x <= numPDs; x++ {
testFile := fmt.Sprintf("/testpd%d/tracker%d", x, i)
testFileContents := fmt.Sprintf("%v", mathrand.Int())
fileAndContentToVerify[testFile] = testFileContents
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
framework.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
}
By("verifying PD contents via a container")
if numContainers > 1 {
containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
}
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
}
By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs))
for _, diskName := range diskNames {
waitForPDDetach(diskName, host0Name)
}
})
}
})
Context("detach in a disrupted environment [Slow] [Disruptive]", func() {
const (
deleteNode = 1 // delete physical node
deleteNodeObj = 2 // delete node's api object only
evictPod = 3 // evict host0Pod on node0
)
type testT struct {
descr string // It description
disruptOp int // disruptive operation performed on target node
}
tests := []testT{
{
descr: "node is deleted",
disruptOp: deleteNode,
},
{
descr: "node's API object is deleted",
disruptOp: deleteNodeObj,
},
{
descr: "pod is evicted",
disruptOp: evictPod,
},
}
for _, t := range tests {
disruptOp := t.disruptOp
It(fmt.Sprintf("when %s", t.descr), func() {
framework.SkipUnlessProviderIs("gce")
origNodeCnt := len(nodes.Items) // healthy nodes running kubelet
By("creating a pd")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err, "Error creating a pd")
targetNode := &nodes.Items[0] // for node delete ops
host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
containerName := "mycontainer"
defer func() {
By("defer: cleaning up PD-RW test env")
framework.Logf("defer cleanup errors can usually be ignored")
By("defer: delete host0Pod")
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
By("defer: detach and delete PDs")
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
if disruptOp == deleteNode || disruptOp == deleteNodeObj {
if disruptOp == deleteNodeObj {
targetNode.ObjectMeta.SetResourceVersion("0")
// need to set the resource version or else the Create() fails
By("defer: re-create host0 node object")
_, err := nodeClient.Create(targetNode)
framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name))
}
By("defer: verify the number of ready nodes")
numNodes := countReadyNodes(cs, host0Name)
// if this defer is reached due to an Expect then nested
// Expects are lost, so use Failf here
if numNodes != origNodeCnt {
framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
}
}
}()
By("creating host0Pod on node0")
_, err = podClient.Create(host0Pod)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
By("waiting for host0Pod to be running")
framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name))
By("writing content to host0Pod")
testFile := "/testpd1/tracker"
testFileContents := fmt.Sprintf("%v", mathrand.Int())
framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
By("verifying PD is present in node0's VolumeInUse list")
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
if disruptOp == deleteNode {
By("getting gce instances")
gceCloud, err := framework.GetGCECloud()
framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
Expect(strings.Contains(string(output), string(host0Name))).To(BeTrue())
By("deleting host0")
err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err))
By("expecting host0 node to be re-created")
numNodes := countReadyNodes(cs, host0Name)
Expect(numNodes).To(Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
Expect(strings.Contains(string(output), string(host0Name))).To(BeFalse())
} else if disruptOp == deleteNodeObj {
By("deleting host0's node api object")
framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
} else if disruptOp == evictPod {
evictTarget := &policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: host0Pod.Name,
Namespace: ns,
},
}
By("evicting host0Pod")
err = wait.PollImmediate(framework.Poll, podEvictTimeout, func() (bool, error) {
err = cs.CoreV1().Pods(ns).Evict(evictTarget)
if err != nil {
return false, nil
} else {
return true, nil
}
})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout))
}
By("waiting for pd to detach from host0")
waitForPDDetach(diskName, host0Name)
})
}
})
It("should be able to delete a non-existent PD without error", func() {
framework.SkipUnlessProviderIs("gce")
By("delete a PD")
framework.ExpectNoError(framework.DeletePDWithRetry("non-exist"))
})
})
func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
framework.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
nodes := framework.GetReadySchedulableNodesOrDie(c)
return len(nodes.Items)
}
func verifyPDContentsViaContainer(f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
for filePath, expectedContents := range fileAndContentToVerify {
var value string
// Add a retry to avoid transient failures when reading the content
for i := 0; i < maxReadRetry; i++ {
v, err := f.ReadFileViaContainer(podName, containerName, filePath)
value = v
if err != nil {
framework.Logf("Error reading file: %v", err)
}
framework.ExpectNoError(err)
framework.Logf("Read file %q with content: %v (iteration %d)", filePath, v, i)
if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) {
framework.Logf("Warning: read content <%q> does not match execpted content <%q>.", v, expectedContents)
size, err := f.CheckFileSizeViaContainer(podName, containerName, filePath)
if err != nil {
framework.Logf("Error checking file size: %v", err)
}
framework.Logf("Check file %q size: %q", filePath, size)
} else {
break
}
}
Expect(strings.TrimSpace(value)).To(Equal(strings.TrimSpace(expectedContents)))
}
}
func detachPD(nodeName types.NodeName, pdName string) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
err = gceCloud.DetachDisk(pdName, nodeName)
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") {
// PD already detached, ignore error.
return nil
}
framework.Logf("Error detaching PD %q: %v", pdName, err)
}
return err
} else if framework.TestContext.Provider == "aws" {
client := ec2.New(session.New())
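// The PD name for AWS is a path-like identifier; its last segment is the EBS volume ID expected by DetachVolume.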
tokens := strings.Split(pdName, "/")
awsVolumeID := tokens[len(tokens)-1]
request := ec2.DetachVolumeInput{
VolumeId: aws.String(awsVolumeID),
}
_, err := client.DetachVolume(&request)
if err != nil {
return fmt.Errorf("error detaching EBS volume: %v", err)
}
return nil
} else {
return fmt.Errorf("Provider does not support volume detaching")
}
}
// Returns a pod spec suitable for an api Create call. Handles gce, gke and aws providers only and
// fails the test if a different provider is supplied.
// The first container name is hard-coded to "mycontainer". Subsequent containers are named:
// "mycontainer<number>" where <number> is 1..numContainers. Note that if there is only one container
// its name has no number.
// Containers' volumeMounts are hard-coded to "/testpd<number>" where <number> is 1..len(diskNames).
func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *v1.Pod {
// escape if not a supported provider
if !(framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" ||
framework.TestContext.Provider == "aws") {
framework.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider))
}
containers := make([]v1.Container, numContainers)
for i := range containers {
containers[i].Name = "mycontainer"
if numContainers > 1 {
containers[i].Name = fmt.Sprintf("mycontainer%v", i+1)
}
containers[i].Image = "busybox"
containers[i].Command = []string{"sleep", "6000"}
containers[i].VolumeMounts = make([]v1.VolumeMount, len(diskNames))
for k := range diskNames {
containers[i].VolumeMounts[k].Name = fmt.Sprintf("testpd%v", k+1)
containers[i].VolumeMounts[k].MountPath = fmt.Sprintf("/testpd%v", k+1)
}
containers[i].Resources.Limits = v1.ResourceList{}
containers[i].Resources.Limits[v1.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI)
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "pd-test-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: containers,
NodeName: string(targetNode),
},
}
pod.Spec.Volumes = make([]v1.Volume, len(diskNames))
for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
if framework.TestContext.Provider == "aws" {
pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
FSType: "ext4",
ReadOnly: readOnly,
},
}
} else { // "gce" or "gke"
pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext4",
ReadOnly: readOnly,
},
}
}
}
return pod
}
// Waits for the specified PD to detach from the specified hostName
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
gceCloud, err := framework.GetGCECloud()
if err != nil {
return err
}
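// Poll the cloud provider every gcePDDetachPollTime until the disk reports detached or gcePDDetachTimeout elapses.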
for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
diskAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
if err != nil {
framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
return err
}
if !diskAttached {
// Specified disk does not appear to be attached to specified node
framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
return nil
}
framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
}
return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, nodeName, gcePDDetachTimeout)
}
return nil
}
func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
for _, host := range hosts {
framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
detachPD(host, diskName)
By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
waitForPDDetach(diskName, host)
}
By(fmt.Sprintf("Deleting PD %q", diskName))
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
}
func waitForPDInVolumesInUse(
nodeClient v1core.NodeInterface,
diskName string,
nodeName types.NodeName,
timeout time.Duration,
shouldExist bool) error {
logStr := "to contain"
if !shouldExist {
logStr = "to NOT contain"
}
framework.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
nodeObj, err := nodeClient.Get(string(nodeName), metav1.GetOptions{})
if err != nil || nodeObj == nil {
framework.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
continue
}
exists := false
for _, volumeInUse := range nodeObj.Status.VolumesInUse {
volumeInUseStr := string(volumeInUse)
if strings.Contains(volumeInUseStr, diskName) {
if shouldExist {
framework.Logf("Found PD %q in node %q's VolumesInUse Status: %q", diskName, nodeName, volumeInUseStr)
return nil
}
exists = true
}
}
if !shouldExist && !exists {
framework.Logf("Verified PD %q does not exist in node %q's VolumesInUse Status.", diskName, nodeName)
return nil
}
}
return fmt.Errorf("Timed out waiting for node %s VolumesInUse Status %s diskName %q", nodeName, logStr, diskName)
}

View File

@ -1,162 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// verifyGCEDiskAttached performs a sanity check to verify that the PD is attached to the node
func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
Expect(err).NotTo(HaveOccurred())
return isAttached
}
// initializeGCETestSpec creates a PV, PVC, and client Pod that will run until killed by the test or its cleanup.
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
By("Creating the PV and PVC")
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("Creating the Client Pod")
clientPod, err := framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
return clientPod, pv, pvc
}
// Testing configurations of a single PV/PVC pair attached to a GCE PD
var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
var (
c clientset.Interface
diskName string
ns string
err error
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
clientPod *v1.Pod
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
node types.NodeName
)
f := framework.NewDefaultFramework("pv")
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing Test Spec")
diskName, err = framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "gce-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext3",
ReadOnly: false,
},
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, pvcConfig, false)
node = types.NodeName(clientPod.Spec.NodeName)
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources")
if c != nil {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
clientPod, pv, pvc, node = nil, nil, nil, ""
if diskName != "" {
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
}
}
})
// Attach a persistent disk to a pod using a PVC.
// Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue())
By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
By("Verifying Persistent Disk detach")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
})
// Attach a persistent disk to a pod using a PVC.
// Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Persistent Volume")
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue())
By("Deleting the client pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
By("Verifying Persistent Disk detaches")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
})
// Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted.
It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() {
By("Deleting the Namespace")
err := c.CoreV1().Namespaces().Delete(ns, nil)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Verifying Persistent Disk detaches")
framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
})
})

File diff suppressed because it is too large

View File

@ -1,303 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
// phase. Note: the PV is deleted in the AfterEach, not here.
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have bound correctly
By("Validating the PV-PVC binding")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
By("Checking pod has write access to PersistentVolume")
framework.ExpectNoError(framework.CreateWaitAndDeletePod(f, c, ns, pvc))
// 3. delete the PVC, wait for PV to become "Released"
By("Deleting the PVC to invoke the reclaim policy.")
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
}
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
// PV. Ensure each step succeeds.
// Note: the PV is deleted in the AfterEach, not here.
// Note: this func is serialized; we wait for each pod to be deleted before creating the
// next pod. Adding concurrency is a TODO item.
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols framework.PVMap, claims framework.PVCMap, expectPhase v1.PersistentVolumePhase) error {
var err error
// 1. verify each PV permits write access to a client pod
By("Checking pod has write access to PersistentVolumes")
for pvcKey := range claims {
pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err)
}
if len(pvc.Spec.VolumeName) == 0 {
continue // claim is not bound
}
// sanity test to ensure our maps are in sync
_, found := pvols[pvc.Spec.VolumeName]
if !found {
return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
}
// TODO: currently a serialized test of each PV
if err = framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc); err != nil {
return err
}
}
// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
By("Deleting PVCs to invoke reclaim policy")
if err = framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
return err
}
return nil
}
var _ = utils.SIGDescribe("PersistentVolumes", func() {
// global vars for the Context()s and It()'s below
f := framework.NewDefaultFramework("pv")
var (
c clientset.Interface
ns string
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
err error
)
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
})
// Testing configurations of a single PV/PVC pair, multiple evenly paired PVs/PVCs,
// and multiple unevenly paired PVs/PVCs
Describe("NFS", func() {
var (
nfsServerPod *v1.Pod
serverIP string
)
BeforeEach(func() {
_, nfsServerPod, serverIP = framework.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/exports",
ReadOnly: false,
},
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
})
AfterEach(func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
pv, pvc = nil, nil
pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{}
})
Context("with Single PV - PVC pairs", func() {
// Note: this is the only code where the pv is deleted.
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
// Individual tests follow:
//
// Create an nfs PV, then a claim that matches the PV, and a pod that
// contains the claim. Verify that the PV and PVC bind correctly, and
// that the pod can write to the nfs volume.
It("should create a non-pre-bound PV and PVC: test write access ", func() {
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred())
completeTest(f, c, ns, pv, pvc)
})
// Create a claim first, then a nfs PV that matches the claim, and a
// pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
It("create a PVC and non-pre-bound PV: test write access", func() {
pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred())
completeTest(f, c, ns, pv, pvc)
})
// Create a claim first, then a pre-bound nfs PV that matches the claim,
// and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
It("create a PVC and a pre-bound PV: test write access", func() {
pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
completeTest(f, c, ns, pv, pvc)
})
// Create a nfs PV first, then a pre-bound PVC that matches the PV,
// and a pod that contains the claim. Verify that the PV and PVC bind
// correctly, and that the pod can write to the nfs volume.
It("create a PV and a pre-bound PVC: test write access", func() {
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
completeTest(f, c, ns, pv, pvc)
})
})
// Create multiple PVs and PVCs, all in the same namespace. The PV-PVC pairs are
// verified to bind, though it is not known in advance which PV will bind to
// which claim. For each PV-PVC pair create a pod that writes to the NFS mount.
// Note: when the number of PVs exceeds the number of PVCs the max binding wait
// time will occur for each PV in excess. This is expected but the delta
// should be kept small so that the tests aren't unnecessarily slow.
// Note: future tests may wish to incorporate the following:
// a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods
// in different namespaces.
Context("with multiple PVs and PVCs all in same ns", func() {
// scope the pv and pvc maps to be available in the AfterEach
// note: these maps are created fresh in CreatePVsPVCs()
var pvols framework.PVMap
var claims framework.PVCMap
AfterEach(func() {
framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
errs := framework.PVPVCMapCleanup(c, ns, pvols, claims)
if len(errs) > 0 {
errmsg := []string{}
for _, e := range errs {
errmsg = append(errmsg, e.Error())
}
framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
}
})
// Create 2 PVs and 4 PVCs.
// Note: PVs are created before claims and no pre-binding
It("should create 2 PVs and 4 PVCs: test write access", func() {
numPVs, numPVCs := 2, 4
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
// Create 3 PVs and 3 PVCs.
// Note: PVs are created before claims and no pre-binding
It("should create 3 PVs and 3 PVCs: test write access", func() {
numPVs, numPVCs := 3, 3
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
// Create 4 PVs and 2 PVCs.
// Note: PVs are created before claims and no pre-binding.
It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() {
numPVs, numPVCs := 4, 2
pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
})
// This Context isolates and tests the "Recycle" reclaim behavior. On deprecation of the
// Recycler, this entire context can be removed without affecting the test suite or leaving behind
// dead code.
Context("when invoking the Recycle reclaim policy", func() {
BeforeEach(func() {
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred(), "BeforeEach: Failed to create PV/PVC")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
// This It() tests a scenario where a PV is written to by a Pod, recycled, and then the volume is checked
// for files. If files are found, the checking Pod fails, failing the test. Otherwise, the pod
// (and the test) succeeds.
It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() {
By("Writing to the volume.")
pod := framework.MakeWritePod(ns, pvc)
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
By("Deleting the claim")
framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
By("Re-mounting the volume.")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
pvc, err = framework.CreatePVC(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)
// If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
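// The pod's command exits 0 only when the recycled mount point is empty, so pod success implies a clean volume.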
pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.Logf("Pod exited without failure; the volume has been recycled.")
})
})
})
})

View File

@ -1,126 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("PV Protection", func() {
var (
client clientset.Interface
nameSpace string
err error
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
)
f := framework.NewDefaultFramework("pv-protection")
BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: nameSpace}
selector = metav1.SetAsLabelSelector(volLabel)
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "hostpath-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp/data",
},
},
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
By("Creating a PV")
// make the pv definitions
pv = framework.MakePersistentVolume(pvConfig)
// create the PV
pv, err = client.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred(), "Error creating PV")
By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While getting PV status")
Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue())
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() {
By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PV")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
})
It("Verify that PV bound to a PVC is not removed immediately", func() {
By("Creating a PVC")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace)
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating PVC")
By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PV")
By("Checking that the PV status is Terminating")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PV status")
Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Deleting the PVC that is bound to the PV")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
})
})

View File

@ -1,143 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("PVC Protection", func() {
var (
client clientset.Interface
nameSpace string
err error
pvc *v1.PersistentVolumeClaim
pvcCreatedAndNotDeleted bool
)
f := framework.NewDefaultFramework("pvc-protection")
BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
By("Creating a PVC")
suffix := "pvc-protection"
defaultSC := getDefaultStorageClassName(client)
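// Bind the claim to the cluster's default StorageClass so dynamic provisioning supplies the backing volume.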
testStorageClass := storageClassTest{
claimSize: "1Gi",
}
pvc = newClaim(testStorageClass, nameSpace, suffix)
pvc.Spec.StorageClassName = &defaultSC
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating PVC")
pvcCreatedAndNotDeleted = true
By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
By("Checking that PVC Protection finalizer is set")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While getting PVC status")
Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue())
})
AfterEach(func() {
if pvcCreatedAndNotDeleted {
framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace)
}
})
It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() {
By("Deleting the PVC")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
It("Verify that PVC in active use by a pod is not removed immediately", func() {
By("Creating a Pod that becomes Running and therefore is actively using the PVC")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pod, err := framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "While creating pod that uses the PVC or waiting for the Pod to become Running")
By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
By("Checking that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Deleting the pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() {
By("Creating first Pod that becomes Running and therefore is actively using the PVC")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
firstPod, err := framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "While creating pod that uses the PVC or waiting for the Pod to become Running")
By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
By("Checking that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, pvcClaims, false, "")
Expect(err2).NotTo(HaveOccurred(), "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
By("Deleting the second pod that uses the PVC that is being deleted")
err = framework.DeletePodWithWait(f, client, secondPod)
Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
By("Checking again that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Deleting the first pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, firstPod)
Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
})
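// waitForPVCTerminating is a minimal illustrative helper, not part of the original file:
// it polls until the claim carries a DeletionTimestamp, i.e. it has entered the Terminating
// state asserted by the specs above. The poll interval, timeout, and the time and
// k8s.io/apimachinery/pkg/util/wait imports are assumptions of this sketch.
func waitForPVCTerminating(client clientset.Interface, ns, name string) error {
	return wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
		claim, err := client.CoreV1().PersistentVolumeClaims(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// A non-nil DeletionTimestamp means the claim is Terminating (held back by its protection finalizer).
		return claim.ObjectMeta.DeletionTimestamp != nil, nil
	})
}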

View File

@ -1,451 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"fmt"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
pvDeletionTimeout = 3 * time.Minute
statefulSetReadyTimeout = 3 * time.Minute
)
var _ = utils.SIGDescribe("Regional PD", func() {
f := framework.NewDefaultFramework("regional-pd")
// filled in BeforeEach
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipUnlessMultizone(c)
})
Describe("RegionalPD", func() {
It("should provision storage [Slow]", func() {
testVolumeProvisioning(c, ns)
})
It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() {
testZonalFailover(c, ns)
})
})
})
func testVolumeProvisioning(c clientset.Interface, ns string) {
cloudZones := getTwoRandomZones(c)
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []storageClassTest{
{
name: "HDD Regional PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(cloudZones, ","),
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
}
return verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
},
},
{
name: "HDD Regional PD with auto zone selection on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
}
zones, err := framework.GetClusterZones(c)
if err != nil {
return err
}
return verifyZonesInPV(volume, zones, false /* match */)
},
},
}
for _, test := range tests {
class := newStorageClass(test, ns, "" /* suffix */)
claim := newClaim(test, ns, "" /* suffix */)
claim.Spec.StorageClassName = &class.Name
testDynamicProvisioning(test, c, claim, class)
}
}
func testZonalFailover(c clientset.Interface, ns string) {
nodes := framework.GetReadySchedulableNodesOrDie(c)
nodeCount := len(nodes.Items)
cloudZones := getTwoRandomZones(c)
class := newRegionalStorageClass(ns, cloudZones)
claimTemplate := newClaimTemplate(ns)
claimTemplate.Spec.StorageClassName = &class.Name
statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns)
By("creating a StorageClass " + class.Name)
_, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil),
"Error deleting StorageClass %s", class.Name)
}()
By("creating a StatefulSet")
_, err = c.CoreV1().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
_, err = c.AppsV1().StatefulSets(ns).Create(statefulSet)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
// typically this claim has already been deleted
framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(statefulSet.Name, nil /* options */),
"Error deleting StatefulSet %s", statefulSet.Name)
framework.Logf("deleting claims in namespace %s", ns)
pvc := getPVC(c, ns, regionalPDLabels)
framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil),
"Error deleting claim %s.", pvc.Name)
if pvc.Spec.VolumeName != "" {
err = framework.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout)
if err != nil {
framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
}
}
}()
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
if err != nil {
pod := getPod(c, ns, regionalPDLabels)
Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(),
"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
Expect(err).NotTo(HaveOccurred())
}
pvc := getPVC(c, ns, regionalPDLabels)
By("getting zone information from pod")
pod := getPod(c, ns, regionalPDLabels)
nodeName := pod.Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
podZone := node.Labels[apis.LabelZoneFailureDomain]
// TODO (verult) Consider using node taints to simulate zonal failure instead.
By("deleting instance group belonging to pod's zone")
// Asynchronously detect a pod reschedule is triggered during/after instance group deletion.
waitStatus := make(chan error)
go func() {
waitStatus <- waitForStatefulSetReplicasNotReady(statefulSet.Name, ns, c)
}()
cloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
instanceGroupName := framework.TestContext.CloudConfig.NodeInstanceGroup
instanceGroup, err := cloud.GetInstanceGroup(instanceGroupName, podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group %s in zone %s", instanceGroupName, podZone)
templateName, err := framework.GetManagedInstanceGroupTemplateName(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group template in zone %s", podZone)
err = framework.DeleteManagedInstanceGroup(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error deleting instance group in zone %s", podZone)
defer func() {
framework.Logf("recreating instance group %s", instanceGroup.Name)
framework.ExpectNoError(framework.CreateManagedInstanceGroup(instanceGroup.Size, podZone, templateName),
"Error recreating instance group %s in zone %s", instanceGroup.Name, podZone)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, framework.RestartNodeReadyAgainTimeout),
"Error waiting for nodes from the new instance group to become ready.")
}()
err = <-waitStatus
Expect(err).ToNot(HaveOccurred(), "Error waiting for replica to be deleted during failover: %v", err)
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
if err != nil {
pod := getPod(c, ns, regionalPDLabels)
Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(),
"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
Expect(err).NotTo(HaveOccurred())
}
By("verifying the same PVC is used by the new pod")
Expect(getPVC(c, ns, regionalPDLabels).Name).To(Equal(pvc.Name),
"The same PVC should be used after failover.")
By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
pod = getPod(c, ns, regionalPDLabels)
logs, err := framework.GetPodLogs(c, ns, pod.Name, "")
Expect(err).NotTo(HaveOccurred(),
"Error getting logs from pod %s in namespace %s", pod.Name, ns)
lineCount := len(strings.Split(strings.TrimSpace(logs), "\n"))
expectedLineCount := 2
Expect(lineCount).To(Equal(expectedLineCount),
"Line count of the written file should be %d.", expectedLineCount)
// Verify the pod is scheduled in the other zone.
By("verifying the pod is scheduled in a different zone.")
var otherZone string
if cloudZones[0] == podZone {
otherZone = cloudZones[1]
} else {
otherZone = cloudZones[0]
}
nodeName = pod.Spec.NodeName
node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
newPodZone := node.Labels[apis.LabelZoneFailureDomain]
Expect(newPodZone).To(Equal(otherZone),
"The pod should be scheduled in zone %s after all nodes in zone %s have been deleted", otherZone, podZone)
}
func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim {
selector := labels.Set(pvcLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(pvcList.Items)).To(Equal(1), "There should be exactly 1 PVC matched.")
return &pvcList.Items[0]
}
func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.Pod {
selector := labels.Set(podLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items)).To(Equal(1), "There should be exactly 1 pod matched.")
return &podList.Items[0]
}
// Generates the spec of a StatefulSet with 1 replica that mounts a Regional PD.
func newStatefulSet(claimTemplate *v1.PersistentVolumeClaim, ns string) (sts *appsv1.StatefulSet, svc *v1.Service, labels map[string]string) {
var replicas int32 = 1
labels = map[string]string{"app": "regional-pd-workload"}
svc = &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "regional-pd-service",
Namespace: ns,
Labels: labels,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
Name: "web",
}},
ClusterIP: v1.ClusterIPNone,
Selector: labels,
},
}
sts = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "regional-pd-sts",
Namespace: ns,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
ServiceName: svc.Name,
Replicas: &replicas,
Template: *newPodTemplate(labels),
VolumeClaimTemplates: []v1.PersistentVolumeClaim{*claimTemplate},
},
}
return
}
func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
return &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
// This container writes its pod name to a file in the Regional PD
// and prints the entire file to stdout.
{
Name: "busybox",
Image: "k8s.gcr.io/busybox",
Command: []string{"sh", "-c"},
Args: []string{
"echo ${POD_NAME} >> /mnt/data/regional-pd/pods.txt;" +
"cat /mnt/data/regional-pd/pods.txt;" +
"sleep 3600;",
},
Env: []v1.EnvVar{{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
}},
Ports: []v1.ContainerPort{{
ContainerPort: 80,
Name: "web",
}},
VolumeMounts: []v1.VolumeMount{{
Name: "regional-pd-vol",
MountPath: "/mnt/data/regional-pd",
}},
},
},
},
}
}
func newClaimTemplate(ns string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "regional-pd-vol",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
}
}
func newRegionalStorageClass(namespace string, zones []string) *storage.StorageClass {
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: namespace + "-sc",
},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(zones, ","),
"replication-type": "regional-pd",
},
}
}
func getTwoRandomZones(c clientset.Interface) []string {
zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred())
Expect(zones.Len()).To(BeNumerically(">=", 2),
"The test should only be run in multizone clusters.")
zone1, _ := zones.PopAny()
zone2, _ := zones.PopAny()
return []string{zone1, zone2}
}
// Waits for at least 1 replica of a StatefulSet to become not ready or until timeout occurs, whichever comes first.
func waitForStatefulSetReplicasNotReady(statefulSetName, ns string, c clientset.Interface) error {
const poll = 3 * time.Second
const timeout = statefulSetReadyTimeout
framework.Logf("Waiting up to %v for StatefulSet %s to have at least 1 replica to become not ready", timeout, statefulSetName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, poll, err)
continue
} else {
if sts.Status.ReadyReplicas < *sts.Spec.Replicas {
framework.Logf("%d replicas are ready out of a total of %d replicas in StatefulSet %s. (%v)",
sts.Status.ReadyReplicas, *sts.Spec.Replicas, statefulSetName, time.Since(start))
return nil
} else {
framework.Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
}
}
}
return fmt.Errorf("All replicas in StatefulSet %s are still ready within %v", statefulSetName, timeout)
}
// If match is true, check if zones in PV exactly match zones given.
// Otherwise, check whether zones in PV is superset of zones given.
func verifyZonesInPV(volume *v1.PersistentVolume, zones sets.String, match bool) error {
pvZones, err := util.LabelZonesToSet(volume.Labels[apis.LabelZoneFailureDomain])
if err != nil {
return err
}
if match && zones.Equal(pvZones) || !match && zones.IsSuperset(pvZones) {
return nil
}
return fmt.Errorf("Zones in StorageClass are %v, but zones in PV are %v", zones, pvZones)
}
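// Example use of verifyZonesInPV, illustrative only (the zone names are assumptions; the
// function is already exercised by the provisioning tests above). A Regional PD PV records
// its zones in the failure-domain label, which util.LabelZonesToSet parses into a set.
func exampleVerifyExactZones(volume *v1.PersistentVolume) error {
	want := sets.NewString("us-central1-a", "us-central1-b")
	// match=true requires the PV zone set to equal `want` exactly;
	// match=false would only require the PV zones to be contained in `want`.
	return verifyZonesInPV(volume, want, true /* match */)
}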

File diff suppressed because it is too large

View File

@ -1,36 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"framework.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//test/e2e/framework:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-storage] "+text, body)
}
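// Usage sketch (suite and spec names are illustrative):
//
//	var _ = SIGDescribe("Volumes", func() {
//		ginkgo.It("mounts the volume", func() { /* ... */ })
//	})
//
// Ginkgo then reports the spec as "[sig-storage] Volumes mounts the volume".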

View File

@ -1,278 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
type KubeletOpt string
const (
NodeStateTimeout = 1 * time.Minute
KStart KubeletOpt = "start"
KStop KubeletOpt = "stop"
KRestart KubeletOpt = "restart"
)
// PodExec wraps RunKubectl to execute a shell command in the target pod
func PodExec(pod *v1.Pod, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
sudoPresent := false
systemctlPresent := false
kubeletPid := ""
nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
framework.Logf("Checking if sudo command is present")
sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
sudoPresent = true
}
framework.Logf("Checking if systemctl command is present")
sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
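// Usage sketch for KubeletCommand (client and pod come from the calling test):
//
//	KubeletCommand(KStop, c, pod)        // stop kubelet; waits for the node to go NotReady
//	defer KubeletCommand(KStart, c, pod) // restore it; waits for the node to be Ready again
//	// ... exercise behaviour while the kubelet is down ...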
// getKubeletMainPid return the Main PID of the Kubelet Process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
} else {
command = "service kubelet status | grep 'Main PID'"
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", nodeIP))
framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID")
Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty")
return sshResult.Stdout
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
By("Writing to the volume.")
file := "/mnt/_SUCCESS"
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
framework.Logf(out)
Expect(err).NotTo(HaveOccurred())
By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
By("Testing that written file is accessible.")
out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
framework.Logf(out)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
}
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod should be forcefully deleted.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
By("Expecting the volume subpath mount to be found.")
result, err := framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
defer func() {
if err != nil {
KubeletCommand(KStart, c, clientPod)
}
}()
By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
}
Expect(err).NotTo(HaveOccurred())
By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
Expect(err).NotTo(HaveOccurred(), "Expected pod to be not found.")
if forceDelete {
// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
// so wait some time to finish
time.Sleep(30 * time.Second)
}
By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
if checkSubpath {
By("Expecting the volume subpath mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}
// RunInPodWithVolume runs a command in a pod with the given claim mounted at the /mnt/test directory.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
framework.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
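// Usage sketch for RunInPodWithVolume (claim name and command are illustrative):
//
//	RunInPodWithVolume(c, ns, "my-claim", "echo hello > /mnt/test/data")
//
// The helper blocks until the pod completes successfully and then deletes it.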

View File

@ -1,208 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
resizePollInterval = 2 * time.Second
// total time to wait for cloudprovider or file system resize to finish
totalResizeWaitPeriod = 20 * time.Minute
)
var _ = utils.SIGDescribe("Volume expand [Slow]", func() {
var (
c clientset.Interface
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
)
f := framework.NewDefaultFramework("volume-expand")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
resizableSc, err = createResizableStorageClass(test, ns, "resizing", c)
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
Expect(resizableSc.AllowVolumeExpansion).NotTo(BeNil())
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())
pvc = newClaim(test, ns, "default")
pvc.Spec.StorageClassName = &resizableSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
})
AfterEach(func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace))
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(resizableSc.Name, nil))
})
It("Verify if editing PVC allows resize", func() {
By("Waiting for pvc to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "While creating pods for resizing")
defer func() {
err = framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "while cleaning up pod already deleted in resize test")
}()
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
Expect(pvc).NotTo(BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name)
}
By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
By("Checking for conditions on pvc")
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While fetching pvc after controller resize")
inProgressConditions := pvc.Status.Conditions
Expect(len(inProgressConditions)).To(Equal(1), "pvc must have file system resize pending condition")
Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
By("Deleting the previously created pod")
err = framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
By("Creating a new pod with same volume")
pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "while recreating pod for resizing")
defer func() {
err = framework.DeletePodWithWait(f, c, pod2)
Expect(err).NotTo(HaveOccurred(), "while cleaning up pod before exiting resizing test")
}()
By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
})
})
func createResizableStorageClass(t storageClassTest, ns string, suffix string, c clientset.Interface) (*storage.StorageClass, error) {
stKlass := newStorageClass(t, ns, suffix)
allowExpansion := true
stKlass.AllowVolumeExpansion = &allowExpansion
var err error
stKlass, err = c.StorageV1().StorageClasses().Create(stKlass)
return stKlass, err
}
func expandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcName := origPVC.Name
updatedPVC := origPVC.DeepCopy()
waitErr := wait.PollImmediate(resizePollInterval, 30*time.Second, func() (bool, error) {
var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(pvcName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for resizing with %v", pvcName, err)
}
updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(updatedPVC)
if err == nil {
return true, nil
}
framework.Logf("Error updating pvc %s with %v", pvcName, err)
return false, nil
})
return updatedPVC, waitErr
}
func waitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) error {
pvName := pvc.Spec.VolumeName
return wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) {
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pv %q for resizing %v", pvName, err)
}
pvSize := pv.Spec.Capacity[v1.ResourceStorage]
// If pv size is greater or equal to requested size that means controller resize is finished.
if pvSize.Cmp(pvcSize) >= 0 {
return true, nil
}
return false, nil
})
}
func waitForFSResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
var updatedPVC *v1.PersistentVolumeClaim
waitErr := wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) {
var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err)
}
pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage]
pvcStatusSize := updatedPVC.Status.Capacity[v1.ResourceStorage]
// If the PVC's reported status capacity is greater than or equal to its requested size, the file system resize is done.
if pvcStatusSize.Cmp(pvcSize) >= 0 {
return true, nil
}
return false, nil
})
return updatedPVC, waitErr
}
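// End-to-end sketch of the resize helpers above (the 6Gi value is illustrative):
//
//	newSize := resource.MustParse("6Gi")
//	pvc, err := expandPVCSize(pvc, newSize, c)                     // bump spec.resources.requests
//	framework.ExpectNoError(err)
//	framework.ExpectNoError(waitForControllerVolumeResize(pvc, c)) // PV capacity reaches the request
//	pvc, err = waitForFSResize(pvc, c)                             // status.capacity catches up once a pod remounts the volume
//	framework.ExpectNoError(err)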

View File

@ -1,434 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files. Note that the plugin is defined inline in
* the pod spec, not via a persistent volume and claim.
*
* These tests work only when privileged containers are allowed, exporting various
* filesystems (NFS, GlusterFS, ...) usually needs some mounting or other privileged
* magic in the server pod. Note that the server containers are for testing purposes
* only and should not be used in production.
*/
package storage
import (
"fmt"
"math"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
minFileSize = 1 * framework.MiB
fileSizeSmall = 1 * framework.MiB
fileSizeMedium = 100 * framework.MiB
fileSizeLarge = 1 * framework.GiB
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
fileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
fileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
fileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
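// To recompute a hash after changing the file generation algorithm (illustrative; run
// inside the client pod, mirroring verifyFile below):
//
//	md5sum /opt/<prefix>/<namespace>/<file>-<size> | cut -d' ' -f1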
// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `dd_input` file.
func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / minFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
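// Worked example for writeToFile (numbers are illustrative): with minFileSize = 1 MiB
// and fsize = fileSizeMedium (100 MiB), loopCnt is 100, so dd appends the 1 MiB dd_input
// file 100 times, producing exactly 100 MiB at fpath.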
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", eg. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext param, in which case it is ignored.
// Note: `fsizes` values are enforced to each be at least `minFileSize` and a multiple of `minFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
dir := path.Join("/opt", config.Prefix, config.Namespace)
dd_input := path.Join(dir, "dd_if")
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := minFileSize / int64(len(writeBlk))
// initContainer cmd to create and fill dd's input file. The initContainer is used to create
// the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
// used to create a 1MiB file in the target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, dd_input)
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
// note the test dir will be removed when the kubelet unmounts it
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be a multiple of `minFileSize`; round up to the next multiple
if math.Mod(float64(fsize), float64(minFileSize)) != 0 {
fsize = (fsize/minFileSize + 1) * minFileSize
}
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
if err = writeToFile(clientPod, fpath, dd_input, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, dd_input); err != nil {
return err
}
deleteFile(clientPod, fpath)
}
return
}
// These tests need privileged containers which are disabled by default.
// TODO: support all of the plugins tested in storage/volumes.go
var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
f := framework.NewDefaultFramework("volume-io")
var (
config framework.VolumeTestConfig
cs clientset.Interface
ns string
serverIP string
serverPod *v1.Pod
volSource v1.VolumeSource
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
testFile := "nfs_io_test"
// client pod uses selinux
podSec := v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
}
BeforeEach(func() {
config, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})
volSource = v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium, fileSizeLarge}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
var name string
testFile := "gluster_io_test"
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci")
// create gluster server and endpoints
config, serverPod, serverIP = framework.NewGlusterfsServer(cs, ns)
name = config.Prefix + "-server"
volSource = v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Gluster endpoints %q...", name)
epErr := cs.CoreV1().Endpoints(ns).Delete(name, nil)
framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if epErr != nil || err != nil {
if epErr != nil {
framework.Logf("AfterEach: Gluster delete endpoints failed: %v", err)
}
if err != nil {
framework.Logf("AfterEach: Gluster server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
err := testVolumeIO(f, cs, config, volSource, nil /*no secContext*/, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
////////////////////////////////////////////////////////////////////////
Describe("iSCSI [Feature:Volumes]", func() {
testFile := "iscsi_io_test"
BeforeEach(func() {
config, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
volSource = v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph-RBD [Feature:Volumes]", func() {
var (
secret *v1.Secret
)
testFile := "ceph-rbd_io_test"
BeforeEach(func() {
config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
volSource = v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
})

View File

@ -1,456 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/prometheus/common/model"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// This test needs to run in serial because other tests could interfere
// with metrics being tested here.
var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
var (
c clientset.Interface
ns string
pvc *v1.PersistentVolumeClaim
metricsGrabber *metrics.MetricsGrabber
)
f := framework.NewDefaultFramework("pv")
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.SkipUnlessProviderIs("gce", "gke", "aws")
defaultScName := getDefaultStorageClassName(c)
verifyDefaultStorageClass(c, defaultScName, true)
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
pvc = newClaim(test, ns, "default")
var err error
metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil {
framework.Failf("Error creating metrics grabber : %v", err)
}
})
AfterEach(func() {
framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace)
})
It("should create prometheus metrics for volume provisioning and attach/detach", func() {
var err error
if !metricsGrabber.HasRegisteredMaster() {
framework.Skipf("Environment does not support getting controller-manager metrics - skipping")
}
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
Expect(err).NotTo(HaveOccurred(), "Error getting c-m metrics : %v", err)
storageOpMetrics := getControllerStorageMetrics(controllerMetrics)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
updatedStorageMetrics := waitForDetachAndGrabMetrics(storageOpMetrics, metricsGrabber)
Expect(len(updatedStorageMetrics)).ToNot(Equal(0), "Error fetching c-m updated storage metrics")
volumeOperations := []string{"volume_provision", "volume_detach", "volume_attach"}
for _, volumeOp := range volumeOperations {
verifyMetricCount(storageOpMetrics, updatedStorageMetrics, volumeOp)
}
})
It("should create volume metrics with the correct PVC ref", func() {
var err error
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
Expect(pvc).ToNot(BeNil())
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Verify volume stat metrics were collected for the referenced PVC
volumeStatKeys := []string{
kubeletmetrics.VolumeStatsUsedBytesKey,
kubeletmetrics.VolumeStatsCapacityBytesKey,
kubeletmetrics.VolumeStatsAvailableBytesKey,
kubeletmetrics.VolumeStatsInodesKey,
kubeletmetrics.VolumeStatsInodesFreeKey,
kubeletmetrics.VolumeStatsInodesUsedKey,
}
// Poll kubelet metrics waiting for the volume to be picked up
// by the volume stats collector
var kubeMetrics metrics.KubeletMetrics
waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
framework.Logf("Grabbing Kubelet metrics")
// Grab kubelet metrics from the node the pod was scheduled on
var err error
kubeMetrics, err = metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
if err != nil {
framework.Logf("Error fetching kubelet metrics")
return false, err
}
key := volumeStatKeys[0]
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
if !findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics) {
return false, nil
}
return true, nil
})
Expect(waitErr).NotTo(HaveOccurred(), "Error finding volume metrics : %v", waitErr)
for _, key := range volumeStatKeys {
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
found := findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics)
Expect(found).To(BeTrue(), "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
}
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
// Test for pv controller metrics, concretely: bound/unbound pv/pvc count.
Describe("PVController", func() {
const (
classKey = "storage_class"
namespaceKey = "namespace"
boundPVKey = "pv_collector_bound_pv_count"
unboundPVKey = "pv_collector_unbound_pv_count"
boundPVCKey = "pv_collector_bound_pvc_count"
unboundPVCKey = "pv_collector_unbound_pvc_count"
)
var (
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
className = "bound-unbound-count-test-sc"
pvConfig = framework.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/data"},
},
NamePrefix: "pv-test-",
StorageClassName: className,
}
pvcConfig = framework.PersistentVolumeClaimConfig{StorageClassName: &className}
metrics = []struct {
name string
dimension string
}{
{boundPVKey, classKey},
{unboundPVKey, classKey},
{boundPVCKey, namespaceKey},
{unboundPVCKey, namespaceKey},
}
// Original metric values before we create any PV/PVCs. The length should be 4,
// and the elements should be bound pv count, unbound pv count, bound pvc count,
// unbound pvc count in turn.
// We use these values to calculate relative increment of each test.
originMetricValues []map[string]int64
)
// validator used to validate each metric's values, the length of metricValues
// should be 4, and the elements should be bound pv count, unbound pv count, bound
// pvc count, unbound pvc count in turn.
validator := func(metricValues []map[string]int64) {
Expect(len(metricValues)).To(Equal(4),
"Wrong metric size: %d", len(metricValues))
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
Expect(err).NotTo(HaveOccurred(), "Error getting c-m metricValues: %v", err)
for i, metric := range metrics {
expectValues := metricValues[i]
if expectValues == nil {
expectValues = make(map[string]int64)
}
// We use relative increments instead of absolute values to reduce unexpected flakes.
// Concretely, we expect the difference between the updated and original values for each
// test to equal expectValues.
actualValues := calculateRelativeValues(originMetricValues[i],
getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension))
Expect(actualValues).To(Equal(expectValues),
"Wrong pv controller metric %s(%s): wanted %v, got %v",
metric.name, metric.dimension, expectValues, actualValues)
}
}
BeforeEach(func() {
if !metricsGrabber.HasRegisteredMaster() {
framework.Skipf("Environment does not support getting controller-manager metrics - skipping")
}
pv = framework.MakePersistentVolume(pvConfig)
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
// Initializes all original metric values.
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
Expect(err).NotTo(HaveOccurred(), "Error getting c-m metricValues: %v", err)
for _, metric := range metrics {
originMetricValues = append(originMetricValues,
getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension))
}
})
AfterEach(func() {
if err := framework.DeletePersistentVolume(c, pv.Name); err != nil {
framework.Failf("Error deleting pv: %v", err)
}
if err := framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil {
framework.Failf("Error deleting pvc: %v", err)
}
// Clear original metric values.
originMetricValues = nil
})
It("should create none metrics for pvc controller before creating any PV or PVC", func() {
validator([]map[string]int64{nil, nil, nil, nil})
})
It("should create unbound pv count metrics for pvc controller after creating pv only",
func() {
var err error
pv, err = framework.CreatePV(c, pv)
Expect(err).NotTo(HaveOccurred(), "Error creating pv: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey)
validator([]map[string]int64{nil, {className: 1}, nil, nil})
})
It("should create unbound pvc count metrics for pvc controller after creating pvc only",
func() {
var err error
pvc, err = framework.CreatePVC(c, ns, pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVCKey, namespaceKey)
validator([]map[string]int64{nil, nil, nil, {ns: 1}})
})
It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc",
func() {
var err error
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred(), "Error creating pv pvc: %v", err)
waitForPVControllerSync(metricsGrabber, boundPVKey, classKey)
waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey)
validator([]map[string]int64{{className: 1}, nil, {ns: 1}, nil})
})
})
})
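// waitForDetachAndGrabMetrics polls controller-manager metrics with exponential backoff until
// the volume_detach count rises above the count recorded in oldMetrics, then returns the
// updated storage metrics map.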
func waitForDetachAndGrabMetrics(oldMetrics map[string]int64, metricsGrabber *metrics.MetricsGrabber) map[string]int64 {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
Steps: 21,
}
updatedStorageMetrics := make(map[string]int64)
oldDetachCount, ok := oldMetrics["volume_detach"]
if !ok {
oldDetachCount = 0
}
verifyMetricFunc := func() (bool, error) {
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Logf("Error fetching controller-manager metrics")
return false, err
}
updatedStorageMetrics = getControllerStorageMetrics(updatedMetrics)
newDetachCount, ok := updatedStorageMetrics["volume_detach"]
// if detach metrics are not yet there, we need to retry
if !ok {
return false, nil
}
// if the old detach count is greater than or equal to the new detach count, the detach
// event has not been observed yet.
if oldDetachCount >= newDetachCount {
return false, nil
}
return true, nil
}
waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc)
Expect(waitErr).NotTo(HaveOccurred(), "Timeout error fetching storage c-m metrics : %v", waitErr)
return updatedStorageMetrics
}
func verifyMetricCount(oldMetrics map[string]int64, newMetrics map[string]int64, metricName string) {
oldCount, ok := oldMetrics[metricName]
// if the metric does not exist in oldMetrics, it probably hasn't been emitted yet.
if !ok {
oldCount = 0
}
newCount, ok := newMetrics[metricName]
Expect(ok).To(BeTrue(), "Error getting updated metrics for %s", metricName)
// It appears that in a busy cluster some spurious detaches are unavoidable
// even if the test is run serially. We really just verify that the new count
// is greater than the old count.
Expect(newCount).To(BeNumerically(">", oldCount), "New count %d should be more than old count %d for action %s", newCount, oldCount, metricName)
}
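// getControllerStorageMetrics flattens the storage_operation_duration_seconds_count samples
// into a map keyed by operation_name, for example (hypothetical values)
// {"volume_attach": 3, "volume_detach": 2}.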
func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) map[string]int64 {
result := make(map[string]int64)
for method, samples := range ms {
if method != "storage_operation_duration_seconds_count" {
continue
}
for _, sample := range samples {
count := int64(sample.Value)
operation := string(sample.Metric["operation_name"])
result[operation] = count
}
}
return result
}
// Finds the sample in the specified metric from `KubeletMetrics` tagged with
// the specified namespace and pvc name
func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) bool {
found := false
errCount := 0
framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
if samples, ok := kubeletMetrics[metricKeyName]; ok {
for _, sample := range samples {
framework.Logf("Found sample %s", sample.String())
samplePVC, ok := sample.Metric["persistentvolumeclaim"]
if !ok {
framework.Logf("Error getting pvc for metric %s, sample %s", metricKeyName, sample.String())
errCount++
}
sampleNS, ok := sample.Metric["namespace"]
if !ok {
framework.Logf("Error getting namespace for metric %s, sample %s", metricKeyName, sample.String())
errCount++
}
if string(samplePVC) == pvcName && string(sampleNS) == namespace {
found = true
break
}
}
}
Expect(errCount).To(Equal(0), "Found invalid samples")
return found
}
// waitForPVControllerSync waits until the count of the pv controller metric specified by metricName and dimension is greater than zero.
func waitForPVControllerSync(metricsGrabber *metrics.MetricsGrabber, metricName, dimension string) {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
Steps: 21,
}
verifyMetricFunc := func() (bool, error) {
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Logf("Error fetching controller-manager metrics")
return false, err
}
return len(getPVControllerMetrics(updatedMetrics, metricName, dimension)) > 0, nil
}
waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc)
Expect(waitErr).NotTo(HaveOccurred(),
"Timeout error fetching pv controller metrics : %v", waitErr)
}
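// getPVControllerMetrics collapses the samples of metricName into a map keyed by the given
// dimension label (storage_class or namespace in these tests), with the sample value as the count.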
func getPVControllerMetrics(ms metrics.ControllerManagerMetrics, metricName, dimension string) map[string]int64 {
result := make(map[string]int64)
for method, samples := range ms {
if method != metricName {
continue
}
for _, sample := range samples {
count := int64(sample.Value)
dimensionName := string(sample.Metric[model.LabelName(dimension)])
result[dimensionName] = count
}
}
return result
}
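// calculateRelativeValues returns updatedValues minus originValues per key, dropping zero
// deltas and recording a negative delta for keys that only appear in originValues. For
// illustration (hypothetical numbers): origin {"sc1": 2} and updated {"sc1": 3, "sc2": 1}
// yield {"sc1": 1, "sc2": 1}.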
func calculateRelativeValues(originValues, updatedValues map[string]int64) map[string]int64 {
relativeValues := make(map[string]int64)
for key, value := range updatedValues {
relativeValue := value - originValues[key]
if relativeValue != 0 {
relativeValues[key] = relativeValue
}
}
for key, value := range originValues {
if _, exist := updatedValues[key]; !exist && value > 0 {
relativeValues[key] = -value
}
}
return relativeValues
}

File diff suppressed because it is too large


@ -1,572 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting simple 'index.html' file.
* Then it uses an appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
package storage
import (
"os/exec"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)
func DeleteCinderVolume(name string) error {
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", name)
return nil
} else {
framework.Logf("Failed to delete volume %s: %v", name, err)
}
}
framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err
}
// These tests need privileged containers, which are disabled by default.
var _ = utils.SIGDescribe("Volumes", func() {
f := framework.NewDefaultFramework("volume")
// note that namespace deletion is handled by delete-namespace flag
// filled inside BeforeEach
var cs clientset.Interface
var namespace *v1.Namespace
BeforeEach(func() {
cs = f.ClientSet
namespace = f.Namespace
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{})
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/nfs/index.html
ExpectedContent: "Hello from NFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
It("should be mountable", func() {
//TODO (copejon) GFS is not supported on debian image.
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
// create gluster server and endpoints
config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name)
name := config.Prefix + "-server"
defer func() {
framework.VolumeTestCleanup(f, config)
err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
ExpectedContent: "Hello from GlusterFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
////////////////////////////////////////////////////////////////////////
// The test needs privileged containers, which are disabled by default.
// Also, make sure that iscsiadm utility and iscsi target kernel modules
// are installed on all nodes!
// Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"
Describe("iSCSI [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
ExpectedContent: "Hello from iSCSI",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph RBD [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/rbd/create_block.sh
ExpectedContent: "Hello from RBD",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph
////////////////////////////////////////////////////////////////////////
Describe("CephFS [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: secret.Name},
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/ceph/index.html
ExpectedContent: "Hello Ceph!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// OpenStack Cinder
////////////////////////////////////////////////////////////////////////
// This test assumes that OpenStack client tools are installed
// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
// and that the usual OpenStack authentication env. variables are set
// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
Describe("Cinder [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("openstack")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cinder",
}
// We assume that namespace.Name is a random string
volumeName := namespace.Name
By("creating a test Cinder volume")
output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
outputString := string(output[:])
framework.Logf("cinder output:\n%s", outputString)
Expect(err).NotTo(HaveOccurred())
defer DeleteCinderVolume(volumeName)
// Parse 'id' from stdout. Expected format:
// | attachments | [] |
// | availability_zone | nova |
// ...
// | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
volumeID := ""
for _, line := range strings.Split(outputString, "\n") {
fields := strings.Fields(line)
if len(fields) != 5 {
continue
}
if fields[1] != "id" {
continue
}
volumeID = fields[3]
break
}
framework.Logf("Volume ID: %s", volumeID)
Expect(volumeID).NotTo(Equal(""))
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Cinder from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// GCE PD
////////////////////////////////////////////////////////////////////////
Describe("PD", func() {
var config framework.VolumeTestConfig
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
config = framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "pd",
// PD will be created in framework.TestContext.CloudConfig.Zone zone,
// so pods should be also scheduled there.
NodeSelector: map[string]string{
kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
},
}
})
It("should be mountable with ext3", func() {
testGCEPD(f, config, cs, "ext3")
})
It("should be mountable with ext4", func() {
testGCEPD(f, config, cs, "ext4")
})
It("should be mountable with xfs", func() {
// xfs is not supported on gci
// and not installed by default on debian
framework.SkipUnlessNodeOSDistroIs("ubuntu")
testGCEPD(f, config, cs, "xfs")
})
})
////////////////////////////////////////////////////////////////////////
// ConfigMap
////////////////////////////////////////////////////////////////////////
Describe("ConfigMap", func() {
It("should be mountable", func() {
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "configmap",
}
defer framework.VolumeTestCleanup(f, config)
configMap := &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-map",
},
Data: map[string]string{
"first": "this is the first file",
"second": "this is the second file",
"third": "this is the third file",
},
}
if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configmap: %v", err)
}
defer func() {
_ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(configMap.Name, nil)
}()
// Test one ConfigMap mounted several times to test #28502
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.Prefix + "-map",
},
Items: []v1.KeyToPath{
{
Key: "first",
Path: "firstfile",
},
},
},
},
File: "firstfile",
ExpectedContent: "this is the first file",
},
{
Volume: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.Prefix + "-map",
},
Items: []v1.KeyToPath{
{
Key: "second",
Path: "secondfile",
},
},
},
},
File: "secondfile",
ExpectedContent: "this is the second file",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// vSphere
////////////////////////////////////////////////////////////////////////
Describe("vsphere [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
var volumePath string
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "vsphere",
}
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Azure Disk
////////////////////////////////////////////////////////////////////////
Describe("Azure Disk [Feature:Volumes]", func() {
It("should be mountable [Slow]", func() {
framework.SkipUnlessProviderIs("azure")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "azure",
}
By("creating a test azure disk volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePDWithRetry(volumeName)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
fsType := "ext4"
readOnly := false
diskName := volumeName[(strings.LastIndex(volumeName, "/") + 1):]
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: volumeName,
FSType: &fsType,
ReadOnly: &readOnly,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Azure from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
})
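// testGCEPD provisions a GCE PD, mounts it into a client pod with the given filesystem type,
// verifies the injected index.html content, and finally cleans up the client pod and detaches
// and deletes the disk.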
func testGCEPD(f *framework.Framework, config framework.VolumeTestConfig, cs clientset.Interface, fs string) {
By("creating a test gce pd volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
// - Get NodeName from the pod spec to which the volume is mounted.
// - Force detach and delete.
pod, err := f.PodClient().Get(config.Prefix+"-client", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed getting pod %q.", config.Prefix+"-client")
detachAndDeletePDs(volumeName, []types.NodeName{types.NodeName(pod.Spec.NodeName)})
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: fs,
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from GCE from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
}


@ -1,80 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"bootstrap.go",
"config.go",
"connection.go",
"context.go",
"nodemapper.go",
"persistent_volumes-vsphere.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",
"vsphere.go",
"vsphere_common.go",
"vsphere_scale.go",
"vsphere_statefulsets.go",
"vsphere_stress.go",
"vsphere_utils.go",
"vsphere_volume_cluster_ds.go",
"vsphere_volume_datastore.go",
"vsphere_volume_diskformat.go",
"vsphere_volume_disksize.go",
"vsphere_volume_fstype.go",
"vsphere_volume_master_restart.go",
"vsphere_volume_node_delete.go",
"vsphere_volume_node_poweroff.go",
"vsphere_volume_ops_storm.go",
"vsphere_volume_perf.go",
"vsphere_volume_placement.go",
"vsphere_volume_vpxd_restart.go",
"vsphere_volume_vsan_policy.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/volume/util:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,59 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"sync"
)
var once sync.Once
var waiting = make(chan bool)
var f *framework.Framework
// Bootstrap takes care of initializing necessary test context for vSphere tests
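// Callers typically invoke Bootstrap(f) from a BeforeEach (see pv_reclaimpolicy.go and
// pvc_label_selector.go in this package) and then read the shared TestContext for the
// NodeMapper and the VSphere instances.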
func Bootstrap(fw *framework.Framework) {
done := make(chan bool)
f = fw
go func() {
once.Do(bootstrapOnce)
<-waiting
done <- true
}()
<-done
}
func bootstrapOnce() {
// 1. Read vSphere conf and get VSphere instances
vsphereInstances, err := GetVSphereInstances()
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
// 2. Get all nodes
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to get nodes: %v", err)
}
TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
// 3. Get Node to VSphere mapping
err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
close(waiting)
}


@ -1,180 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"errors"
"fmt"
"gopkg.in/gcfg.v1"
"io"
"k8s.io/kubernetes/test/e2e/framework"
"os"
)
const (
vSphereConfFileEnvVar = "VSPHERE_CONF_FILE"
)
var (
confFileLocation = os.Getenv(vSphereConfFileEnvVar)
)
// Config represents vSphere configuration
type Config struct {
Username string
Password string
Hostname string
Port string
Datacenters string
RoundTripperCount uint
DefaultDatastore string
Folder string
}
// ConfigFile represents the content of vsphere.conf file.
// Users specify the configuration of one or more vSphere instances in vsphere.conf where
// the Kubernetes master and worker nodes are running.
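//
// For illustration only, a vsphere.conf consumed by readConfig below might look like the
// following (server addresses, credentials and paths are hypothetical):
//
//   [Global]
//   user = "administrator@vsphere.local"
//   password = "secret"
//   port = "443"
//   insecure-flag = true
//   datacenters = "vcqaDC"
//   soap-roundtrip-count = 3
//
//   [VirtualCenter "10.160.1.1"]
//
//   [Workspace]
//   server = "10.160.1.1"
//   datacenter = "vcqaDC"
//   folder = "kubernetes"
//   default-datastore = "sharedVmfs-0"
//   resourcepool-path = "cluster/Resources"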
type ConfigFile struct {
Global struct {
// vCenter username.
User string `gcfg:"user"`
// vCenter password in clear text.
Password string `gcfg:"password"`
// vCenter port.
VCenterPort string `gcfg:"port"`
// True if vCenter uses self-signed cert.
InsecureFlag bool `gcfg:"insecure-flag"`
// Datacenter in which VMs are located.
Datacenters string `gcfg:"datacenters"`
// Soap round tripper count (retries = RoundTripper - 1)
RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
}
VirtualCenter map[string]*Config
Network struct {
// PublicNetwork is the name of the network the VMs are joined to.
PublicNetwork string `gcfg:"public-network"`
}
Disk struct {
// SCSIControllerType defines SCSI controller to be used.
SCSIControllerType string `gcfg:"scsicontrollertype"`
}
// Endpoint used to create volumes
Workspace struct {
VCenterIP string `gcfg:"server"`
Datacenter string `gcfg:"datacenter"`
Folder string `gcfg:"folder"`
DefaultDatastore string `gcfg:"default-datastore"`
ResourcePoolPath string `gcfg:"resourcepool-path"`
}
}
// GetVSphereInstances parses vsphere.conf and returns VSphere instances
func GetVSphereInstances() (map[string]*VSphere, error) {
cfg, err := getConfig()
if err != nil {
return nil, err
}
return populateInstanceMap(cfg)
}
func getConfig() (*ConfigFile, error) {
if confFileLocation == "" {
return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set.")
}
confFile, err := os.Open(confFileLocation)
if err != nil {
return nil, err
}
defer confFile.Close()
cfg, err := readConfig(confFile)
if err != nil {
return nil, err
}
return &cfg, nil
}
// readConfig parses vSphere cloud config file into ConfigFile.
func readConfig(config io.Reader) (ConfigFile, error) {
if config == nil {
err := fmt.Errorf("no vSphere cloud provider config file given")
return ConfigFile{}, err
}
var cfg ConfigFile
err := gcfg.ReadInto(&cfg, config)
return cfg, err
}
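// populateInstanceMap builds one VSphere instance per VirtualCenter section, falling back to
// the Global section for missing per-VC credentials, port, datacenters and round tripper count,
// and taking the default datastore and folder from the Workspace section.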
func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
vsphereInstances := make(map[string]*VSphere)
if cfg.Workspace.VCenterIP == "" || cfg.Workspace.DefaultDatastore == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
msg := fmt.Sprintf("All fields in workspace are mandatory."+
" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
framework.Logf(msg)
return nil, errors.New(msg)
}
for vcServer, vcConfig := range cfg.VirtualCenter {
framework.Logf("Initializing vc server %s", vcServer)
if vcServer == "" {
framework.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
}
vcConfig.Hostname = vcServer
if vcConfig.Username == "" {
vcConfig.Username = cfg.Global.User
}
if vcConfig.Password == "" {
vcConfig.Password = cfg.Global.Password
}
if vcConfig.Username == "" {
msg := fmt.Sprintf("vcConfig.User is empty for vc %s!", vcServer)
framework.Logf(msg)
return nil, errors.New(msg)
}
if vcConfig.Password == "" {
msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer)
framework.Logf(msg)
return nil, errors.New(msg)
}
if vcConfig.Port == "" {
vcConfig.Port = cfg.Global.VCenterPort
}
if vcConfig.Datacenters == "" && cfg.Global.Datacenters != "" {
vcConfig.Datacenters = cfg.Global.Datacenters
}
if vcConfig.RoundTripperCount == 0 {
vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount
}
vcConfig.DefaultDatastore = cfg.Workspace.DefaultDatastore
vcConfig.Folder = cfg.Workspace.Folder
vsphereIns := VSphere{
Config: vcConfig,
}
vsphereInstances[vcServer] = &vsphereIns
}
framework.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
return vsphereInstances, nil
}


@ -1,91 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
neturl "net/url"
"sync"
"github.com/golang/glog"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
)
const (
roundTripperDefaultCount = 3
)
var (
clientLock sync.Mutex
)
// Connect makes connection to vSphere
// No action is taken if a connection already exists and is alive. Otherwise, a new client will be created.
func Connect(ctx context.Context, vs *VSphere) error {
var err error
clientLock.Lock()
defer clientLock.Unlock()
if vs.Client == nil {
vs.Client, err = NewClient(ctx, vs)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
manager := session.NewManager(vs.Client.Client)
userSession, err := manager.UserSession(ctx)
if err != nil {
glog.Errorf("Error while obtaining user session. err: %+v", err)
return err
}
if userSession != nil {
return nil
}
glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
vs.Client.Logout(ctx)
vs.Client, err = NewClient(ctx, vs)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
// NewClient creates a new client for vSphere connection
func NewClient(ctx context.Context, vs *VSphere) (*govmomi.Client, error) {
url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", vs.Config.Hostname, vs.Config.Port))
if err != nil {
glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
return nil, err
}
url.User = neturl.UserPassword(vs.Config.Username, vs.Config.Password)
client, err := govmomi.NewClient(ctx, url, true)
if err != nil {
glog.Errorf("Failed to create new client. err: %+v", err)
return nil, err
}
if vs.Config.RoundTripperCount == 0 {
vs.Config.RoundTripperCount = roundTripperDefaultCount
}
client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(vs.Config.RoundTripperCount)))
return client, nil
}


@ -1,26 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
// VSphereContext holds common information for vSphere tests
type VSphereContext struct {
NodeMapper *NodeMapper
VSphereInstances map[string]*VSphere
}
// TestContext should be used by all tests to access common context data. It should be initialized only once, while bootstrapping the tests.
var TestContext VSphereContext


@ -1,134 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"errors"
"strings"
"sync"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
type NodeMapper struct {
}
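// NodeInfo records, for a single Kubernetes node, the managed object references of its
// datacenter and virtual machine together with the VSphere instance that owns them.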
type NodeInfo struct {
Name string
DataCenterRef types.ManagedObjectReference
VirtualMachineRef types.ManagedObjectReference
VSphere *VSphere
}
var (
nameToNodeInfo = make(map[string]*NodeInfo)
)
// GenerateNodeMap populates node name to node info map
func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error {
type VmSearch struct {
vs *VSphere
datacenter *object.Datacenter
}
var wg sync.WaitGroup
var queueChannel []*VmSearch
var datacenters []*object.Datacenter
var err error
for _, vs := range vSphereInstances {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if vs.Config.Datacenters == "" {
datacenters, err = vs.GetAllDatacenter(ctx)
if err != nil {
framework.Logf("NodeMapper error: %v", err)
continue
}
} else {
dcName := strings.Split(vs.Config.Datacenters, ",")
for _, dc := range dcName {
dc = strings.TrimSpace(dc)
if dc == "" {
continue
}
datacenter, err := vs.GetDatacenter(ctx, dc)
if err != nil {
framework.Logf("NodeMapper error dc: %s \n err: %v", dc, err)
continue
}
datacenters = append(datacenters, datacenter)
}
}
for _, dc := range datacenters {
framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
}
}
for _, node := range nodeList.Items {
n := node
go func() {
nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
framework.Logf("Searching for node with UUID: %s", nodeUUID)
for _, res := range queueChannel {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vm, err := res.vs.GetVMByUUID(ctx, nodeUUID, res.datacenter)
if err != nil {
framework.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
err, n.Name, res.vs.Config.Hostname, res.datacenter.Name())
continue
}
if vm != nil {
framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s",
n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name())
nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs}
nm.SetNodeInfo(n.Name, nodeInfo)
break
}
}
wg.Done()
}()
wg.Add(1)
}
wg.Wait()
if len(nameToNodeInfo) != len(nodeList.Items) {
return errors.New("all nodes not mapped to respective vSphere")
}
return nil
}
// GetNodeInfo returns the NodeInfo for the given nodeName
func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
return nameToNodeInfo[nodeName]
}
// SetNodeInfo sets NodeInfo for given nodeName. This function is not thread safe. Users need to handle concurrency.
func (nm *NodeMapper) SetNodeInfo(nodeName string, nodeInfo *NodeInfo) {
nameToNodeInfo[nodeName] = nodeInfo
}


@ -1,217 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// Testing configurations of a single PV/PVC pair attached to a vSphere Disk
var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
var (
c clientset.Interface
ns string
volumePath string
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
clientPod *v1.Pod
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
err error
node string
volLabel labels.Set
selector *metav1.LabelSelector
nodeInfo *NodeInfo
)
f := framework.NewDefaultFramework("pv")
/*
Test Setup
1. Create volume (vmdk)
2. Create PV with volume path for the vmdk.
3. Create PVC to bind with PV.
4. Create a POD using the PVC.
5. Verify the disk is attached to the node.
*/
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
clientPod = nil
pvc = nil
pv = nil
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
if volumePath == "" {
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = framework.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
}
By("Creating the PV and PVC")
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("Creating the Client Pod")
clientPod, err = framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
node = clientPod.Spec.NodeName
By("Verify disk should be attached to the node")
isAttached, err := diskIsAttached(volumePath, node)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached to the node")
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources")
if c != nil {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
}
if pvc != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
}
}
})
/*
Clean up
1. Wait and verify volume is detached from the node
2. Delete PV
3. Delete Volume (vmdk)
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(ns) > 0 && len(volumePath) > 0 {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
})
/*
Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
Test Steps:
1. Delete PVC.
2. Delete POD, POD deletion should succeed.
*/
It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc = nil
By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
})
/*
Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
Test Steps:
1. Delete PV.
2. Delete POD, POD deletion should succeed.
*/
It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() {
By("Deleting the Persistent Volume")
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
pv = nil
By("Deleting the pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
})
/*
This test verifies that a volume mounted to a pod remains mounted after a kubelet restarts.
Steps:
1. Write to the volume
2. Restart kubelet
3. Verify that written file is accessible after kubelet restart
*/
It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
})
/*
This test verifies that a volume mounted to a pod that is deleted while the kubelet is down
unmounts volume when the kubelet returns.
Steps:
1. Verify volume is mounted on the node.
2. Stop kubelet.
3. Delete pod.
4. Start kubelet.
5. Verify that the volume mount is no longer found.
*/
It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
})
/*
This test verifies that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk
Steps:
1. Delete Namespace.
2. Wait for the namespace to get deleted. (Namespace deletion should trigger deletion of the pods belonging to it.)
3. Verify volume should be detached from the node.
*/
It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() {
By("Deleting the Namespace")
err := c.CoreV1().Namespaces().Delete(ns, nil)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute)
Expect(err).NotTo(HaveOccurred())
By("Verifying Persistent Disk detaches")
waitForVSphereDiskToDetach(volumePath, node)
})
})


@ -1,249 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
f := framework.NewDefaultFramework("persistentvolumereclaim")
var (
c clientset.Interface
ns string
volumePath string
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
nodeInfo *NodeInfo
)
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
})
utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
pv = nil
pvc = nil
volumePath = ""
})
AfterEach(func() {
testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
})
/*
This test verifies that the persistent volume is deleted when reclaimPolicy on the PV is set to Delete
and the associated claim is deleted
Test Steps:
1. Create vmdk
2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete
3. Create PVC with the storage request set to PV's storage capacity.
4. Wait for PV and PVC to bind.
5. Delete PVC
6. Verify PV is deleted automatically.
*/
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
deletePVCAfterBind(c, ns, pvc, pv)
pvc = nil
By("verify pv is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
pv = nil
volumePath = ""
})
/*
Test Steps:
1. Create vmdk
2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete
3. Create PVC with the storage request set to PV's storage capacity.
4. Wait for PV and PVC to bind.
5. Delete PVC.
6. Verify volume is attached to the node and volume is accessible in the pod.
7. Verify the PV status is Failed.
8. Delete the pod.
9. Verify PV should be detached from the node and automatically deleted.
*/
It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
// Wait for PV and PVC to Bind
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("Creating the Pod")
pod, err := framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc = nil
// Verify PV is present after the PVC is deleted, and the PV status should be Failed.
pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
By("Verify the volume is attached to the node")
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
Expect(isVolumeAttached).To(BeTrue())
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Verify PV is detached from the node after Pod is deleted")
Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred())
By("Verify PV should be deleted automatically")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
pv = nil
volumePath = ""
})
/*
This test verifies that the persistent volume is retained when reclaimPolicy on the PV is set to Retain
and the associated claim is deleted
Test Steps:
1. Create vmdk
2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Retain
3. Create PVC with the storage request set to PV's storage capacity.
4. Wait for PV and PVC to bind.
5. Write some content in the volume.
6. Delete PVC
7. Verify PV is retained.
8. Delete retained PV.
9. Create PV Spec with the same volume path used in step 2.
10. Create PVC with the storage request set to PV's storage capacity.
11. Create a POD using the PVC created in Step 10 and verify the volume content matches.
*/
It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
var err error
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred())
writeContentToVSpherePV(c, pvc, volumeFileContent)
By("Delete PVC")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc = nil
By("Verify PV is retained")
framework.Logf("Waiting for PV %v to become Released", pv.Name)
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
By("Creating the PV for same volume path")
pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
By("creating the pvc")
pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
By("wait for the pv and pvc to bind")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
verifyContentOfVSpherePV(c, pvc, volumeFileContent)
})
})
})
// Test Setup for persistentvolumereclaim tests for vSphere Provider
func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
By("running testSetupVSpherePersistentVolumeReclaim")
By("creating vmdk")
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil {
return
}
By("creating the pv")
pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil)
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
if err != nil {
return
}
By("creating the pvc")
pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
return
}
// Test Cleanup for persistentvolumereclaim tests for vSphere Provider
func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePersistentVolumeReclaim")
if len(volumePath) > 0 {
err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
}
if pvc != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
}
}
// deletePVCAfterBind waits until the PV and PVC bind and, once the bind completes, deletes the PVC
func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
var err error
By("wait for the pv and pvc to bind")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("delete pvc")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
}


@ -1,150 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
This is a functional test for the Selector-Label Volume Binding feature.
The test verifies that the volume with the matching label is bound to the PVC.
Test Steps
----------
1. Create VMDK.
2. Create PV with label volume-type:ssd, volume path set to the vmdk created in the previous step, and PersistentVolumeReclaimPolicy set to Delete.
3. Create PVC (pvc_vvol) with label selector to match with volume-type:vvol
4. Create PVC (pvc_ssd) with label selector to match with volume-type:ssd
5. Wait and verify pvc_ssd is bound with PV.
6. Verify Status of pvc_vvol is still pending.
7. Delete pvc_ssd.
8. verify associated pv is also deleted.
9. delete pvc_vvol
*/
var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
f := framework.NewDefaultFramework("pvclabelselector")
var (
c clientset.Interface
ns string
pv_ssd *v1.PersistentVolume
pvc_ssd *v1.PersistentVolumeClaim
pvc_vvol *v1.PersistentVolumeClaim
volumePath string
ssdlabels map[string]string
vvollabels map[string]string
err error
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
vvollabels = make(map[string]string)
vvollabels["volume-type"] = "vvol"
})
utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() {
AfterEach(func() {
By("Running clean up actions")
if framework.ProviderIs("vsphere") {
testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
}
})
It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
Expect(err).NotTo(HaveOccurred())
By("wait for the pvc_ssd to bind with pv_ssd")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
By("Verify status of pvc_vvol is pending")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
By("delete pvc_ssd")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
By("verify pv_ssd is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
volumePath = ""
By("delete pvc_vvol")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
})
})
})
func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
volumePath = ""
By("creating vmdk")
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil {
return
}
By("creating the pv with label volume-type:ssd")
pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
if err != nil {
return
}
By("creating pvc with label selector to match with volume-type:vvol")
pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
if err != nil {
return
}
By("creating pvc with label selector to match with volume-type:ssd")
pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
return
}
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
if pvc_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
}
if pvc_vvol != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
}
if pv_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
}
}

View File

@ -1,241 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
VolDir = "kubevols"
DefaultDiskCapacityKB = 2097152
DefaultDiskFormat = "thin"
DefaultSCSIControllerType = "lsiLogic"
VirtualMachineType = "VirtualMachine"
)
// Represents a vSphere instance where one or more kubernetes nodes are running.
type VSphere struct {
Config *Config
Client *govmomi.Client
}
// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
Name string
CapacityKB int
DiskFormat string
SCSIControllerType string
Datastore string
}
// GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) {
Connect(ctx, vs)
finder := find.NewFinder(vs.Client.Client, false)
return finder.Datacenter(ctx, datacenterPath)
}
// GetDatacenterFromObjectReference returns the Datacenter object for the given datacenter reference
func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
Connect(ctx, vs)
return object.NewDatacenter(vs.Client.Client, dc.Reference())
}
// GetAllDatacenter returns all the DataCenter Objects
func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) {
Connect(ctx, vs)
finder := find.NewFinder(vs.Client.Client, false)
return finder.DatacenterList(ctx, "*")
}
// GetVMByUUID gets the VM object Reference from the given vmUUID
func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
Connect(ctx, vs)
datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
s := object.NewSearchIndex(vs.Client.Client)
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
}
// GetFolderByPath gets the Folder Object Reference from the given folder path
// folderPath should be the full path to folder
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
finder := find.NewFinder(datacenter.Client(), false)
finder.SetDatacenter(datacenter)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return vmFolderMor, err
}
return vmFolder.Reference(), nil
}
// CreateVolume creates a vSphere volume using the volume parameters specified in VolumeOptions.
// If the volume is created successfully, its canonical disk path is returned; otherwise an error is returned.
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
var (
err error
directoryAlreadyPresent = false
)
if datacenter == nil {
return "", fmt.Errorf("datacenter is nil")
}
vs.initVolumeOptions(volumeOptions)
finder := find.NewFinder(datacenter.Client(), false)
finder.SetDatacenter(datacenter)
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
if err != nil {
return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
}
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
fileManager := object.NewFileManager(ds.Client())
err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
if err != nil {
if soap.IsSoapFault(err) {
soapFault := soap.ToSoapFault(err)
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
directoryAlreadyPresent = true
framework.Logf("Directory with the path %+q is already present", directoryPath)
}
}
if !directoryAlreadyPresent {
framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
return "", err
}
}
framework.Logf("Created dir with path as %+q", directoryPath)
vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
// Create a virtual disk manager
vdm := object.NewVirtualDiskManager(ds.Client())
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: volumeOptions.SCSIControllerType,
DiskType: volumeOptions.DiskFormat,
},
CapacityKb: int64(volumeOptions.CapacityKB),
}
// Create virtual disk
task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
if err != nil {
framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
return "", err
}
taskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
return "", err
}
volumePath := taskInfo.Result.(string)
canonicalDiskPath, err := getCanonicalVolumePath(ctx, datacenter, volumePath)
if err != nil {
return "", err
}
return canonicalDiskPath, nil
}
// DeleteVolume deletes the vmdk file specified in the volumePath.
// if an error is encountered while deleting volume, error is returned.
func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
diskPath := removeStorageClusterORFolderNameFromVDiskPath(volumePath)
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
err = task.Wait(ctx)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil
}
// IsVMPresent checks if VM with the name specified in the vmName argument, is present in the vCenter inventory.
// if VM is present, function returns true else false.
func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
folderMor, err := vs.GetFolderByPath(ctx, dataCenterRef, vs.Config.Folder)
if err != nil {
return
}
vmFolder := object.NewFolder(vs.Client.Client, folderMor)
vmFoldersChildren, err := vmFolder.Children(ctx)
if err != nil {
framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
return
}
for _, vmFoldersChild := range vmFoldersChildren {
if vmFoldersChild.Reference().Type == VirtualMachineType {
if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
return true, nil
}
}
}
return
}
// initVolumeOptions function sets default values for volumeOptions parameters if not set
func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
if volumeOptions.Datastore == "" {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
}
}
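// exampleVolumeRoundTrip is an illustrative sketch added for documentation purposes; it is
// not used by the tests. It shows the intended CreateVolume/DeleteVolume round trip: passing
// an empty VolumeOptions relies on initVolumeOptions to fill in the datastore, capacity,
// name, disk format and SCSI controller type defaults declared above.
func exampleVolumeRoundTrip(vs *VSphere, dataCenterRef types.ManagedObjectReference) error {
	// Create a thin-provisioned VMDK with the default capacity in the default datastore.
	volumePath, err := vs.CreateVolume(&VolumeOptions{}, dataCenterRef)
	if err != nil {
		return err
	}
	framework.Logf("created example volume at %q", volumePath)
	// Delete the same VMDK using the canonical path returned by CreateVolume.
	return vs.DeleteVolume(volumePath, dataCenterRef)
}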

View File

@ -1,66 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
. "github.com/onsi/gomega"
"os"
"strconv"
)
const (
SPBMPolicyName = "VSPHERE_SPBM_POLICY_NAME"
StorageClassDatastoreName = "VSPHERE_DATASTORE"
SecondSharedDatastore = "VSPHERE_SECOND_SHARED_DATASTORE"
KubernetesClusterName = "VSPHERE_KUBERNETES_CLUSTER"
SPBMTagPolicy = "VSPHERE_SPBM_TAG_POLICY"
)
const (
VCPClusterDatastore = "CLUSTER_DATASTORE"
SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER"
)
const (
VCPScaleVolumeCount = "VCP_SCALE_VOLUME_COUNT"
VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD"
VCPScaleInstances = "VCP_SCALE_INSTANCES"
)
const (
VCPStressInstances = "VCP_STRESS_INSTANCES"
VCPStressIterations = "VCP_STRESS_ITERATIONS"
)
const (
VCPPerfVolumeCount = "VCP_PERF_VOLUME_COUNT"
VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD"
VCPPerfIterations = "VCP_PERF_ITERATIONS"
)
func GetAndExpectStringEnvVar(varName string) string {
varValue := os.Getenv(varName)
Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set")
return varValue
}
func GetAndExpectIntEnvVar(varName string) int {
varValue := GetAndExpectStringEnvVar(varName)
varIntValue, err := strconv.Atoi(varValue)
Expect(err).NotTo(HaveOccurred(), "Error Parsing "+varName)
return varIntValue
}
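// exampleReadScaleConfig is an illustrative sketch added for documentation purposes; it is
// not called by the tests. It shows how the scale-test knobs declared above are typically
// read together (the scale suite itself does this in its BeforeEach).
func exampleReadScaleConfig() (volumeCount, volumesPerPod, instances int, policyName string) {
	volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
	volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
	instances = GetAndExpectIntEnvVar(VCPScaleInstances)
	policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
	return volumeCount, volumesPerPod, instances, policyName
}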

View File

@ -1,233 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Perform vsphere volume life cycle management at scale based on user configurable value for number of volumes.
The following actions will be performed as part of this test.
1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from the system environment.
3. Launch VCP_SCALE_INSTANCES goroutines for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
5. Once all the goroutines have completed, delete all the pods and volumes.
*/
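// splitVolumeCount is an illustrative sketch added for documentation purposes; it is not
// used by the test. It mirrors the distribution performed inline in the It block below:
// volumeCount/numberOfInstances volumes per goroutine, with the last goroutine absorbing
// the remainder. For example, splitVolumeCount(10, 3) returns [3 3 4].
func splitVolumeCount(volumeCount, numberOfInstances int) []int {
	counts := make([]int, numberOfInstances)
	perInstance := volumeCount / numberOfInstances
	for i := 0; i < numberOfInstances; i++ {
		if i == numberOfInstances-1 {
			// The last instance creates whatever is left over.
			perInstance = volumeCount
		}
		volumeCount -= perInstance
		counts[i] = perInstance
	}
	return counts
}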
const (
NodeLabelKey = "vsphere_e2e_label"
)
// NodeSelector holds the label key/value pair used to schedule a pod onto a specific node.
type NodeSelector struct {
labelKey string
labelValue string
}
var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
f := framework.NewDefaultFramework("vcp-at-scale")
var (
client clientset.Interface
namespace string
nodeSelectorList []*NodeSelector
volumeCount int
numberOfInstances int
volumesPerPod int
policyName string
datastoreName string
nodeVolumeMapChan chan map[string][]string
nodes *v1.NodeList
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeVolumeMapChan = make(chan map[string][]string)
// Read the environment variables
volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5")
Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodes = framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
// Verify volume count specified by the user can be satisfied
if volumeCount > volumesPerNode*len(nodes.Items) {
framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
}
nodeSelectorList = createNodeLabels(client, namespace, nodes)
})
/*
Remove labels from all the nodes
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
if len(namespace) > 0 {
for _, node := range nodes.Items {
framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
}
}
})
It("vsphere scale tests", func() {
var pvcClaimList []string
nodeVolumeMap := make(map[string][]string)
// Volumes will be provisioned with each different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %q", scname))
var sc *storageV1.StorageClass
scParams := make(map[string]string)
var err error
switch scname {
case storageclass1:
scParams = nil
case storageclass2:
scParams[Policy_HostFailuresToTolerate] = "1"
case storageclass3:
scParams[SpbmStoragePolicy] = policyName
case storageclass4:
scParams[Datastore] = datastoreName
}
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams))
Expect(sc).NotTo(BeNil(), "Storage class is empty")
Expect(err).NotTo(HaveOccurred(), "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(scname, nil)
scArrays[index] = sc
}
volumeCountPerInstance := volumeCount / numberOfInstances
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
if instanceCount == numberOfInstances-1 {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
}
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
for node, volumeList := range <-nodeVolumeMapChan {
nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
}
}
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
for _, pod := range podList.Items {
pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
By("Deleting pod")
err = framework.DeletePodWithWait(f, client, &pod)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList {
err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
Expect(err).NotTo(HaveOccurred())
}
})
})
// Get PVC claims for the pod
func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
pvcClaimList := make([]string, volumesPerPod)
for i, volumespec := range pod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName
}
}
return pvcClaimList
}
// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
if (volumeCountPerInstance - index) < volumesPerPod {
volumesPerPod = volumeCountPerInstance - index
}
pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
for i := 0; i < volumesPerPod; i++ {
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
Expect(err).NotTo(HaveOccurred())
pvclaims[i] = pvclaim
}
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PV to the node")
nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
for _, pv := range persistentvolumes {
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
nodeSelectorIndex++
}
nodeVolumeMapChan <- nodeVolumeMap
close(nodeVolumeMapChan)
}
func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
var nodeSelectorList []*NodeSelector
for i, node := range nodes.Items {
labelVal := "vsphere_e2e_" + strconv.Itoa(i)
nodeSelector := &NodeSelector{
labelKey: NodeLabelKey,
labelValue: labelVal,
}
nodeSelectorList = append(nodeSelectorList, nodeSelector)
framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
}
return nodeSelectorList
}

View File

@ -1,153 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
The test performs the following operations.
Steps
1. Create a storage class with thin diskformat.
2. Create nginx service.
3. Create nginx statefulsets with 3 replicas.
4. Wait until all Pods are ready and PVCs are bound with PVs.
5. Verify volumes are accessible in all statefulset pods by creating an empty file.
6. Scale down statefulsets to 2 replicas.
7. Scale up statefulsets to 4 replicas.
8. Scale down statefulsets to 0 replicas and delete all pods.
9. Delete all PVCs from the test namespace.
10. Delete the storage class.
*/
const (
manifestPath = "test/e2e/testing-manifests/statefulset/nginx"
mountPath = "/usr/share/nginx/html"
storageclassname = "nginx-sc"
)
var _ = utils.SIGDescribe("vsphere statefulset", func() {
f := framework.NewDefaultFramework("vsphere-statefulset")
var (
namespace string
client clientset.Interface
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
namespace = f.Namespace.Name
client = f.ClientSet
Bootstrap(f)
})
AfterEach(func() {
framework.Logf("Deleting all statefulset in namespace: %v", namespace)
framework.DeleteAllStatefulSets(client, namespace)
})
It("vsphere statefulset testing", func() {
By("Creating StorageClass for Statefulset")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters)
sc, err := client.StorageV1().StorageClasses().Create(scSpec)
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)
By("Creating statefulset")
statefulsetTester := framework.NewStatefulSetTester(client)
statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)
replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
Expect(statefulsetTester.CheckMount(statefulset, mountPath)).NotTo(HaveOccurred())
ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
// Get the list of Volumes attached to Pods before scale down
volumesBeforeScaleDown := make(map[string]string)
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName
}
}
}
By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
Expect(scaledownErr).NotTo(HaveOccurred())
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
// After scale down, verify vsphere volumes are detached from deleted pods
By("Verify Volumes are detached from Nodes after the Statefulset is scaled down")
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
if err != nil {
Expect(apierrs.IsNotFound(err)).To(BeTrue())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
Expect(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)).NotTo(HaveOccurred())
}
}
}
}
By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
_, scaleupErr := statefulsetTester.Scale(statefulset, replicas)
Expect(scaleupErr).NotTo(HaveOccurred())
statefulsetTester.WaitForStatusReplicas(statefulset, replicas)
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset)
Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
// After scale up, verify all vsphere volumes are attached to node VMs.
By("Verify all volumes are attached to Nodes after the Statefulset is scaled up")
for _, sspod := range ssPodsAfterScaleUp.Items {
err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
Expect(err).NotTo(HaveOccurred())
pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for _, volumespec := range pod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
// Verify scale up has re-attached the same volumes and not introduced new volume
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
}
}
}
})
})

View File

@ -1,172 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"sync"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Induce stress to create volumes in parallel with multiple threads based on user configurable values for number of threads and iterations per thread.
The following actions will be performed as part of this test.
1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.)
2. Read VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from the system environment.
3. Launch goroutines for volume lifecycle operations.
4. Each goroutine iterates n times, where n is read from the system environment variable VCP_STRESS_ITERATIONS.
5. Each iteration creates a PVC and a pod using the provisioned PV, verifies the disk is attached to the node, verifies the pod can access the volume, deletes the pod and finally deletes the PVC.
*/
var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("vcp-stress")
var (
client clientset.Interface
namespace string
instances int
iterations int
policyName string
datastoreName string
err error
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
// If VCP_STRESS_INSTANCES is 12 and VCP_STRESS_ITERATIONS is 10, 12 goroutines run in parallel for 10 iterations each,
// resulting in 120 volume and pod creations. Volumes are provisioned with each of the different types of storage class.
// Each iteration creates a PVC, verifies the PV is provisioned, creates a pod, verifies the volume is attached to the node, and then deletes the pod and the PVC.
instances = GetAndExpectIntEnvVar(VCPStressInstances)
Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
iterations = GetAndExpectIntEnvVar(VCPStressIterations)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS")
Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
})
It("vsphere stress tests", func() {
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %v", scname))
var sc *storageV1.StorageClass
var err error
switch scname {
case storageclass1:
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
case storageclass3:
var scSPBMPolicyParameters map[string]string
scSPBMPolicyParameters = make(map[string]string)
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
case storageclass4:
var scWithDSParameters map[string]string
scWithDSParameters = make(map[string]string)
scWithDSParameters[Datastore] = datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
}
Expect(sc).NotTo(BeNil())
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(scname, nil)
scArrays[index] = sc
}
var wg sync.WaitGroup
wg.Add(instances)
for instanceCount := 0; instanceCount < instances; instanceCount++ {
instanceId := fmt.Sprintf("Thread:%v", instanceCount+1)
go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceId, scArrays[instanceCount%len(scArrays)], iterations, &wg)
}
wg.Wait()
})
})
// PerformVolumeLifeCycleInParallel performs volume lifecycle operations in parallel; it is launched as a goroutine by the stress test above.
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
defer wg.Done()
defer GinkgoRecover()
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred())
// Get the copy of the Pod to know the assigned node name.
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred())
}
}

View File

@ -1,822 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
"math/rand"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
volumesPerNode = 55
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
DummyDiskName = "kube-dummyDisk.vmdk"
ProviderPrefix = "vsphere://"
)
// volumeState represents the state of a volume.
type volumeState int32
const (
volumeStateDetached volumeState = 1
volumeStateAttached volumeState = 2
)
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
var (
err error
disksAttached = true
detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second
)
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
attachedResult, err := disksAreAttached(nodeVolumes)
if err != nil {
return false, err
}
for nodeName, nodeVolumes := range attachedResult {
for volumePath, attached := range nodeVolumes {
if attached {
framework.Logf("Waiting for volume %q to detach from %q.", volumePath, nodeName)
return false, nil
}
}
}
disksAttached = false
framework.Logf("Volumes are successfully detached from all the nodes: %+v", nodeVolumes)
return true, nil
})
if err != nil {
return err
}
if disksAttached {
return fmt.Errorf("Gave up waiting for volumes to detach after %v", detachTimeout)
}
return nil
}
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
var (
err error
diskAttached bool
currentState volumeState
timeout = 6 * time.Minute
pollTime = 10 * time.Second
)
var attachedState = map[bool]volumeState{
true: volumeStateAttached,
false: volumeStateDetached,
}
var attachedStateMsg = map[volumeState]string{
volumeStateAttached: "attached to",
volumeStateDetached: "detached from",
}
err = wait.Poll(pollTime, timeout, func() (bool, error) {
diskAttached, err = diskIsAttached(volumePath, nodeName)
if err != nil {
return true, err
}
currentState = attachedState[diskAttached]
if currentState == expectedState {
framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
return true, nil
}
framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
return false, nil
})
if err != nil {
return err
}
if currentState != expectedState {
err = fmt.Errorf("Gave up waiting for Volume %q to be %s %q after %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout)
}
return err
}
// Wait until vsphere vmdk is attached from the given node or time out after 6 minutes
func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
}
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
}
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
var (
pvConfig framework.PersistentVolumeConfig
pv *v1.PersistentVolume
claimRef *v1.ObjectReference
)
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
Prebind: nil,
}
pv = &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Annotations: map[string]string{
util.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
PersistentVolumeSource: pvConfig.PVSource,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: claimRef,
},
}
if labels != nil {
pv.Labels = labels
}
return pv
}
// function to get vsphere persistent volume claim spec with the given selector labels.
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
var (
pvc *v1.PersistentVolumeClaim
)
pvc = &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
},
},
}
if labels != nil {
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
return pvc
}
// function to write content to the volume backed by given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
framework.Logf("Done with writing content to volume")
}
// function to verify content is matching on the volume backed for given PVC
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
framework.Logf("Successfully verified content of the volume")
}
func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass {
var sc *storage.StorageClass
sc = &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Provisioner: "kubernetes.io/vsphere-volume",
}
if scParameters != nil {
sc.Parameters = scParameters
}
return sc
}
func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
},
},
StorageClassName: &(storageclass.Name),
},
}
return claim
}
// func to get pod spec with given volume claim, node selector labels and command
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pod-pvc-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
if nodeSelectorKV != nil {
pod.Spec.NodeSelector = nodeSelectorKV
}
return pod
}
// func to get pod spec with given volume paths, node selector labels and container commands
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
var volumeMounts []v1.VolumeMount
var volumes []v1.Volume
for index, volumePath := range volumePaths {
name := fmt.Sprintf("volume%v", index+1)
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
vsphereVolume.VolumePath = volumePath
vsphereVolume.FSType = "ext4"
volumes = append(volumes, v1.Volume{Name: name})
volumes[index].VolumeSource.VsphereVolume = vsphereVolume
}
if commands == nil || len(commands) == 0 {
commands = []string{
"/bin/sh",
"-c",
"while true; do sleep 2; done",
}
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vsphere-e2e-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "busybox",
Command: commands,
VolumeMounts: volumeMounts,
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: volumes,
},
}
if keyValuelabel != nil {
pod.Spec.NodeSelector = keyValuelabel
}
return pod
}
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
}
}
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
Expect(err).NotTo(HaveOccurred())
}
}
// verify volumes are attached to the node and are accessible in pod
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred())
}
}
// Get vSphere Volume Path from PVC
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pv.Spec.VsphereVolume.VolumePath
}
// Get canonical volume path for volume Path.
// Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
var folderID string
canonicalVolumePath := volumePath
dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
if err != nil {
return "", err
}
dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
if len(dsPath) <= 1 {
return canonicalVolumePath, nil
}
datastore := dsPathObj.Datastore
dsFolder := dsPath[0]
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
if !isValidUUID(dsFolder) {
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
// Querying a non-existent dummy disk on the datastore folder.
// The query fails and returns a folder ID in the error message.
_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
if err != nil {
re := regexp.MustCompile("File (.*?) was not found")
match := re.FindStringSubmatch(err.Error())
canonicalVolumePath = match[1]
}
}
diskPath := getPathFromVMDiskPath(canonicalVolumePath)
if diskPath == "" {
return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
}
folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
return canonicalVolumePath, nil
}
// getPathFromVMDiskPath retrieves the path from VM Disk Path.
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
func getPathFromVMDiskPath(vmDiskPath string) string {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
return ""
}
return datastorePathObj.Path
}
//getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path.
func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse volPath: %s", vmDiskPath)
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
}
// getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
diskPath += ".vmdk"
}
vdm := object.NewVirtualDiskManager(dc.Client())
// Returns uuid of vmdk virtual disk
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
if err != nil {
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
return diskUUID, nil
}
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
func formatVirtualDiskUUID(uuid string) string {
uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
return strings.ToLower(uuidWithNoHyphens)
}
// isValidUUID checks if the string is a valid UUID.
func isValidUUID(uuid string) bool {
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
return r.MatchString(uuid)
}
// removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath
// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
// getVirtualDeviceByPath gets the virtual device by path
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
} else {
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
}
}
}
}
return nil, nil
}
func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
fileExt := ".vmdk"
diskPath = strings.TrimSuffix(diskPath, fileExt)
volPath = strings.TrimSuffix(volPath, fileExt)
return diskPath == volPath
}
// convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts them to canonical paths
func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
vmVolumes := make(map[string][]string)
for nodeName, volPaths := range nodeVolumes {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
for i, volPath := range volPaths {
deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
if err != nil {
framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
return nil, err
}
volPaths[i] = deviceVolPath
}
vmVolumes[nodeName] = volPaths
}
return vmVolumes, nil
}
// convertVolPathToDevicePath takes volPath and returns canonical volume path
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
// Get the canonical volume path for volPath.
canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
if err != nil {
framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
return "", err
}
// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
canonicalVolumePath += ".vmdk"
}
return canonicalVolumePath, nil
}
// get .vmx file path for a virtual machine
func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var nodeVM mo.VirtualMachine
err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
Expect(err).NotTo(HaveOccurred())
Expect(nodeVM.Config).NotTo(BeNil())
vmxPath = nodeVM.Config.Files.VmPathName
framework.Logf("vmx file path is %s", vmxPath)
return vmxPath
}
// verifyReadyNodeCount verifies the ready node count, retrying for up to 3 minutes. It returns true if the count matches the expected count.
func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
numNodes := 0
for i := 0; i < 36; i++ {
nodeList := framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
numNodes = len(nodeList.Items)
if numNodes == expectedNodes {
break
}
time.Sleep(5 * time.Second)
}
return (numNodes == expectedNodes)
}
// poweroff nodeVM and confirm the poweroff state
func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
framework.Logf("Powering off node VM %s", nodeName)
_, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
}
// poweron nodeVM and confirm the poweron state
func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
framework.Logf("Powering on node VM %s", nodeName)
_, err := vm.PowerOn(ctx)
Expect(err).NotTo(HaveOccurred(), "Unable to initiate power on for node VM "+nodeName)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
}
// unregister a nodeVM from VC
func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
poweroffNodeVM(nodeName, vm)
framework.Logf("Unregistering node VM %s", nodeName)
err := vm.Unregister(ctx)
Expect(err).NotTo(HaveOccurred(), "Unable to unregister the node")
}
// register a nodeVM into a VC
func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
Expect(err).NotTo(HaveOccurred())
registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
Expect(err).NotTo(HaveOccurred())
err = registerTask.Wait(ctx)
Expect(err).NotTo(HaveOccurred())
vmPath := filepath.Join(workingDir, nodeName)
vm, err := finder.VirtualMachine(ctx, vmPath)
Expect(err).NotTo(HaveOccurred())
poweronNodeVM(nodeName, vm)
}
// disksAreAttached takes a map of node names to volume paths and returns a map of node name to volume path to attachment state
func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disksAttached := make(map[string]map[string]bool)
if len(nodeVolumes) == 0 {
return disksAttached, nil
}
// Convert VolPaths into canonical form so that it can be compared with the VM device path.
vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
if err != nil {
framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
return nil, err
}
for vm, volumes := range vmVolumes {
volumeAttachedMap := make(map[string]bool)
for _, volume := range volumes {
attached, err := diskIsAttached(volume, vm)
if err != nil {
return nil, err
}
volumeAttachedMap[volume] = attached
}
disksAttached[vm] = volumeAttachedMap
}
return disksAttached, nil
}
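// Editor's note: illustrative call pattern only; the node name and volume path are placeholders.
// disksAreAttached above canonicalizes the given volume paths and reports, per node and per
// volume, whether the disk is currently attached.
func exampleDisksAreAttached() {
nodeVolumes := map[string][]string{
"kubernetes-node-1": {"[vsanDatastore] kubevols/e2e-disk-1.vmdk"}, // hypothetical values
}
attachedMap, err := disksAreAttached(nodeVolumes)
Expect(err).NotTo(HaveOccurred())
for node, volumes := range attachedMap {
for volume, attached := range volumes {
framework.Logf("node %s, volume %s, attached: %t", node, volume, attached)
}
}
}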
// diskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
func diskIsAttached(volPath string, nodeName string) (bool, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
Connect(ctx, nodeInfo.VSphere)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
device, err := getVirtualDeviceByPath(ctx, vm, volPath)
if err != nil {
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
nodeName)
return false, err
}
if device == nil {
return false, nil
}
framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
return true, nil
}
// getUUIDFromProviderID strips ProviderPrefix - "vsphere://" - from the providerID.
// This gives the VM UUID, which can be used to find the Node VM in vCenter.
func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}
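// Editor's note: minimal sketch, not in the original file; the UUID below is made up.
// A Node's spec.providerID has the form "vsphere://<vm-uuid>", and the helper above
// returns the bare VM UUID expected by vCenter lookups.
func exampleGetUUIDFromProviderID() {
providerID := "vsphere://4237d3e1-6f7c-4e2a-9f2b-0123456789ab" // hypothetical value
framework.Logf("VM UUID: %s", getUUIDFromProviderID(providerID))
}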
// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes in the Ready and schedulable state
func GetReadySchedulableNodeInfos() []*NodeInfo {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
var nodesInfo []*NodeInfo
for _, node := range nodeList.Items {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
if nodeInfo != nil {
nodesInfo = append(nodesInfo, nodeInfo)
}
}
return nodesInfo
}
// GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one of the Ready and schedulable nodes.
// If multiple nodes are present in the Ready and schedulable state, one of them is selected at random
// and its associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()
rand.Seed(time.Now().Unix())
Expect(nodesInfo).NotTo(BeEmpty())
return nodesInfo[rand.Int()%len(nodesInfo)]
}
// invokeVCenterServiceControl invokes the given command for the given service
// via service-control on the given vCenter host over SSH.
func invokeVCenterServiceControl(command, service, host string) error {
sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
result, err := framework.SSH(sshCmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
framework.LogSSHResult(result)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
}
return nil
}
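// Editor's note: illustrative usage only; the host address and service name are placeholders.
// The helper above runs "service-control --<command> <service>" on the vCenter host over SSH.
func exampleInvokeVCenterServiceControl() {
vcHost := "vcenter.example.com:22" // hypothetical vCenter address
if err := invokeVCenterServiceControl("restart", "vsan-health", vcHost); err != nil {
framework.Logf("service-control invocation failed: %v", err)
}
}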
// expectVolumeToBeAttached checks if the given Volume is attached to the given
// Node, else fails.
func expectVolumeToBeAttached(nodeName, volumePath string) {
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
}
// expectVolumesToBeAttached checks if the given Volumes are attached to the
// corresponding set of Nodes, else fails.
func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) {
for i, pod := range pods {
nodeName := pod.Spec.NodeName
volumePath := volumePaths[i]
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
}
}
// expectFilesToBeAccessible checks if the given files are accessible on the
// corresponding set of Nodes, else fails.
func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) {
for i, pod := range pods {
podName := pod.Name
filePath := filePaths[i]
By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
}
}
// writeContentToPodFile writes the given content to the specified file.
func writeContentToPodFile(namespace, podName, filePath, content string) error {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
return err
}
// expectFileContentToMatch checks if a given file contains the specified
// content, else fails.
func expectFileContentToMatch(namespace, podName, filePath, content string) {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
}
// expectFileContentsToMatch checks if the given contents match the ones present
// in corresponding files on respective Pods, else fails.
func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
for i, pod := range pods {
podName := pod.Name
filePath := filePaths[i]
By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
expectFileContentToMatch(namespace, podName, filePath, contents[i])
}
}
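// Editor's note: minimal usage sketch combining the two helpers above; the file path and
// content are hypothetical. Write a marker into a file on the pod, then assert it is present.
func exampleWriteAndMatchPodFile(namespace string, pod *v1.Pod) {
filePath := "/mnt/volume1/marker" // hypothetical mount path
content := "e2e-marker"
err := writeContentToPodFile(namespace, pod.Name, filePath, content)
Expect(err).NotTo(HaveOccurred())
expectFileContentToMatch(namespace, pod.Name, filePath, content)
}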

View File

@ -1,131 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Tests to verify volume provisioning on a clustered datastore
1. Static provisioning
2. Dynamic provisioning
3. Dynamic provisioning with spbm policy
This test reads the following environment variables:
1. CLUSTER_DATASTORE, which should be set to a clustered datastore
2. VSPHERE_SPBM_POLICY_DS_CLUSTER, which should be set to a tag-based SPBM policy tagged to a clustered datastore
*/
var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-provision")
var (
client clientset.Interface
namespace string
scParameters map[string]string
clusterDatastore string
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeInfo = GetReadySchedulableRandomNodeInfo()
scParameters = make(map[string]string)
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
})
/*
Steps:
1. Create volume options with datastore to be a clustered datastore
2. Create a vsphere volume
3. Create podspec with volume path. Create a corresponding pod
4. Verify disk is attached
5. Delete the pod and wait for the disk to be detached
6. Delete the volume
*/
It("verify static provisioning on clustered datastore", func() {
var volumePath string
By("creating a test vsphere volume")
volumeOptions := new(VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + namespace
volumeOptions.Datastore = clusterDatastore
volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Deleting the vsphere volume")
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
By("Creating pod")
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
nodeName := pod.Spec.NodeName
By("Verifying volume is attached")
expectVolumeToBeAttached(nodeName, volumePath)
By("Deleting pod")
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
})
/*
Steps:
1. Create storage class parameter and specify datastore to be a clustered datastore name
2. invokeValidPolicyTest - util to do e2e dynamic provision test
*/
It("verify dynamic provision with default parameter on clustered datastore", func() {
scParameters[Datastore] = clusterDatastore
invokeValidPolicyTest(f, client, namespace, scParameters)
})
/*
Steps:
1. Create storage class parameter and specify storage policy to be a tag based spbm policy
2. invokeValidPolicyTest - util to do e2e dynamic provision test
*/
It("verify dynamic provision with spbm policy on clustered datastore", func() {
policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster)
scParameters[SpbmStoragePolicy] = policyDatastoreCluster
invokeValidPolicyTest(f, client, namespace, scParameters)
})
})

View File

@ -1,97 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
InvalidDatastore = "invalidDatastore"
DatastoreSCName = "datastoresc"
)
/*
Test to verify that the datastore specified in the storage class is honored during volume creation.
Steps
1. Create StorageClass with invalid datastore.
2. Create PVC which uses the StorageClass created in step 1.
3. Expect the PVC to fail.
4. Verify that the error returned on PVC failure is correct.
*/
var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-datastore")
var (
client clientset.Interface
namespace string
scParameters map[string]string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
scParameters = make(map[string]string)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
})
It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
By("Invoking Test for invalid datastore")
scParameters[Datastore] = InvalidDatastore
scParameters[DiskFormat] = ThinDisk
err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": The specified datastore ` + InvalidDatastore + ` is not a shared datastore across node VMs`
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
})
func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Expect claim to fail provisioning volume")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
if err != nil || len(eventList.Items) == 0 {
return fmt.Errorf("expected a provisioning failure event for PVC %q, got error: %v", pvclaim.Name, err)
}
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}

View File

@ -1,212 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to verify that the diskformat specified in the storage class is honored during volume creation.
Valid and supported options are eagerzeroedthick, zeroedthick and thin
Steps
1. Create StorageClass with diskformat set to valid type
2. Create PVC which uses the StorageClass created in step 1.
3. Wait for PV to be provisioned.
4. Wait for PVC's status to become Bound
5. Create pod using PVC on specific node.
6. Wait for Disk to be attached to the node.
7. Get node VM's devices and find PV's Volume Disk.
8. Get Backing Info of the Volume Disk and obtain EagerlyScrub and ThinProvisioned
9. Based on the value of EagerlyScrub and ThinProvisioned, verify diskformat is correct.
10. Delete pod and Wait for Volume Disk to be detached from the Node.
11. Delete PVC, PV and Storage Class
*/
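// Editor's note: clarifying summary added by the editor, not part of the original file.
// Step 9 maps the storage-class diskformat to the expected VirtualDiskFlatVer2BackingInfo
// flags checked by verifyDiskFormat below:
//
//	eagerzeroedthick -> EagerlyScrub=true,  ThinProvisioned=false
//	zeroedthick      -> EagerlyScrub=false, ThinProvisioned=false
//	thin             -> EagerlyScrub=false, ThinProvisioned=true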
var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-disk-format")
const (
NodeLabelKey = "vsphere_e2e_label_volume_diskformat"
)
var (
client clientset.Interface
namespace string
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
if !isNodeLabeled {
nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
isNodeLabeled = true
}
})
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(namespace) > 0 && len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(client, nodeName, NodeLabelKey)
}
})
It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: eagerzeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick")
})
It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: zeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick")
})
It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: thin")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin")
})
})
func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
framework.Logf("Invoking Test for DiskFomat: %s", diskFormat)
scParameters := make(map[string]string)
scParameters["diskformat"] = diskFormat
By("Creating Storage Class With DiskFormat")
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
Expect(err).NotTo(HaveOccurred())
defer func() {
client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil)
}()
By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// Get new copy of the claim
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
/*
The PV must be attached to the Node so that, using the govmomi API, we can grab the disk's backing info
to check the EagerlyScrub and ThinProvisioned properties.
*/
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done")
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue())
By("Verify Disk Format")
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths)
}
func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
By("Verifing disk format")
eagerlyScrub := false
thinProvisioned := false
diskFound := false
pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
vmDevices, err := vm.Device(ctx)
Expect(err).NotTo(HaveOccurred())
disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))
for _, disk := range disks {
backing := disk.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backingFileName := filepath.Base(backing.FileName) + filepath.Ext(backing.FileName)
if backingFileName == pvvmdkfileName {
diskFound = true
if backing.EagerlyScrub != nil {
eagerlyScrub = *backing.EagerlyScrub
}
if backing.ThinProvisioned != nil {
thinProvisioned = *backing.ThinProvisioned
}
break
}
}
Expect(diskFound).To(BeTrue(), "Failed to find disk")
isDiskFormatCorrect := false
switch diskFormat {
case "eagerzeroedthick":
isDiskFormatCorrect = eagerlyScrub && !thinProvisioned
case "zeroedthick":
isDiskFormatCorrect = !eagerlyScrub && !thinProvisioned
case "thin":
isDiskFormatCorrect = !eagerlyScrub && thinProvisioned
}
return isDiskFormatCorrect
}

View File

@ -1,95 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
DiskSizeSCName = "disksizesc"
)
/*
Test to verify that the disk size specified in the PVC is honored during volume creation.
Steps
1. Create StorageClass.
2. Create PVC with invalid disk size which uses the StorageClass created in step 1.
3. Expect the PVC to fail.
4. Verify that the error returned on PVC failure is correct.
*/
var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-disksize")
var (
client clientset.Interface
namespace string
scParameters map[string]string
datastore string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
scParameters = make(map[string]string)
datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
})
It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() {
By("Invoking Test for invalid disk size")
scParameters[Datastore] = datastore
scParameters[DiskFormat] = ThinDisk
diskSize := "1"
err := invokeInvalidDiskSizeTestNeg(client, namespace, scParameters, diskSize)
Expect(err).To(HaveOccurred())
errorMsg := `Failed to provision volume with StorageClass \"` + DiskSizeSCName + `\": A specified parameter was not correct`
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
})
func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error {
By("Creating Storage Class With invalid disk size")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Expect claim to fail provisioning volume")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
if err != nil || len(eventList.Items) == 0 {
return fmt.Errorf("expected a provisioning failure event for PVC %q, got error: %v", pvclaim.Name, err)
}
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}

View File

@ -1,189 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
Ext4FSType = "ext4"
Ext3FSType = "ext3"
InvalidFSType = "ext10"
ExecCommand = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
)
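// Editor's note: clarifying comment, not part of the original file. ExecCommand records the
// filesystem type of the volume mounted at /mnt/volume1 into /mnt/volume1/fstype (e.g. a
// single line "ext4") and then keeps the pod alive; invokeTestForFstype below reads that
// file back to verify that the fstype requested in the storage class was applied.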
/*
Test to verify fstype specified in storage-class is being honored after volume creation.
Steps
1. Create StorageClass with fstype set to valid type (default case included).
2. Create PVC which uses the StorageClass created in step 1.
3. Wait for PV to be provisioned.
4. Wait for PVC's status to become Bound.
5. Create pod using PVC on specific node.
6. Wait for Disk to be attached to the node.
7. Execute command in the pod to get fstype.
8. Delete pod and Wait for Volume Disk to be detached from the Node.
9. Delete PVC, PV and Storage Class.
Test to verify if an invalid fstype specified in storage class fails pod creation.
Steps
1. Create StorageClass with an invalid fstype.
2. Create PVC which uses the StorageClass created in step 1.
3. Wait for PV to be provisioned.
4. Wait for PVC's status to become Bound.
5. Create pod using PVC.
6. Verify if the pod creation fails.
7. Verify if the MountVolume.MountDevice fails because it is unable to find the file system executable file on the node.
*/
var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-fstype")
var (
client clientset.Interface
namespace string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
})
It("verify fstype - ext3 formatted volume", func() {
By("Invoking Test for fstype: ext3")
invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType)
})
It("verify fstype - default value should be ext4", func() {
By("Invoking Test for fstype: Default Value - ext4")
invokeTestForFstype(f, client, namespace, "", Ext4FSType)
})
It("verify invalid fstype", func() {
By("Invoking Test for fstype: invalid Value")
invokeTestForInvalidFstype(f, client, namespace, InvalidFSType)
})
})
func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) {
framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
// Create Persistent Volume
By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
_, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred())
// Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
}
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
// Create Persistent Volume
By("Creating Storage Class With Invalid Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
By("Creating pod to attach PV to the node")
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
Expect(err).To(HaveOccurred())
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
// Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
Expect(eventList.Items).NotTo(BeEmpty())
errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
isFound := false
for _, item := range eventList.Items {
if strings.Contains(item.Message, errorMsg) {
isFound = true
}
}
Expect(isFound).To(BeTrue(), "Unable to verify MountVolume.MountDevice failure")
}
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters))
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
return pvclaim, persistentvolumes
}
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
Expect(err).NotTo(HaveOccurred())
// Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
return pod
}
// detachVolume deletes the pod passed in the argument and waits until the volume is detached from its node.
func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(BeNil())
nodeName := pod.Spec.NodeName
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(volPath, nodeName)
}

View File

@ -1,138 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to verify volume remains attached after kubelet restart on master node
For the number of schedulable nodes,
1. Create a volume with default volume options
2. Create a Pod
3. Verify the volume is attached
4. Restart the kubelet on master node
5. Verify again that the volume is attached
6. Delete the pod and wait for the volume to be detached
7. Delete the volume
*/
var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() {
f := framework.NewDefaultFramework("restart-master")
const labelKey = "vsphere_e2e_label"
var (
client clientset.Interface
namespace string
volumePaths []string
pods []*v1.Pod
numNodes int
nodeKeyValueLabelList []map[string]string
nodeNameList []string
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(client)
numNodes = len(nodes.Items)
if numNodes < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
for i := 0; i < numNodes; i++ {
nodeName := nodes.Items[i].Name
nodeNameList = append(nodeNameList, nodeName)
nodeLabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel := make(map[string]string)
nodeKeyValueLabel[labelKey] = nodeLabelValue
nodeKeyValueLabelList = append(nodeKeyValueLabelList, nodeKeyValueLabel)
framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue)
}
})
It("verify volume remains attached after master kubelet restart", func() {
// Create pod on each node
for i := 0; i < numNodes; i++ {
By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePodWithWait(f, client, pod)
By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pods = append(pods, pod)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
}
By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22"
err := framework.RestartKubelet(masterAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node")
By("Verifying the kubelet on master node is up")
err = framework.WaitForKubeletUp(masterAddress)
Expect(err).NotTo(HaveOccurred())
for i, pod := range pods {
volumePath := volumePaths[i]
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath))
err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
})
})

View File

@ -1,119 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("node-unregister")
var (
client clientset.Interface
namespace string
workingDir string
err error
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
Expect(err).NotTo(HaveOccurred())
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
})
It("node unregister", func() {
By("Get total Ready nodes")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
totalNodesCount := len(nodeList.Items)
nodeVM := nodeList.Items[0]
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeVM.ObjectMeta.Name)
vmObject := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
// Find VM .vmx file path, host, resource pool.
// They are required to register a node VM to VC
vmxFilePath := getVMXFilePath(vmObject)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vmHost, err := vmObject.HostSystem(ctx)
Expect(err).NotTo(HaveOccurred())
vmPool, err := vmObject.ResourcePool(ctx)
Expect(err).NotTo(HaveOccurred())
// Unregister Node VM
By("Unregister a node VM")
unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject)
// Ready nodes should be 1 less
By("Verifying the ready node counts")
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(BeTrue(), "Unable to verify expected ready node count")
nodeList = framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
var nodeNameList []string
for _, node := range nodeList.Items {
nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
}
Expect(nodeNameList).NotTo(ContainElement(nodeVM.ObjectMeta.Name))
// Register Node VM
By("Register back the node VM")
registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost)
// Ready nodes should be equal to earlier count
By("Verifying the ready node counts")
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(BeTrue(), "Unable to verify expected ready node count")
nodeList = framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
nodeNameList = nodeNameList[:0]
for _, node := range nodeList.Items {
nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
}
Expect(nodeNameList).To(ContainElement(nodeVM.ObjectMeta.Name))
// Sanity test that pod provisioning works
By("Sanity check for volume lifecycle")
scParameters := make(map[string]string)
storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment")
scParameters[SpbmStoragePolicy] = storagePolicy
invokeValidPolicyTest(f, client, namespace, scParameters)
})
})

View File

@ -1,184 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to verify volume status after node power off:
1. Verify the pod got provisioned on a different node with volume attached to it
2. Verify the volume is detached from the powered off node
*/
var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("node-poweroff")
var (
client clientset.Interface
namespace string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
})
/*
Steps:
1. Create a StorageClass
2. Create a PVC with the StorageClass
3. Create a Deployment with 1 replica, using the PVC
4. Verify the pod got provisioned on a node
5. Verify the volume is attached to the node
6. Power off the node where pod got provisioned
7. Verify the pod got provisioned on a different node
8. Verify the volume is attached to the new node
9. Verify the volume is detached from the old node
10. Delete the Deployment and wait for the volume to be detached
11. Delete the PVC
12. Delete the StorageClass
*/
It("verify volume status after node power off", func() {
By("Creating a Storage Class")
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create PVC with err: %v", err))
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath
By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployment")
podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get pods from the deployment with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
node1 := pod.Spec.NodeName
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := diskIsAttached(volumePath, node1)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1))
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
defer vm.PowerOn(ctx)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
// Waiting for the pod to be failed over to a different node
node2, err := waitForPodToFailover(client, deployment, node1)
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1))
vm.PowerOn(ctx)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
})
})
// waitForPodToFailover waits until the pod fails over to a different node, or times out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
var (
err error
newNode string
timeout = 3 * time.Minute
pollTime = 10 * time.Second
)
err = wait.Poll(pollTime, timeout, func() (bool, error) {
newNode, err = getNodeForDeployment(client, deployment)
if err != nil {
return true, err
}
if newNode != oldNode {
framework.Logf("The pod has been failed over from %q to %q", oldNode, newNode)
return true, nil
}
framework.Logf("Waiting for pod to be failed over from %q", oldNode)
return false, nil
})
if err != nil {
if err == wait.ErrWaitTimeout {
framework.Logf("Time out after waiting for %v", timeout)
}
framework.Logf("Pod did not fail over from %q with error: %v", oldNode, err)
return "", err
}
return getNodeForDeployment(client, deployment)
}
// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err
}
return podList.Items[0].Spec.NodeName, nil
}
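// Editor's note: illustrative call pattern, not part of the original test. After powering off
// the node hosting the deployment's pod, the test above waits for the scheduler to move the
// pod and then re-checks the attachment on the new node, roughly:
//
//	newNode, err := waitForPodToFailover(client, deployment, oldNode)
//	Expect(err).NotTo(HaveOccurred())
//	expectVolumeToBeAttached(newNode, volumePath)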

View File

@ -1,120 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"os"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to perform Disk Ops storm.
Steps
1. Create a storage class for thin provisioning.
2. Create 30 PVCs using the above storage class (referenced via annotation), each requesting a 2Gi volume.
3. Wait until all disks are ready and all PVs and PVCs are bound. (CreateVolume storm)
4. Create pod to mount volumes using PVCs created in step 2. (AttachDisk storm)
5. Wait for pod status to be running.
6. Verify all volumes accessible and available in the pod.
7. Delete pod.
8. Wait until volumes get detached. (DetachDisk storm)
9. Delete all PVCs. This should delete all Disks. (DeleteVolume storm)
10. Delete storage class.
*/
var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-ops-storm")
const DEFAULT_VOLUME_OPS_SCALE = 30
var (
client clientset.Interface
namespace string
storageclass *storage.StorageClass
pvclaims []*v1.PersistentVolumeClaim
persistentvolumes []*v1.PersistentVolume
err error
volume_ops_scale int
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
Expect(err).NotTo(HaveOccurred())
} else {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
})
AfterEach(func() {
By("Deleting PVCs")
for _, claim := range pvclaims {
framework.DeletePersistentVolumeClaim(client, claim.Name, namespace)
}
By("Deleting StorageClass")
err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
Expect(err).NotTo(HaveOccurred())
})
It("should create pod with many volumes and verify no attach call fails", func() {
By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale))
By("Creating Storage Class")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters))
Expect(err).NotTo(HaveOccurred())
By("Creating PVCs using the Storage Class")
count := 0
for count < volume_ops_scale {
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
count++
}
By("Waiting for all claims to be in bound phase")
persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PVs to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
})
})

View File

@ -1,234 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/* This test calculates latency numbers for volume lifecycle operations
1. Create 4 type of storage classes
2. Read the total number of volumes to be created and volumes per pod
3. Create total PVCs (number of volumes)
4. Create Pods with attached volumes per pod
5. Verify access to the volumes
6. Delete pods and wait for volumes to detach
7. Delete the PVCs
*/
const (
SCSIUnitsAvailablePerNode = 55
CreateOp = "CreateOp"
AttachOp = "AttachOp"
DetachOp = "DetachOp"
DeleteOp = "DeleteOp"
)
var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("vcp-performance")
var (
client clientset.Interface
namespace string
nodeSelectorList []*NodeSelector
policyName string
datastoreName string
volumeCount int
volumesPerPod int
iterations int
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
// Read the environment variables
volumeCount = GetAndExpectIntEnvVar(VCPPerfVolumeCount)
volumesPerPod = GetAndExpectIntEnvVar(VCPPerfVolumesPerPod)
iterations = GetAndExpectIntEnvVar(VCPPerfIterations)
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodes := framework.GetReadySchedulableNodesOrDie(client)
Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 1, len(nodes.Items))
msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items))
Expect(volumeCount).To(BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg)
msg = fmt.Sprintf("Cannot attach %d volumes per pod. Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode)
Expect(volumesPerPod).To(BeNumerically("<=", SCSIUnitsAvailablePerNode), msg)
nodeSelectorList = createNodeLabels(client, namespace, nodes)
})
It("vcp performance tests", func() {
scList := getTestStorageClasses(client, policyName, datastoreName)
defer func(scList []*storageV1.StorageClass) {
for _, sc := range scList {
client.StorageV1().StorageClasses().Delete(sc.Name, nil)
}
}(scList)
sumLatency := make(map[string]float64)
for i := 0; i < iterations; i++ {
latency := invokeVolumeLifeCyclePerformance(f, client, namespace, scList, volumesPerPod, volumeCount, nodeSelectorList)
for key, val := range latency {
sumLatency[key] += val
}
}
iterations64 := float64(iterations)
framework.Logf("Average latency for below operations")
framework.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64)
framework.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64)
framework.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64)
framework.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64)
})
})
func getTestStorageClasses(client clientset.Interface, policyName, datastoreName string) []*storageV1.StorageClass {
const (
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
)
scNames := []string{storageclass1, storageclass2, storageclass3, storageclass4}
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %v", scname))
var sc *storageV1.StorageClass
var err error
switch scname {
case storageclass1:
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
case storageclass2:
scVSanParameters := make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
case storageclass3:
scSPBMPolicyParameters := make(map[string]string)
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
case storageclass4:
scWithDSParameters := make(map[string]string)
scWithDSParameters[Datastore] = datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
}
Expect(sc).NotTo(BeNil())
Expect(err).NotTo(HaveOccurred())
scArrays[index] = sc
}
return scArrays
}
// invokeVolumeLifeCyclePerformance performs a full volume lifecycle and records the latency of each operation
func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) {
var (
totalpvclaims [][]*v1.PersistentVolumeClaim
totalpvs [][]*v1.PersistentVolume
totalpods []*v1.Pod
)
nodeVolumeMap := make(map[string][]string)
latency = make(map[string]float64)
numPods := volumeCount / volumesPerPod
By(fmt.Sprintf("Creating %d PVCs", volumeCount))
start := time.Now()
for i := 0; i < numPods; i++ {
var pvclaims []*v1.PersistentVolumeClaim
for j := 0; j < volumesPerPod; j++ {
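// Cycle through the storage classes so the claims are spread across all of them.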
currsc := sc[((i*numPods)+j)%len(sc)]
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc))
Expect(err).NotTo(HaveOccurred())
pvclaims = append(pvclaims, pvclaim)
}
totalpvclaims = append(totalpvclaims, pvclaims)
}
for _, pvclaims := range totalpvclaims {
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
totalpvs = append(totalpvs, persistentvolumes)
}
elapsed := time.Since(start)
latency[CreateOp] = elapsed.Seconds()
By("Creating pod to attach PVs to the node")
start = time.Now()
for i, pvclaims := range totalpvclaims {
nodeSelector := nodeSelectorList[i%len(nodeSelectorList)]
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
totalpods = append(totalpods, pod)
defer framework.DeletePodWithWait(f, client, pod)
}
elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds()
for i, pod := range totalpods {
verifyVSphereVolumesAccessible(client, pod, totalpvs[i])
}
By("Deleting pods")
start = time.Now()
for _, pod := range totalpods {
err := framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
}
elapsed = time.Since(start)
latency[DetachOp] = elapsed.Seconds()
for i, pod := range totalpods {
for _, pv := range totalpvs[i] {
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
}
err := waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
By("Deleting the PVCs")
start = time.Now()
for _, pvclaims := range totalpvclaims {
for _, pvc := range pvclaims {
err = framework.DeletePersistentVolumeClaim(client, pvc.Name, namespace)
Expect(err).NotTo(HaveOccurred())
}
}
elapsed = time.Since(start)
latency[DeleteOp] = elapsed.Seconds()
return latency
}

View File

@ -1,392 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Volume Placement", func() {
f := framework.NewDefaultFramework("volume-placement")
const (
NodeLabelKey = "vsphere_e2e_label_volume_placement"
)
var (
c clientset.Interface
ns string
volumePaths []string
node1Name string
node1KeyValueLabel map[string]string
node2Name string
node2KeyValueLabel map[string]string
isNodeLabeled bool
nodeInfo *NodeInfo
vsp *VSphere
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
}
By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
})
AfterEach(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
/*
Steps
1. Remove labels assigned to node 1 and node 2
2. Delete VMDK volume
*/
framework.AddCleanupAction(func() {
// Cleanup actions are called even when the tests are skipped, which leaves the namespace unset.
if len(ns) > 0 {
if len(node1KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
}
if len(node2KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
}
}
})
/*
Steps
1. Create pod Spec with volume path of the vmdk and NodeSelector set to the label assigned to node1.
2. Create pod and wait for pod to become ready.
3. Verify volume is attached to node1.
4. Create empty file on the volume to verify volume is writable.
5. Verify newly created file and previously created files exist on the volume.
6. Delete pod.
7. Wait for volume to be detached from node1.
8. Repeat steps 1 to 7 and make sure back-to-back pod creation on the same worker node with the same volume works as expected.
*/
It("should create and delete pod with the same volume source on the same worker node", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
})
/*
Steps
1. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node1's label.
2. Create pod and wait for POD to become ready.
3. Verify volume is attached to node1.
4. Create empty file on the volume to verify volume is writable.
5. Verify newly created file and previously created files exist on the volume.
6. Delete pod.
7. Wait for volume to be detached from node1.
8. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node2's label.
9. Create pod and wait for pod to become ready.
10. Verify volume is attached to node2.
11. Create empty file on the volume to verify volume is writable.
12. Verify newly created file and previously created files exist on the volume.
13. Delete pod.
*/
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
})
/*
Test multiple volumes from same datastore within the same pod
1. Create volumes - vmdk2
2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup) and vmdk2.
3. Create pod using spec created in step-2 and wait for pod to become ready.
4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
5. Delete pod.
6. Wait for vmdk1 and vmdk2 to be detached from node.
7. Create pod using spec created in step-2 and wait for pod to become ready.
8. Verify both volumes are attached to the node on which the pod is created. Verify volume contents match the content written in step 4.
9. Delete pod.
10. Wait for vmdk1 and vmdk2 to be detached from node.
*/
It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
volumeFiles := []string{
fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFilesNames := []string{
fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
}
volumeFiles = append(volumeFiles, newEmptyFilesNames[0])
volumeFiles = append(volumeFiles, newEmptyFilesNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFilesNames, volumeFiles)
})
/*
Test multiple volumes from different datastore within the same pod
1. Create volumes - vmdk2 on non default shared datastore.
2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup on default datastore) and vmdk2.
3. Create pod using spec created in step-2 and wait for pod to become ready.
4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
5. Delete pod.
6. Wait for vmdk1 and vmdk2 to be detached from node.
7. Create pod using spec created in step-2 and wait for pod to become ready.
8. Verify both volumes are attached to the node on which the pod is created. Verify volume contents match the content written in step 4.
9. Delete pod.
10. Wait for vmdk1 and vmdk2 to be detached from node.
*/
It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore")
volumeOptions := new(VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
volumeFiles := []string{
fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileNames := []string{
fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
}
volumeFiles = append(volumeFiles, newEmptyFileNames[0])
volumeFiles = append(volumeFiles, newEmptyFileNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
})
/*
Test Back-to-back pod creation/deletion with different volume sources on the same worker node
1. Create volumes - vmdk2
2. Create pod Spec - pod-SpecA with volume path of vmdk1 and NodeSelector set to label assigned to node1.
3. Create pod Spec - pod-SpecB with volume path of vmdk2 and NodeSelector set to label assigned to node1.
4. Create pod-A using pod-SpecA and wait for pod to become ready.
5. Create pod-B using pod-SpecB and wait for pod to become ready.
6. Verify volumes are attached to the node.
7. Create empty file on the volume to make sure the volume is accessible. (Perform this step on pod-A and pod-B.)
8. Verify the file created in step 7 is present on the volume. (Perform this step on pod-A and pod-B.)
9. Delete pod-A and pod-B.
10. Repeat steps 4 to 9 five times and verify the associated volume contents match.
11. Wait for vmdk1 and vmdk2 to be detached from node.
*/
It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
var (
podA *v1.Pod
podB *v1.Pod
testvolumePathsPodA []string
testvolumePathsPodB []string
podAFiles []string
podBFiles []string
)
defer func() {
By("clean up undeleted pods")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
}
}()
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
// Create another VMDK Volume
By("creating another vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
for index := 0; index < 5; index++ {
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
podAFiles = append(podAFiles, podAFileName)
podBFiles = append(podBFiles, podBFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable
By("Creating empty file on volume mounted on pod-A")
framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
By("Creating empty file volume mounted on pod-B")
framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
// Verify newly and previously created files present on the volume mounted on the pod
By("Verify newly Created file and previously created files present on volume mounted on pod-A")
verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...)
By("Verify newly Created file and previously created files present on volume mounted on pod-B")
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...)
By("Deleting pod-A")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name)
By("Deleting pod-B")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name)
}
})
})
func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
nodes := framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
node1Name = nodes.Items[0].Name
node2Name = nodes.Items[1].Name
node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
node1KeyValueLabel = make(map[string]string)
node1KeyValueLabel[NodeLabelKey] = node1LabelValue
framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
node2KeyValueLabel = make(map[string]string)
node2KeyValueLabel[NodeLabelKey] = node2LabelValue
framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
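// After this setup node1 carries a label such as
// vsphere_e2e_label_volume_placement=vsphere_e2e_<uuid>; the key/value pair is
// returned so createPodWithVolumeAndNodeSelector below can use it as the pod's
// node selector and pin the pod to the intended node.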
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
var pod *v1.Pod
var err error
By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
pod, err = client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths {
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
}
return pod
}
func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) {
// Create empty files on the mounted volumes on the pod to verify volume is writable
By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate)
// Verify newly and previously created files present on the volume mounted on the pod
By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname))
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...)
}
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
}
}
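// A minimal usage sketch (hypothetical file name) tying the helpers above
// together, mirroring what each It block in this file does: pin a pod to a
// labelled node, write and re-check a file on the mounted volume, then delete
// the pod and wait for the disk to detach.
//
//	pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
//	createAndVerifyFilesOnVolume(ns, pod.Name, []string{"/mnt/volume1/example.txt"}, []string{"/mnt/volume1/example.txt"})
//	deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)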

View File

@ -1,176 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to verify that a volume remains attached through vpxd restart.
For each schedulable node:
1. Create a Volume with default options.
2. Create a Pod with the created Volume.
3. Verify that the Volume is attached.
4. Create a file with random contents under the Volume's mount point on the Pod.
5. Stop the vpxd service on the vCenter host.
6. Verify that the file is accessible on the Pod and that its contents match.
7. Start the vpxd service on the vCenter host.
8. Verify that the Volume remains attached, the file is accessible on the Pod, and that its contents match.
9. Delete the Pod and wait for the Volume to be detached.
10. Delete the Volume.
*/
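// A sketch (hypothetical hostnames) of the per-vCenter grouping built in
// BeforeEach below: nodes are bucketed by the vCenter host that manages them,
// so vpxd can be stopped and restarted once per vCenter while every pod on
// that vCenter is exercised in the same pass.
//
//	vcNodesMap["vc1.example.local"] = []node{{name: "node-a"}, {name: "node-b"}}
//	vcNodesMap["vc2.example.local"] = []node{{name: "node-c"}}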
var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vsphere][Serial][Disruptive]", func() {
f := framework.NewDefaultFramework("restart-vpxd")
type node struct {
name string
kvLabels map[string]string
nodeInfo *NodeInfo
}
const (
labelKey = "vsphere_e2e_label_vpxd_restart"
vpxdServiceName = "vmware-vpxd"
)
var (
client clientset.Interface
namespace string
vcNodesMap map[string][]node
)
BeforeEach(func() {
// Requires SSH access to vCenter.
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(client)
numNodes := len(nodes.Items)
Expect(numNodes).NotTo(BeZero(), "No nodes are available for testing volume access through vpxd restart")
vcNodesMap = make(map[string][]node)
for i := 0; i < numNodes; i++ {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[i].Name)
nodeName := nodes.Items[i].Name
nodeLabel := "vsphere_e2e_" + string(uuid.NewUUID())
framework.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel)
vcHost := nodeInfo.VSphere.Config.Hostname
vcNodesMap[vcHost] = append(vcNodesMap[vcHost], node{
name: nodeName,
kvLabels: map[string]string{labelKey: nodeLabel},
nodeInfo: nodeInfo,
})
}
})
It("verify volume remains attached through vpxd restart", func() {
for vcHost, nodes := range vcNodesMap {
var (
volumePaths []string
filePaths []string
fileContents []string
pods []*v1.Pod
)
framework.Logf("Testing for nodes on vCenter host: %s", vcHost)
for i, node := range nodes {
By(fmt.Sprintf("Creating test vsphere volume %d", i))
volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, node.name))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for pod %d to be ready", i))
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pods = append(pods, pod)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
expectVolumeToBeAttached(nodeName, volumePath)
By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i))
filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10))
randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10))
err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent)
Expect(err).NotTo(HaveOccurred())
filePaths = append(filePaths, filePath)
fileContents = append(fileContents, randomContent)
}
By("Stopping vpxd on the vCenter host")
vcAddress := vcHost + ":22"
err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to stop vpxd on the vCenter host")
expectFilesToBeAccessible(namespace, pods, filePaths)
expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
By("Starting vpxd on the vCenter host")
err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to start vpxd on the vCenter host")
expectVolumesToBeAttached(pods, volumePaths)
expectFilesToBeAccessible(namespace, pods, filePaths)
expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
for i, node := range nodes {
pod := pods[i]
nodeName := pod.Spec.NodeName
volumePath := volumePaths[i]
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath))
err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
}
})
})

View File

@ -1,354 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"hash/fnv"
"time"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
VmfsDatastore = "sharedVmfs-0"
VsanDatastore = "vsanDatastore"
Datastore = "datastore"
Policy_DiskStripes = "diskStripes"
Policy_HostFailuresToTolerate = "hostFailuresToTolerate"
Policy_CacheReservation = "cacheReservation"
Policy_ObjectSpaceReservation = "objectSpaceReservation"
Policy_IopsLimit = "iopsLimit"
DiskFormat = "diskformat"
ThinDisk = "thin"
SpbmStoragePolicy = "storagepolicyname"
BronzeStoragePolicy = "bronze"
HostFailuresToTolerateCapabilityVal = "0"
CacheReservationCapabilityVal = "20"
DiskStripesCapabilityVal = "1"
ObjectSpaceReservationCapabilityVal = "30"
IopsLimitCapabilityVal = "100"
StripeWidthCapabilityVal = "2"
DiskStripesCapabilityInvalidVal = "14"
HostFailuresToTolerateCapabilityInvalidVal = "4"
DummyVMPrefixName = "vsphere-k8s"
DiskStripesCapabilityMaxVal = "11"
)
/*
Test to verify the storage policy based management for dynamic volume provisioning inside kubernetes.
There are 2 ways to achieve it:
1. Specify VSAN storage capabilities in the storage-class.
2. Use existing vCenter SPBM storage policies.
Valid VSAN storage capabilities are mentioned below:
1. hostFailuresToTolerate
2. forceProvisioning
3. cacheReservation
4. diskStripes
5. objectSpaceReservation
6. iopsLimit
Steps
1. Create StorageClass with either:
a. VSAN storage capabilities set to valid/invalid values, or
b. an existing vCenter SPBM storage policy.
2. Create PVC which uses the StorageClass created in step 1.
3. Wait for PV to be provisioned.
4. Wait for PVC's status to become Bound.
5. Create pod using PVC on a specific node.
6. Wait for disk to be attached to the node.
7. Delete pod and wait for the volume disk to be detached from the node.
8. Delete PVC, PV and StorageClass.
Both parameter styles are sketched below.
*/
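// A minimal sketch of the two parameter styles exercised below, using only the
// keys defined above ("gold" is a hypothetical SPBM policy name):
//
//	// 1. Raw VSAN storage capabilities:
//	scParameters := map[string]string{
//		Policy_HostFailuresToTolerate: HostFailuresToTolerateCapabilityVal,
//		Policy_CacheReservation:       CacheReservationCapabilityVal,
//	}
//	// 2. An existing vCenter SPBM storage policy:
//	scParameters = map[string]string{SpbmStoragePolicy: "gold", DiskFormat: ThinDisk}
//
//	storageclass := getVSphereStorageClassSpec("storagepolicysc", scParameters)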
var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-vsan-policy")
var (
client clientset.Interface
namespace string
scParameters map[string]string
policyName string
tagPolicy string
masterNode string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
framework.Logf("framework: %+v", f)
scParameters = make(map[string]string)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
Expect(masternodes).NotTo(BeEmpty())
masterNode = masternodes.List()[0]
})
// Valid policy.
It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Valid policy.
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = "1"
scParameters[Policy_ObjectSpaceReservation] = "30"
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Valid policy.
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Valid policy.
It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Invalid VSAN storage capability parameters.
It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
// Invalid policy on a VSAN test bed.
// diskStripes value has to be between 1 and 12.
It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_DiskStripes + "."
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
// Invalid policy on a VSAN test bed.
// hostFailuresToTolerate value has to be between 0 and 3 inclusive.
It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
// Specify a valid VSAN policy on a non-VSAN test bed.
// The test should fail.
It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VmfsDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
"The policy parameters will work only with VSAN Datastore."
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
scParameters[SpbmStoragePolicy] = policyName
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() {
scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})
It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
scParameters[SpbmStoragePolicy] = tagPolicy
scParameters[Datastore] = VsanDatastore
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
scParameters[SpbmStoragePolicy] = policyName
Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty())
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
})
func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Expect claim to fail provisioning volume")
_, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute)
Expect(err).To(HaveOccurred())
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
// Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM
time.Sleep(6 * time.Minute)
fnvHash := fnv.New32a()
fnvHash.Write([]byte(vmName))
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg)
}
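// A minimal sketch of the dummy-VM naming scheme checked above: the test
// expects the transient VM to be named after the 32-bit FNV-1a hash of the
// would-be volume name. dummyVMFullNameFor is a hypothetical helper shown for
// illustration only.
func dummyVMFullNameFor(clusterName, pvcUID string) string {
fnvHash := fnv.New32a()
fnvHash.Write([]byte(clusterName + "-dynamic-pvc-" + pvcUID))
return DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
}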