vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@@ -8,65 +8,56 @@ load(
go_library(
name = "go_default_library",
srcs = [
"csi_hostpath.go",
"csi_volumes.go",
"empty_dir_wrapper.go",
"flexvolume.go",
"framework.go",
"mounted_volume_resize.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"persistent_volumes-gce.go",
"persistent_volumes-local.go",
"persistent_volumes-vsphere.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",
"pv_protection.go",
"pvc_protection.go",
"regional_pd.go",
"volume_expand.go",
"volume_io.go",
"volume_metrics.go",
"volume_provisioning.go",
"volumes.go",
"vsphere_scale.go",
"vsphere_statefulsets.go",
"vsphere_stress.go",
"vsphere_utils.go",
"vsphere_volume_cluster_ds.go",
"vsphere_volume_datastore.go",
"vsphere_volume_diskformat.go",
"vsphere_volume_disksize.go",
"vsphere_volume_fstype.go",
"vsphere_volume_master_restart.go",
"vsphere_volume_node_poweroff.go",
"vsphere_volume_ops_storm.go",
"vsphere_volume_perf.go",
"vsphere_volume_placement.go",
"vsphere_volume_vsan_policy.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage",
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
@@ -82,6 +73,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
@@ -97,6 +89,10 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//test/e2e/storage/utils:all-srcs",
"//test/e2e/storage/vsphere:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,199 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0"
)
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiExternalProvisionerImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiDriverRegistrarImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiExternalAttacherImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiHostPathPluginImage,
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}

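All four containers in the pod above talk to the same UNIX socket; they only differ in how the address is handed to them (a --csi-address flag for the provisioner and registrar, ADDRESS/CSI_ENDPOINT environment variables for the attacher and the hostpath driver). A small illustrative sketch of the path arithmetic, with the constants copied from the manifest above (the helper program itself is not part of this commit):

package main

import (
	"fmt"
	"path"
)

const (
	// host directory backing the "socket-dir" hostPath volume above
	hostPluginDir = "/var/lib/kubelet/plugins/csi-hostpath"
	// mount point of that volume inside every container of the pod
	containerSocketDir = "/csi"
	socketName         = "csi.sock"
)

func main() {
	// the address the sidecars are given via --csi-address / ADDRESS
	fmt.Println(path.Join(containerSocketDir, socketName)) // /csi/csi.sock
	// the same socket as seen from the host side of the hostPath mount
	fmt.Println("unix://" + path.Join(hostPluginDir, socketName))
}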
View File

@@ -0,0 +1,242 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"math/rand"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
)
const (
csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0"
csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0"
csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0"
)
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
) *v1.ServiceAccount {
serviceAccountName := config.Prefix + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func csiClusterRole(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
) *rbacv1.ClusterRole {
clusterRoleClient := client.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-cluster-role",
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumes"},
Verbs: []string{"create", "delete", "get", "list", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"persistentvolumeclaims"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"volumeattachments"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"storageclasses"},
Verbs: []string{"get", "list", "watch"},
},
},
}
clusterRoleClient.Delete(role.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleClient.Get(role.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
func csiClusterRoleBinding(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRole *rbacv1.ClusterRole,
) *rbacv1.ClusterRoleBinding {
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRole.GetName(),
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
return ret
}
var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
f := framework.NewDefaultFramework("csi-mock-plugin")
var (
cs clientset.Interface
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "csi",
ClientNodeName: node.Name,
ServerNodeName: node.Name,
WaitForCompletion: true,
}
})
// Create one of these for each of the drivers to be tested
// CSI hostPath driver test
Describe("Sanity CSI plugin test using hostPath CSI driver", func() {
var (
clusterRole *rbacv1.ClusterRole
serviceAccount *v1.ServiceAccount
)
BeforeEach(func() {
By("deploying csi hostpath driver")
clusterRole = csiClusterRole(cs, config, false)
serviceAccount = csiServiceAccount(cs, config, false)
csiClusterRoleBinding(cs, config, false, serviceAccount, clusterRole)
csiHostPathPod(cs, config, false, f, serviceAccount)
})
AfterEach(func() {
By("uninstalling csi hostpath driver")
csiHostPathPod(cs, config, true, f, serviceAccount)
csiClusterRoleBinding(cs, config, true, serviceAccount, clusterRole)
serviceAccount = csiServiceAccount(cs, config, true)
clusterRole = csiClusterRole(cs, config, true)
})
It("should provision storage with a hostPath CSI driver", func() {
t := storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
})

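The three csi* helpers above repeat one idiom: delete any leftover object, poll until the API server reports NotFound, and only then (unless tearing down) recreate it. A minimal sketch of that idiom factored into a helper, using the same wait and apierrs packages this file imports; the helper itself is illustrative and not part of this commit:

package storage

import (
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForDeletion blocks until getObject (a Get against the just-deleted
// object) returns a NotFound error, or until the ten-minute budget expires,
// mirroring the delete-then-poll pattern of csiServiceAccount,
// csiClusterRole and csiClusterRoleBinding.
func waitForDeletion(getObject func() error) error {
	return wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
		return apierrs.IsNotFound(getObject()), nil
	})
}

With such a helper, each setup function reduces to Delete, waitForDeletion, and, when teardown is false, Create.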
View File

@@ -30,6 +30,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@@ -51,7 +53,7 @@ const (
wrappedVolumeRaceRCNamePrefix = "wrapped-volume-race-"
)
var _ = SIGDescribe("EmptyDir wrapper volumes", func() {
var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
f := framework.NewDefaultFramework("emptydir-wrapper")
It("should not conflict", func() {

View File

@@ -22,12 +22,18 @@ import (
"net"
"path"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/version"
clientset "k8s.io/client-go/kubernetes"
versionutil "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@@ -38,7 +44,9 @@ const (
// On gci, root is read-only and controller-manager containerized. Assume
// controller-manager has started with --flex-volume-plugin-dir equal to this
// (see cluster/gce/config-test.sh)
gciVolumePluginDir = "/etc/srv/kubernetes/kubelet-plugins/volume/exec"
gciVolumePluginDir = "/home/kubernetes/flexvolume"
gciVolumePluginDirLegacy = "/etc/srv/kubernetes/kubelet-plugins/volume/exec"
gciVolumePluginDirVersion = "1.10.0"
)
// testFlexVolume tests that a client pod using a given flexvolume driver
@@ -64,8 +72,8 @@ func testFlexVolume(driver string, cs clientset.Interface, config framework.Volu
// installFlex installs the driver found at filePath on the node, and restarts
// kubelet if 'restart' is true. If node is nil, installs on the master, and restarts
// controller-manager if 'restart' is true.
func installFlex(node *v1.Node, vendor, driver, filePath string, restart bool) {
flexDir := getFlexDir(node == nil, vendor, driver)
func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath string, restart bool) {
flexDir := getFlexDir(c, node, vendor, driver)
flexFile := path.Join(flexDir, driver)
host := ""
@@ -102,8 +110,8 @@ func installFlex(node *v1.Node, vendor, driver, filePath string, restart bool) {
}
}
func uninstallFlex(node *v1.Node, vendor, driver string) {
flexDir := getFlexDir(node == nil, vendor, driver)
func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) {
flexDir := getFlexDir(c, node, vendor, driver)
host := ""
if node != nil {
@@ -116,11 +124,26 @@ func uninstallFlex(node *v1.Node, vendor, driver string) {
sshAndLog(cmd, host)
}
func getFlexDir(master bool, vendor, driver string) string {
func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) string {
volumePluginDir := defaultVolumePluginDir
if framework.ProviderIs("gce") {
if (master && framework.MasterOSDistroIs("gci")) || (!master && framework.NodeOSDistroIs("gci")) {
volumePluginDir = gciVolumePluginDir
if node == nil && framework.MasterOSDistroIs("gci") {
v, err := getMasterVersion(c)
if err != nil {
framework.Failf("Error getting master version: %v", err)
}
if v.AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) {
volumePluginDir = gciVolumePluginDir
} else {
volumePluginDir = gciVolumePluginDirLegacy
}
} else if node != nil && framework.NodeOSDistroIs("gci") {
if getNodeVersion(node).AtLeast(versionutil.MustParseGeneric(gciVolumePluginDirVersion)) {
volumePluginDir = gciVolumePluginDir
} else {
volumePluginDir = gciVolumePluginDirLegacy
}
}
}
flexDir := path.Join(volumePluginDir, fmt.Sprintf("/%s~%s/", vendor, driver))
@@ -136,7 +159,25 @@ func sshAndLog(cmd, host string) {
}
}
var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
func getMasterVersion(c clientset.Interface) (*versionutil.Version, error) {
var err error
var v *version.Info
waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
v, err = c.Discovery().ServerVersion()
return err == nil, nil
})
if waitErr != nil {
return nil, fmt.Errorf("Could not get the master version: %v", waitErr)
}
return versionutil.MustParseSemantic(v.GitVersion), nil
}
func getNodeVersion(node *v1.Node) *versionutil.Version {
return versionutil.MustParseSemantic(node.Status.NodeInfo.KubeletVersion)
}
var _ = utils.SIGDescribe("Flexvolumes [Disruptive]", func() {
f := framework.NewDefaultFramework("flexvolume")
// note that namespace deletion is handled by delete-namespace flag
@@ -170,7 +211,7 @@ var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(&node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
@@ -180,7 +221,7 @@ var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(&node, "k8s", driverInstallAs)
uninstallFlex(cs, &node, "k8s", driverInstallAs)
})
It("should be mountable when attachable", func() {
@@ -188,9 +229,9 @@ var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(&node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(nil, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver), true /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
@@ -200,9 +241,9 @@ var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(&node, "k8s", driverInstallAs)
uninstallFlex(cs, &node, "k8s", driverInstallAs)
By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(nil, "k8s", driverInstallAs)
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})
It("should install plugin without kubelet restart", func() {
@@ -210,7 +251,7 @@ var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
driverInstallAs := driver + "-" + suffix
By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
installFlex(&node, "k8s", driverInstallAs, path.Join(driverDir, driver), false /* restart */)
installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver), false /* restart */)
testFlexVolume(driverInstallAs, cs, config, f)
@@ -220,6 +261,6 @@ var _ = SIGDescribe("Flexvolumes [Disruptive] [Feature:FlexVolume]", func() {
}
By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(&node, "k8s", driverInstallAs)
uninstallFlex(cs, &node, "k8s", driverInstallAs)
})
})

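The new plugin-directory selection ultimately reduces to a semantic-version comparison against the 1.10.0 cutoff declared above. A self-contained sketch of that check, reusing the same k8s.io/kubernetes/pkg/util/version helpers this file now imports (the wrapper function and the sample versions are illustrative only, and building it assumes the kubernetes repo is on the GOPATH):

package main

import (
	"fmt"

	versionutil "k8s.io/kubernetes/pkg/util/version"
)

// flexDirFor mirrors the gate in getFlexDir: clusters at or past 1.10.0 use
// the new GCI flexvolume path, older ones keep the legacy path.
func flexDirFor(kubeletVersion string) string {
	v := versionutil.MustParseSemantic(kubeletVersion)
	if v.AtLeast(versionutil.MustParseGeneric("1.10.0")) {
		return "/home/kubernetes/flexvolume"
	}
	return "/etc/srv/kubernetes/kubelet-plugins/volume/exec"
}

func main() {
	fmt.Println(flexDirFor("v1.9.3"))  // legacy plugin dir
	fmt.Println(flexDirFor("v1.10.2")) // new plugin dir
}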
View File

@@ -0,0 +1,172 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() {
var (
c clientset.Interface
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
)
f := framework.NewDefaultFramework("mounted-volume-expand")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
nodeKey = "mounted_volume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
resizableSc, err = createResizableStorageClass(test, ns, "resizing", c)
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())
pvc = newClaim(test, ns, "default")
pvc.Spec.StorageClassName = &resizableSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
})
It("Should verify mounted devices can be resized", func() {
By("Waiting for PVC to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a deployment with the provisioned volume")
deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
defer c.ExtensionsV1beta1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
Expect(pvc).NotTo(BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name)
}
By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
By("Getting a pod from deployment")
podList, err := framework.GetPodsForDeployment(c, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
By("Deleting the pod from deployment")
err = framework.DeletePodWithWait(f, c, &pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
By("Waiting for deployment to create new pod")
pod, err = waitForDeploymentToRecreatePod(c, deployment)
Expect(err).NotTo(HaveOccurred(), "While waiting for pod to be recreated")
By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
})
})
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *extensions.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
for _, pod := range podList.Items {
switch pod.Status.Phase {
case v1.PodRunning:
runningPod = pod
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
return false, nil
}
return false, err
})
return runningPod, waitErr
}

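expandPVCSize and the waitFor*Resize helpers used above live elsewhere in this package, so they do not appear in this diff. Conceptually, the resize request is just an update of the claim's requested storage; a rough illustrative sketch (the function name and error handling are not from this commit) using the same client-go calls the rest of the file relies on:

package storage

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// resizePVC re-reads the claim and bumps its requested storage; with volume
// expansion enabled, this update is what triggers the controller-side resize
// the test then waits for.
func resizePVC(c clientset.Interface, pvc *v1.PersistentVolumeClaim, newSize resource.Quantity) (*v1.PersistentVolumeClaim, error) {
	current, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	current.Spec.Resources.Requests[v1.ResourceStorage] = newSize
	return c.CoreV1().PersistentVolumeClaims(current.Namespace).Update(current)
}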
View File

@@ -40,6 +40,7 @@ import (
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@@ -52,7 +53,7 @@ const (
minNodes = 2
)
var _ = SIGDescribe("Pod Disks", func() {
var _ = utils.SIGDescribe("Pod Disks", func() {
var (
ns string
cs clientset.Interface
@@ -389,8 +390,8 @@ var _ = SIGDescribe("Pod Disks", func() {
Expect(true, strings.Contains(string(output), string(host0Name)))
By("deleting host0")
resp, err := gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v response=%#v", err, resp))
err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0Pod: err=%v", err))
By("expecting host0 node to be re-created")
numNodes := countReadyNodes(cs, host0Name)
Expect(numNodes).To(Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))

View File

@@ -18,18 +18,17 @@ package storage
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume)
@@ -37,17 +36,12 @@ type disruptiveTest struct {
testItStmt string
runTest testBody
}
type kubeletOpt string
const (
MinNodes = 2
NodeStateTimeout = 1 * time.Minute
kStart kubeletOpt = "start"
kStop kubeletOpt = "stop"
kRestart kubeletOpt = "restart"
MinNodes = 2
)
var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
f := framework.NewDefaultFramework("disruptive-pv")
var (
@@ -223,11 +217,15 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
disruptiveTestTable := []disruptiveTest{
{
testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.",
runTest: testKubeletRestartsAndRestoresMount,
runTest: utils.TestKubeletRestartsAndRestoresMount,
},
{
testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: testVolumeUnmountsFromDeletedPod,
runTest: utils.TestVolumeUnmountsFromDeletedPod,
},
{
testItStmt: "Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.",
runTest: utils.TestVolumeUnmountsFromForceDeletedPod,
},
}
@@ -243,61 +241,6 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
})
})
// testKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func testKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
By("Writing to the volume.")
file := "/mnt/_SUCCESS"
out, err := podExec(clientPod, fmt.Sprintf("touch %s", file))
framework.Logf(out)
Expect(err).NotTo(HaveOccurred())
By("Restarting kubelet")
kubeletCommand(kRestart, c, clientPod)
By("Testing that written file is accessible.")
out, err = podExec(clientPod, fmt.Sprintf("cat %s", file))
framework.Logf(out)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
}
// testVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
By("Stopping the kubelet.")
kubeletCommand(kStop, c, clientPod)
defer func() {
if err != nil {
kubeletCommand(kStart, c, clientPod)
}
}()
By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Starting the kubelet and waiting for pod to delete.")
kubeletCommand(kStart, c, clientPod)
err = f.WaitForPodTerminated(clientPod.Name, "")
if !apierrs.IsNotFound(err) && err != nil {
Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
}
By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
}
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
// by the test.
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
@@ -339,101 +282,3 @@ func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string,
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)
}
// kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired statuses.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `kStart`, `kStop`, and `kRestart`
func kubeletCommand(kOp kubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
sudoPresent := false
systemctlPresent := false
kubeletPid := ""
nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
framework.Logf("Checking if sudo command is present")
sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
sudoPresent = true
}
framework.Logf("Checking if systemctl command is present")
sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == kRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == kStop {
if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == kRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == kStart || kOp == kRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
// return the Main PID of the Kubelet Process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
} else {
command = "service kubelet status | grep 'Main PID'"
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", nodeIP))
framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID")
Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty")
return sshResult.Stdout
}
// podExec wraps RunKubectl to execute a bash cmd in target pod
func podExec(pod *v1.Pod, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}

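The disruptiveTestTable entries now reference the shared helpers in test/e2e/storage/utils, which is why those helpers must keep the testBody signature declared near the top of this file. The loop that consumes the table sits in an unchanged part of the file and therefore is not shown in this hunk; it follows the usual table-driven Ginkgo shape, roughly like the sketch below (illustrative, not the literal upstream code):

for _, t := range disruptiveTestTable {
	func(t disruptiveTest) {
		It(t.testItStmt, func() {
			By("Executing Spec")
			t.runTest(c, f, clientPod, pvc, pv)
		})
	}(t)
}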
View File

@@ -26,6 +26,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// verifyGCEDiskAttached performs a sanity check to verify the PD attached to the node
@@ -51,7 +52,7 @@ func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.
}
// Testing configurations of a single PV/PVC pair attached to a GCE PD
var _ = SIGDescribe("PersistentVolumes GCEPD", func() {
var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
var (
c clientset.Interface
diskName string

File diff suppressed because it is too large

View File

@@ -29,6 +29,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
@@ -85,7 +86,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
return nil
}
var _ = SIGDescribe("PersistentVolumes", func() {
var _ = utils.SIGDescribe("PersistentVolumes", func() {
// global vars for the Context()s and It()'s below
f := framework.NewDefaultFramework("pv")

View File

@@ -0,0 +1,127 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("PV Protection", func() {
var (
client clientset.Interface
nameSpace string
err error
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
volLabel labels.Set
selector *metav1.LabelSelector
)
f := framework.NewDefaultFramework("pv-protection")
BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
// Enforce binding only within test space via selector labels
volLabel = labels.Set{framework.VolumeSelectorKey: nameSpace}
selector = metav1.SetAsLabelSelector(volLabel)
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "hostpath-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp/data",
},
},
}
pvcConfig = framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
Selector: selector,
}
By("Creating a PV")
// make the pv definitions
pv = framework.MakePersistentVolume(pvConfig)
// create the PV
pv, err = client.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred(), "Error creating PV")
By("Checking that PV Protection finalizer is set")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While getting PV status")
Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue())
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
})
It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() {
By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PV")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
})
It("Verify that PV bound to a PVC is not removed immediately", func() {
By("Creating a PVC")
pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace)
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating PVC")
By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PV")
By("Checking that the PV status is Terminating")
pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PV status")
Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Deleting the PVC that is bound to the PV")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
})
})

View File

@@ -0,0 +1,143 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("PVC Protection", func() {
var (
client clientset.Interface
nameSpace string
err error
pvc *v1.PersistentVolumeClaim
pvcCreatedAndNotDeleted bool
)
f := framework.NewDefaultFramework("pvc-protection")
BeforeEach(func() {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
By("Creating a PVC")
suffix := "pvc-protection"
defaultSC := getDefaultStorageClassName(client)
testStorageClass := storageClassTest{
claimSize: "1Gi",
}
pvc = newClaim(testStorageClass, nameSpace, suffix)
pvc.Spec.StorageClassName = &defaultSC
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating PVC")
pvcCreatedAndNotDeleted = true
By("Waiting for PVC to become Bound")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
By("Checking that PVC Protection finalizer is set")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While getting PVC status")
Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue())
})
AfterEach(func() {
if pvcCreatedAndNotDeleted {
framework.DeletePersistentVolumeClaim(client, pvc.Name, nameSpace)
}
})
It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() {
By("Deleting the PVC")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
It("Verify that PVC in active use by a pod is not removed immediately", func() {
By("Creating a Pod that becomes Running and therefore is actively using the PVC")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pod, err := framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "While creating pod that uses the PVC or waiting for the Pod to become Running")
By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
By("Checking that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Deleting the pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() {
By("Creating first Pod that becomes Running and therefore is actively using the PVC")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
firstPod, err := framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "While creating pod that uses the PVC or waiting for the Pod to become Running")
By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
By("Checking that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, pvcClaims, false, "")
Expect(err2).NotTo(HaveOccurred(), "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
By("Deleting the second pod that uses the PVC that is being deleted")
err = framework.DeletePodWithWait(f, client, secondPod)
Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
By("Checking again that the PVC status is Terminating")
pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))
By("Deleting the first pod that uses the PVC")
err = framework.DeletePodWithWait(f, client, firstPod)
Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
pvcCreatedAndNotDeleted = false
})
})

View File

@@ -0,0 +1,450 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"fmt"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"strings"
"time"
)
const (
pvDeletionTimeout = 3 * time.Minute
statefulSetReadyTimeout = 3 * time.Minute
)
var _ = utils.SIGDescribe("Regional PD [Feature:RegionalPD]", func() {
f := framework.NewDefaultFramework("regional-pd")
// filled in BeforeEach
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipUnlessMultizone(c)
})
Describe("RegionalPD", func() {
It("should provision storage [Slow]", func() {
testVolumeProvisioning(c, ns)
})
It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() {
testZonalFailover(c, ns)
})
})
})
func testVolumeProvisioning(c clientset.Interface, ns string) {
cloudZones := getTwoRandomZones(c)
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []storageClassTest{
{
name: "HDD Regional PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(cloudZones, ","),
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
}
return verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
},
},
{
name: "HDD Regional PD with auto zone selection on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
},
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
}
zones, err := framework.GetClusterZones(c)
if err != nil {
return err
}
return verifyZonesInPV(volume, zones, false /* match */)
},
},
}
for _, test := range tests {
class := newStorageClass(test, ns, "" /* suffix */)
claim := newClaim(test, ns, "" /* suffix */)
claim.Spec.StorageClassName = &class.Name
testDynamicProvisioning(test, c, claim, class)
}
}
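// Illustrative aside (not part of this file): each storageClassTest above is
// turned into a StorageClass by newStorageClass; for the regional cases the
// resulting object is essentially the following, where "replication-type":
// "regional-pd" requests a regionally replicated GCE disk and "zones"
// (optional) pins the replica zones. The object name and the zone pair below
// are made-up examples.
//
//	regionalClass := &storage.StorageClass{
//		ObjectMeta:  metav1.ObjectMeta{Name: "regional-hdd"},
//		Provisioner: "kubernetes.io/gce-pd",
//		Parameters: map[string]string{
//			"type":             "pd-standard",
//			"replication-type": "regional-pd",
//			"zones":            "us-central1-b,us-central1-c",
//		},
//	}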
func testZonalFailover(c clientset.Interface, ns string) {
nodes := framework.GetReadySchedulableNodesOrDie(c)
nodeCount := len(nodes.Items)
cloudZones := getTwoRandomZones(c)
class := newRegionalStorageClass(ns, cloudZones)
claimTemplate := newClaimTemplate(ns)
claimTemplate.Spec.StorageClassName = &class.Name
statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns)
By("creating a StorageClass " + class.Name)
_, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil),
"Error deleting StorageClass %s", class.Name)
}()
By("creating a StatefulSet")
_, err = c.CoreV1().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
_, err = c.AppsV1().StatefulSets(ns).Create(statefulSet)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
// typically this claim has already been deleted
framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(statefulSet.Name, nil /* options */),
"Error deleting StatefulSet %s", statefulSet.Name)
framework.Logf("deleting claims in namespace %s", ns)
pvc := getPVC(c, ns, regionalPDLabels)
framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil),
"Error deleting claim %s.", pvc.Name)
if pvc.Spec.VolumeName != "" {
err = framework.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout)
if err != nil {
framework.Logf("WARNING: PV %s is not yet deleted, and subsequent tests may be affected.", pvc.Spec.VolumeName)
}
}
}()
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
if err != nil {
pod := getPod(c, ns, regionalPDLabels)
Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(),
"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
Expect(err).NotTo(HaveOccurred())
}
pvc := getPVC(c, ns, regionalPDLabels)
By("getting zone information from pod")
pod := getPod(c, ns, regionalPDLabels)
nodeName := pod.Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
podZone := node.Labels[apis.LabelZoneFailureDomain]
// TODO (verult) Consider using node taints to simulate zonal failure instead.
By("deleting instance group belonging to pod's zone")
// Asynchronously detect a pod reschedule is triggered during/after instance group deletion.
waitStatus := make(chan error)
go func() {
waitStatus <- waitForStatefulSetReplicasNotReady(statefulSet.Name, ns, c)
}()
cloud, err := framework.GetGCECloud()
if err != nil {
Expect(err).NotTo(HaveOccurred())
}
instanceGroupName := framework.TestContext.CloudConfig.NodeInstanceGroup
instanceGroup, err := cloud.GetInstanceGroup(instanceGroupName, podZone)
Expect(err).NotTo(HaveOccurred(),
"Error getting instance group %s in zone %s", instanceGroupName, podZone)
err = framework.DeleteManagedInstanceGroup(podZone)
Expect(err).NotTo(HaveOccurred(),
"Error deleting instance group in zone %s", podZone)
defer func() {
framework.Logf("recreating instance group %s", instanceGroup.Name)
// HACK improve this when Managed Instance Groups are available through the cloud provider API
templateName := strings.Replace(instanceGroupName, "group", "template", 1 /* n */)
framework.ExpectNoError(framework.CreateManagedInstanceGroup(instanceGroup.Size, podZone, templateName),
"Error recreating instance group %s in zone %s", instanceGroup.Name, podZone)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, framework.RestartNodeReadyAgainTimeout),
"Error waiting for nodes from the new instance group to become ready.")
}()
err = <-waitStatus
Expect(err).ToNot(HaveOccurred(), "Error waiting for replica to be deleted during failover: %v", err)
err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
if err != nil {
pod := getPod(c, ns, regionalPDLabels)
Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(),
"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
Expect(err).NotTo(HaveOccurred())
}
By("verifying the same PVC is used by the new pod")
Expect(getPVC(c, ns, regionalPDLabels).Name).To(Equal(pvc.Name),
"The same PVC should be used after failover.")
By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
pod = getPod(c, ns, regionalPDLabels)
logs, err := framework.GetPodLogs(c, ns, pod.Name, "")
Expect(err).NotTo(HaveOccurred(),
"Error getting logs from pod %s in namespace %s", pod.Name, ns)
lineCount := len(strings.Split(strings.TrimSpace(logs), "\n"))
expectedLineCount := 2
Expect(lineCount).To(Equal(expectedLineCount),
"Line count of the written file should be %d.", expectedLineCount)
// Verify the pod is scheduled in the other zone.
By("verifying the pod is scheduled in a different zone.")
var otherZone string
if cloudZones[0] == podZone {
otherZone = cloudZones[1]
} else {
otherZone = cloudZones[0]
}
nodeName = pod.Spec.NodeName
node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
newPodZone := node.Labels[apis.LabelZoneFailureDomain]
Expect(newPodZone).To(Equal(otherZone),
"The pod should be scheduled in zone %s after all nodes in zone %s have been deleted", otherZone, podZone)
}
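// getPVC returns the single PVC in namespace ns that matches pvcLabels, failing the test
// unless exactly one claim matches.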
func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim {
selector := labels.Set(pvcLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(pvcList.Items)).To(Equal(1), "There should be exactly 1 PVC matched.")
return &pvcList.Items[0]
}
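// getPod returns the single pod in namespace ns that matches podLabels, failing the test
// unless exactly one pod matches.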
func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.Pod {
selector := labels.Set(podLabels).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items)).To(Equal(1), "There should be exactly 1 pod matched.")
return &podList.Items[0]
}
// Generates the spec of a StatefulSet with 1 replica that mounts a Regional PD.
func newStatefulSet(claimTemplate *v1.PersistentVolumeClaim, ns string) (sts *appsv1.StatefulSet, svc *v1.Service, labels map[string]string) {
var replicas int32 = 1
labels = map[string]string{"app": "regional-pd-workload"}
svc = &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "regional-pd-service",
Namespace: ns,
Labels: labels,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
Name: "web",
}},
ClusterIP: v1.ClusterIPNone,
Selector: labels,
},
}
sts = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "regional-pd-sts",
Namespace: ns,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
ServiceName: svc.Name,
Replicas: &replicas,
Template: *newPodTemplate(labels),
VolumeClaimTemplates: []v1.PersistentVolumeClaim{*claimTemplate},
},
}
return
}
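// newPodTemplate returns a pod template whose busybox container appends its pod name to a
// file on the regional PD and prints the file contents to stdout.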
func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
return &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
// This container writes its pod name to a file in the Regional PD
// and prints the entire file to stdout.
{
Name: "busybox",
Image: "gcr.io/google_containers/busybox",
Command: []string{"sh", "-c"},
Args: []string{
"echo ${POD_NAME} >> /mnt/data/regional-pd/pods.txt;" +
"cat /mnt/data/regional-pd/pods.txt;" +
"sleep 3600;",
},
Env: []v1.EnvVar{{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
}},
Ports: []v1.ContainerPort{{
ContainerPort: 80,
Name: "web",
}},
VolumeMounts: []v1.VolumeMount{{
Name: "regional-pd-vol",
MountPath: "/mnt/data/regional-pd",
}},
},
},
},
}
}
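// newClaimTemplate returns a 1Gi ReadWriteOnce PVC template used by the StatefulSet's
// volumeClaimTemplates.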
func newClaimTemplate(ns string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "regional-pd-vol",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
}
}
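// newRegionalStorageClass returns a GCE PD StorageClass with replication-type regional-pd
// spanning the given zones.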
func newRegionalStorageClass(namespace string, zones []string) *storage.StorageClass {
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: namespace + "-sc",
},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(zones, ","),
"replication-type": "regional-pd",
},
}
}
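// getTwoRandomZones returns two distinct zones from the cluster, failing the test if the
// cluster has fewer than two zones.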
func getTwoRandomZones(c clientset.Interface) []string {
zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred())
Expect(zones.Len()).To(BeNumerically(">=", 2),
"The test should only be run in multizone clusters.")
zone1, _ := zones.PopAny()
zone2, _ := zones.PopAny()
return []string{zone1, zone2}
}
// Waits for at least 1 replica of a StatefulSet to become not ready or until timeout occurs, whichever comes first.
func waitForStatefulSetReplicasNotReady(statefulSetName, ns string, c clientset.Interface) error {
const poll = 3 * time.Second
const timeout = statefulSetReadyTimeout
framework.Logf("Waiting up to %v for StatefulSet %s to have at least 1 replica to become not ready", timeout, statefulSetName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, poll, err)
continue
} else {
if sts.Status.ReadyReplicas < *sts.Spec.Replicas {
framework.Logf("%d replicas are ready out of a total of %d replicas in StatefulSet %s. (%v)",
sts.Status.ReadyReplicas, *sts.Spec.Replicas, statefulSetName, time.Since(start))
return nil
} else {
framework.Logf("StatefulSet %s found but there are %d ready replicas and %d total replicas.", statefulSetName, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
}
}
}
return fmt.Errorf("All replicas in StatefulSet %s are still ready within %v", statefulSetName, timeout)
}
// If match is true, check that the zones in the PV exactly match the zones given.
// Otherwise, check that the zones given are a superset of the zones in the PV.
func verifyZonesInPV(volume *v1.PersistentVolume, zones sets.String, match bool) error {
pvZones, err := util.LabelZonesToSet(volume.Labels[apis.LabelZoneFailureDomain])
if err != nil {
return err
}
if match && zones.Equal(pvZones) || !match && zones.IsSuperset(pvZones) {
return nil
}
return fmt.Errorf("Zones in StorageClass are %v, but zones in PV are %v", zones, pvZones)
}

37
vendor/k8s.io/kubernetes/test/e2e/storage/utils/BUILD generated vendored Normal file
View File

@ -0,0 +1,37 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"framework.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//test/e2e/framework:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package utils
import "github.com/onsi/ginkgo"

View File

@ -0,0 +1,260 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
type KubeletOpt string
const (
NodeStateTimeout = 1 * time.Minute
KStart KubeletOpt = "start"
KStop KubeletOpt = "stop"
KRestart KubeletOpt = "restart"
)
// PodExec wraps RunKubectl to execute a bash cmd in target pod
func PodExec(pod *v1.Pod, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
sudoPresent := false
systemctlPresent := false
kubeletPid := ""
nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
framework.Logf("Checking if sudo command is present")
sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
sudoPresent = true
}
framework.Logf("Checking if systemctl command is present")
sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for up to a minute to check whether the kubelet PID has changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
Expect(isPidChanged).To(BeTrue(), "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until the node becomes Ready
if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
// getKubeletMainPid returns the main PID of the kubelet process on the given node
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
} else {
command = "service kubelet status | grep 'Main PID'"
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", nodeIP))
framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID")
Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty")
return sshResult.Stdout
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
By("Writing to the volume.")
file := "/mnt/_SUCCESS"
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
framework.Logf(out)
Expect(err).NotTo(HaveOccurred())
By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
By("Testing that written file is accessible.")
out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
framework.Logf(out)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
}
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is deleted forcefully (with a zero grace period) or gracefully.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDelete bool) {
nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
defer func() {
if err != nil {
KubeletCommand(KStart, c, clientPod)
}
}()
By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
}
Expect(err).NotTo(HaveOccurred())
By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = f.WaitForPodTerminated(clientPod.Name, "")
if !apierrs.IsNotFound(err) && err != nil {
Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
}
if forceDelete {
// With forceDelete, the pod is removed from the API server immediately, so there is no reliable
// way to know when its volumes are torn down; wait a fixed period instead.
time.Sleep(30 * time.Second)
}
By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, false)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, true)
}
// RunInPodWithVolume runs a command in a pod with the given claim mounted at /mnt/test.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
framework.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}

View File

@ -0,0 +1,208 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
resizePollInterval = 2 * time.Second
// total time to wait for cloudprovider or file system resize to finish
totalResizeWaitPeriod = 20 * time.Minute
)
var _ = utils.SIGDescribe("Volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() {
var (
c clientset.Interface
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
)
f := framework.NewDefaultFramework("volume-expand")
BeforeEach(func() {
framework.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
test := storageClassTest{
name: "default",
claimSize: "2Gi",
}
resizableSc, err = createResizableStorageClass(test, ns, "resizing", c)
Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
Expect(resizableSc.AllowVolumeExpansion).NotTo(BeNil())
Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())
pvc = newClaim(test, ns, "default")
pvc.Spec.StorageClassName = &resizableSc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
})
AfterEach(func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace))
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(resizableSc.Name, nil))
})
It("Verify if editing PVC allows resize", func() {
By("Waiting for pvc to be in bound phase")
pvcClaims := []*v1.PersistentVolumeClaim{pvc}
pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
Expect(len(pvs)).To(Equal(1))
By("Creating a pod with dynamically provisioned volume")
pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "While creating pods for resizing")
defer func() {
err = framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "while cleaning up pod already deleted in resize test")
}()
By("Expanding current pvc")
newSize := resource.MustParse("6Gi")
pvc, err = expandPVCSize(pvc, newSize, c)
Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
Expect(pvc).NotTo(BeNil())
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvc.Name)
}
By("Waiting for cloudprovider resize to finish")
err = waitForControllerVolumeResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
By("Checking for conditions on pvc")
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "While fetching pvc after controller resize")
inProgressConditions := pvc.Status.Conditions
Expect(len(inProgressConditions)).To(Equal(1), "pvc must have file system resize pending condition")
Expect(inProgressConditions[0].Type).To(Equal(v1.PersistentVolumeClaimFileSystemResizePending), "pvc must have fs resizing condition")
By("Deleting the previously created pod")
err = framework.DeletePodWithWait(f, c, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
By("Creating a new pod with same volume")
pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
Expect(err).NotTo(HaveOccurred(), "while recreating pod for resizing")
defer func() {
err = framework.DeletePodWithWait(f, c, pod2)
Expect(err).NotTo(HaveOccurred(), "while cleaning up pod before exiting resizing test")
}()
By("Waiting for file system resize to finish")
pvc, err = waitForFSResize(pvc, c)
Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
pvcConditions := pvc.Status.Conditions
Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
})
})
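// createResizableStorageClass creates a StorageClass based on the given test parameters with
// AllowVolumeExpansion enabled.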
func createResizableStorageClass(t storageClassTest, ns string, suffix string, c clientset.Interface) (*storage.StorageClass, error) {
stKlass := newStorageClass(t, ns, suffix)
allowExpansion := true
stKlass.AllowVolumeExpansion = &allowExpansion
var err error
stKlass, err = c.StorageV1().StorageClasses().Create(stKlass)
return stKlass, err
}
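// expandPVCSize updates the PVC's storage request to the given size, re-fetching the claim and
// retrying on update errors for up to 30 seconds.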
func expandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcName := origPVC.Name
updatedPVC := origPVC.DeepCopy()
waitErr := wait.PollImmediate(resizePollInterval, 30*time.Second, func() (bool, error) {
var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(pvcName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for resizing with %v", pvcName, err)
}
updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(updatedPVC)
if err == nil {
return true, nil
}
framework.Logf("Error updating pvc %s with %v", pvcName, err)
return false, nil
})
return updatedPVC, waitErr
}
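// waitForControllerVolumeResize waits until the bound PV's capacity is at least the size
// requested by the PVC, indicating the cloud-provider resize has finished.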
func waitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) error {
pvName := pvc.Spec.VolumeName
return wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) {
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pv %q for resizing %v", pvName, err)
}
pvSize := pv.Spec.Capacity[v1.ResourceStorage]
// If the PV size is greater than or equal to the requested size, the controller resize is finished.
if pvSize.Cmp(pvcSize) >= 0 {
return true, nil
}
return false, nil
})
}
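// waitForFSResize waits until the PVC's status capacity catches up with its requested size,
// indicating the file system resize has finished, and returns the updated PVC.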
func waitForFSResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
var updatedPVC *v1.PersistentVolumeClaim
waitErr := wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) {
var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err)
}
pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage]
pvcStatusSize := updatedPVC.Status.Capacity[v1.ResourceStorage]
// If the size in the PVC's status is greater than or equal to the requested size, the resize is done.
if pvcStatusSize.Cmp(pvcSize) >= 0 {
return true, nil
}
return false, nil
})
return updatedPVC, waitErr
}

View File

@ -40,6 +40,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -126,7 +127,7 @@ func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / minFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath)
_, err := podExec(pod, writeCmd)
_, err := utils.PodExec(pod, writeCmd)
return err
}
@ -134,7 +135,7 @@ func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error {
By("verifying file size")
rtnstr, err := podExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
@ -147,7 +148,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) er
}
By("verifying file hash")
rtnstr, err = podExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
@ -168,7 +169,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) er
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := podExec(pod, fmt.Sprintf("rm -f %s", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
@ -237,7 +238,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framewo
// These tests need privileged containers which are disabled by default.
// TODO: support all of the plugins tested in storage/volumes.go
var _ = SIGDescribe("Volume plugin streaming [Slow]", func() {
var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
f := framework.NewDefaultFramework("volume-io")
var (
config framework.VolumeTestConfig

View File

@ -22,6 +22,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/prometheus/common/model"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -30,11 +31,12 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// This test needs to run in serial because other tests could interfere
// with metrics being tested here.
var _ = SIGDescribe("[Serial] Volume metrics", func() {
var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
var (
c clientset.Interface
ns string
@ -164,6 +166,138 @@ var _ = SIGDescribe("[Serial] Volume metrics", func() {
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
})
// Test for pv controller metrics, concretely: bound/unbound pv/pvc count.
Describe("PVController", func() {
const (
classKey = "storage_class"
namespaceKey = "namespace"
boundPVKey = "pv_collector_bound_pv_count"
unboundPVKey = "pv_collector_unbound_pv_count"
boundPVCKey = "pv_collector_bound_pvc_count"
unboundPVCKey = "pv_collector_unbound_pvc_count"
)
var (
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
className = "bound-unbound-count-test-sc"
pvConfig = framework.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "/data"},
},
NamePrefix: "pv-test-",
StorageClassName: className,
}
pvcConfig = framework.PersistentVolumeClaimConfig{StorageClassName: &className}
metrics = []struct {
name string
dimension string
}{
{boundPVKey, classKey},
{unboundPVKey, classKey},
{boundPVCKey, namespaceKey},
{unboundPVCKey, namespaceKey},
}
// Original metric values before we create any PV/PVCs. The length should be 4,
// and the elements should be bound pv count, unbound pv count, bound pvc count,
// and unbound pvc count, in that order.
// We use these values to calculate the relative increment of each test.
originMetricValues []map[string]int64
)
// validator validates each metric's values. The length of metricValues should be 4,
// and the elements should be bound pv count, unbound pv count, bound pvc count,
// and unbound pvc count, in that order.
validator := func(metricValues []map[string]int64) {
Expect(len(metricValues)).To(Equal(4),
"Wrong metric size: %d", len(metricValues))
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
Expect(err).NotTo(HaveOccurred(), "Error getting c-m metricValues: %v", err)
for i, metric := range metrics {
expectValues := metricValues[i]
if expectValues == nil {
expectValues = make(map[string]int64)
}
// We use relative increments instead of absolute values to reduce unexpected flakes.
// Concretely, we expect the difference between the updated values and the original
// values for each test to equal expectValues.
actualValues := calculateRelativeValues(originMetricValues[i],
getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension))
Expect(actualValues).To(Equal(expectValues),
"Wrong pv controller metric %s(%s): wanted %v, got %v",
metric.name, metric.dimension, expectValues, actualValues)
}
}
BeforeEach(func() {
if !metricsGrabber.HasRegisteredMaster() {
framework.Skipf("Environment does not support getting controller-manager metrics - skipping")
}
pv = framework.MakePersistentVolume(pvConfig)
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
// Initializes all original metric values.
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
Expect(err).NotTo(HaveOccurred(), "Error getting c-m metricValues: %v", err)
for _, metric := range metrics {
originMetricValues = append(originMetricValues,
getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension))
}
})
AfterEach(func() {
if err := framework.DeletePersistentVolume(c, pv.Name); err != nil {
framework.Failf("Error deleting pv: %v", err)
}
if err := framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil {
framework.Failf("Error deleting pvc: %v", err)
}
// Clear original metric values.
originMetricValues = nil
})
It("should create none metrics for pvc controller before creating any PV or PVC", func() {
validator([]map[string]int64{nil, nil, nil, nil})
})
It("should create unbound pv count metrics for pvc controller after creating pv only",
func() {
var err error
pv, err = framework.CreatePV(c, pv)
Expect(err).NotTo(HaveOccurred(), "Error creating pv: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey)
validator([]map[string]int64{nil, {className: 1}, nil, nil})
})
It("should create unbound pvc count metrics for pvc controller after creating pvc only",
func() {
var err error
pvc, err = framework.CreatePVC(c, ns, pvc)
Expect(err).NotTo(HaveOccurred(), "Error creating pvc: %v", err)
waitForPVControllerSync(metricsGrabber, unboundPVCKey, namespaceKey)
validator([]map[string]int64{nil, nil, nil, {ns: 1}})
})
It("should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc",
func() {
var err error
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred(), "Error creating pv pvc: %v", err)
waitForPVControllerSync(metricsGrabber, boundPVKey, classKey)
waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey)
validator([]map[string]int64{{className: 1}, nil, {ns: 1}, nil})
})
})
})
func waitForDetachAndGrabMetrics(oldMetrics map[string]int64, metricsGrabber *metrics.MetricsGrabber) map[string]int64 {
@ -269,3 +403,54 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string
Expect(errCount).To(Equal(0), "Found invalid samples")
return found
}
// waitForPVControllerSync waits until the pv controller metric specified by metricName and dimension becomes greater than zero.
func waitForPVControllerSync(metricsGrabber *metrics.MetricsGrabber, metricName, dimension string) {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
Steps: 21,
}
verifyMetricFunc := func() (bool, error) {
updatedMetrics, err := metricsGrabber.GrabFromControllerManager()
if err != nil {
framework.Logf("Error fetching controller-manager metrics")
return false, err
}
return len(getPVControllerMetrics(updatedMetrics, metricName, dimension)) > 0, nil
}
waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc)
Expect(waitErr).NotTo(HaveOccurred(),
"Timeout error fetching pv controller metrics : %v", waitErr)
}
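// getPVControllerMetrics extracts the samples of the given metric from the controller-manager
// metrics and returns them keyed by the value of the given dimension label.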
func getPVControllerMetrics(ms metrics.ControllerManagerMetrics, metricName, dimension string) map[string]int64 {
result := make(map[string]int64)
for method, samples := range ms {
if method != metricName {
continue
}
for _, sample := range samples {
count := int64(sample.Value)
dimensionName := string(sample.Metric[model.LabelName(dimension)])
result[dimensionName] = count
}
}
return result
}
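// calculateRelativeValues returns the per-key differences between updatedValues and
// originValues, omitting keys whose difference is zero.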
func calculateRelativeValues(originValues, updatedValues map[string]int64) map[string]int64 {
relativeValues := make(map[string]int64)
for key, value := range updatedValues {
relativeValue := value - originValues[key]
if relativeValue != 0 {
relativeValues[key] = relativeValue
}
}
for key, value := range originValues {
if _, exist := updatedValues[key]; !exist && value > 0 {
relativeValues[key] = -value
}
}
return relativeValues
}

View File

@ -43,8 +43,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
type storageClassTest struct {
@ -55,6 +55,7 @@ type storageClassTest struct {
claimSize string
expectedSize string
pvCheck func(volume *v1.PersistentVolume) error
nodeName string
}
const (
@ -138,10 +139,10 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
runInPodWithVolume(client, claim.Namespace, claim.Name, command)
runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command)
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data")
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
@ -229,7 +230,7 @@ func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
return nil
}
var _ = SIGDescribe("Dynamic Provisioning", func() {
var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
f := framework.NewDefaultFramework("volume-provisioning")
// filled in BeforeEach
@ -249,140 +250,140 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
// that can be used to persist data among pods.
tests := []storageClassTest{
{
"SSD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "SSD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-ssd",
"zone": cloudZone,
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
},
},
{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "HDD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
},
// AWS
{
"gp2 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "gp2 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "gp2",
"zone": cloudZone,
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", false)
},
},
{
"io1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "io1 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "io1",
"iopsPerGB": "50",
},
"3.5Gi",
"4Gi", // 4 GiB is minimum for io1
func(volume *v1.PersistentVolume) error {
claimSize: "3.5Gi",
expectedSize: "4Gi", // 4 GiB is minimum for io1
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "io1", false)
},
},
{
"sc1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "sc1 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "sc1",
},
"500Gi", // minimum for sc1
"500Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "500Gi", // minimum for sc1
expectedSize: "500Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "sc1", false)
},
},
{
"st1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "st1 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "st1",
},
"500Gi", // minimum for st1
"500Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "500Gi", // minimum for st1
expectedSize: "500Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
},
},
{
"encrypted EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "encrypted EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"encrypted": "true",
},
"1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1Gi",
expectedSize: "1Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
"generic Cinder volume on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
name: "generic Cinder volume on OpenStack",
cloudProviders: []string{"openstack"},
provisioner: "kubernetes.io/cinder",
parameters: map[string]string{},
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: nil, // there is currently nothing to check on OpenStack
},
{
"Cinder volume with empty volume type and zone on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{
name: "Cinder volume with empty volume type and zone on OpenStack",
cloudProviders: []string{"openstack"},
provisioner: "kubernetes.io/cinder",
parameters: map[string]string{
"type": "",
"availability": "",
},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: nil, // there is currently nothing to check on OpenStack
},
// vSphere generic test
{
"generic vSphere volume",
[]string{"vsphere"},
"kubernetes.io/vsphere-volume",
map[string]string{},
"1.5Gi",
"1.5Gi",
nil,
name: "generic vSphere volume",
cloudProviders: []string{"vsphere"},
provisioner: "kubernetes.io/vsphere-volume",
parameters: map[string]string{},
claimSize: "1.5Gi",
expectedSize: "1.5Gi",
pvCheck: nil,
},
{
"Azure disk volume with empty sku and location",
[]string{"azure"},
"kubernetes.io/azure-disk",
map[string]string{},
"1Gi",
"1Gi",
nil,
name: "Azure disk volume with empty sku and location",
cloudProviders: []string{"azure"},
provisioner: "kubernetes.io/azure-disk",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
pvCheck: nil,
},
}
@ -429,15 +430,15 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := storageClassTest{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "HDD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
},
"1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1G",
expectedSize: "1G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
}
@ -463,15 +464,15 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := storageClassTest{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "HDD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
},
"1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1G",
expectedSize: "1G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
}
@ -500,7 +501,7 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
Expect(err).NotTo(HaveOccurred())
// Get a list of all zones in the project
zones, err := gceCloud.GetComputeService().Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
Expect(err).NotTo(HaveOccurred())
for _, z := range zones.Items {
allZones.Insert(z.Name)
@ -520,7 +521,7 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
name: "unmanaged_zone",
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{"zone": unmanagedZone},
claimSize: "1Gi",
claimSize: "1G",
}
sc := newStorageClass(test, ns, suffix)
sc, err = c.StorageV1().StorageClasses().Create(sc)
@ -640,6 +641,14 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
claimSize: "2Gi",
expectedSize: "2Gi",
}
// gce or gke
if getDefaultPluginName() == "kubernetes.io/gce-pd" {
// using GB not GiB as the e2e test unit since gce-pd returns GB;
// otherwise expectedSize may be greater than claimSize.
test.claimSize = "2G"
test.expectedSize = "2G"
}
claim := newClaim(test, ns, "default")
testDynamicProvisioning(test, c, claim, nil)
})
@ -782,7 +791,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
}
// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -820,6 +829,10 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
},
},
}
if len(nodeName) != 0 {
pod.Spec.NodeName = nodeName
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
@ -905,7 +918,7 @@ func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.6",
Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9",
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
@ -1005,16 +1018,8 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
}
func getRandomCloudZone(c clientset.Interface) string {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}
}
zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred())
// return "" in case that no node has zone label
zone, _ := zones.PopAny()
return zone

View File

@ -55,6 +55,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func DeleteCinderVolume(name string) error {
@ -79,7 +82,7 @@ func DeleteCinderVolume(name string) error {
}
// These tests need privileged containers, which are disabled by default.
var _ = SIGDescribe("Volumes", func() {
var _ = utils.SIGDescribe("Volumes", func() {
f := framework.NewDefaultFramework("volume")
// note that namespace deletion is handled by delete-namespace flag
@ -258,14 +261,14 @@ var _ = SIGDescribe("Volumes", func() {
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cephfs",
ServerImage: framework.CephServerImage,
ServerImage: imageutils.GetE2EImage(imageutils.VolumeCephServer),
ServerPorts: []int{6789},
}
defer framework.VolumeTestCleanup(f, config)
_, serverIP := framework.CreateStorageServer(cs, config)
By("sleeping a bit to give ceph server time to initialize")
time.Sleep(20 * time.Second)
time.Sleep(framework.VolumeServerPodStartupSleep)
// create ceph secret
secret := &v1.Secret{
@ -500,24 +503,18 @@ var _ = SIGDescribe("Volumes", func() {
Describe("vsphere [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
var volumePath string
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "vsphere",
}
By("creating a test vsphere volume")
c, err := framework.LoadClientset()
if err != nil {
return
}
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
vsp.DeleteVolume(volumePath)
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
defer func() {

View File

@ -0,0 +1,80 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"bootstrap.go",
"config.go",
"connection.go",
"context.go",
"nodemapper.go",
"persistent_volumes-vsphere.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",
"vsphere.go",
"vsphere_common.go",
"vsphere_scale.go",
"vsphere_statefulsets.go",
"vsphere_stress.go",
"vsphere_utils.go",
"vsphere_volume_cluster_ds.go",
"vsphere_volume_datastore.go",
"vsphere_volume_diskformat.go",
"vsphere_volume_disksize.go",
"vsphere_volume_fstype.go",
"vsphere_volume_master_restart.go",
"vsphere_volume_node_delete.go",
"vsphere_volume_node_poweroff.go",
"vsphere_volume_ops_storm.go",
"vsphere_volume_perf.go",
"vsphere_volume_placement.go",
"vsphere_volume_vsan_policy.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/volume/util:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,59 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"sync"
)
var once sync.Once
var waiting = make(chan bool)
var f *framework.Framework
// Bootstrap takes care of initializing necessary test context for vSphere tests
func Bootstrap(fw *framework.Framework) {
done := make(chan bool)
f = fw
go func() {
once.Do(bootstrapOnce)
<-waiting
done <- true
}()
<-done
}
func bootstrapOnce() {
// 1. Read vSphere conf and get VSphere instances
vsphereInstances, err := GetVSphereInstances()
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
// 2. Get all nodes
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to get nodes: %v", err)
}
TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
// 3. Get Node to VSphere mapping
err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
close(waiting)
}

View File

@ -0,0 +1,180 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"errors"
"fmt"
"gopkg.in/gcfg.v1"
"io"
"k8s.io/kubernetes/test/e2e/framework"
"os"
)
const (
vSphereConfFileEnvVar = "VSPHERE_CONF_FILE"
)
var (
confFileLocation = os.Getenv(vSphereConfFileEnvVar)
)
// Config represents vSphere configuration
type Config struct {
Username string
Password string
Hostname string
Port string
Datacenters string
RoundTripperCount uint
DefaultDatastore string
Folder string
}
// ConfigFile represents the content of vsphere.conf file.
// Users specify the configuration of one or more vSphere instances in vsphere.conf where
// the Kubernetes master and worker nodes are running.
type ConfigFile struct {
Global struct {
// vCenter username.
User string `gcfg:"user"`
// vCenter password in clear text.
Password string `gcfg:"password"`
// vCenter port.
VCenterPort string `gcfg:"port"`
// True if vCenter uses self-signed cert.
InsecureFlag bool `gcfg:"insecure-flag"`
// Datacenter in which VMs are located.
Datacenters string `gcfg:"datacenters"`
// Soap round tripper count (retries = RoundTripper - 1)
RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
}
VirtualCenter map[string]*Config
Network struct {
// PublicNetwork is name of the network the VMs are joined to.
PublicNetwork string `gcfg:"public-network"`
}
Disk struct {
// SCSIControllerType defines SCSI controller to be used.
SCSIControllerType string `gcfg:"scsicontrollertype"`
}
// Endpoint used to create volumes
Workspace struct {
VCenterIP string `gcfg:"server"`
Datacenter string `gcfg:"datacenter"`
Folder string `gcfg:"folder"`
DefaultDatastore string `gcfg:"default-datastore"`
ResourcePoolPath string `gcfg:"resourcepool-path"`
}
}
// GetVSphereInstances parses vsphere.conf and returns VSphere instances
func GetVSphereInstances() (map[string]*VSphere, error) {
cfg, err := getConfig()
if err != nil {
return nil, err
}
return populateInstanceMap(cfg)
}
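// getConfig reads the vsphere.conf file pointed to by the VSPHERE_CONF_FILE environment
// variable and parses it into a ConfigFile.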
func getConfig() (*ConfigFile, error) {
if confFileLocation == "" {
return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set.")
}
confFile, err := os.Open(confFileLocation)
if err != nil {
return nil, err
}
defer confFile.Close()
cfg, err := readConfig(confFile)
if err != nil {
return nil, err
}
return &cfg, nil
}
// readConfig parses vSphere cloud config file into ConfigFile.
func readConfig(config io.Reader) (ConfigFile, error) {
if config == nil {
err := fmt.Errorf("no vSphere cloud provider config file given")
return ConfigFile{}, err
}
var cfg ConfigFile
err := gcfg.ReadInto(&cfg, config)
return cfg, err
}
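// populateInstanceMap builds a map from vCenter server address to VSphere instance, filling
// per-instance defaults from the Global and Workspace sections of the config.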
func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
vsphereInstances := make(map[string]*VSphere)
if cfg.Workspace.VCenterIP == "" || cfg.Workspace.DefaultDatastore == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" {
msg := fmt.Sprintf("All fields in workspace are mandatory."+
" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace)
framework.Logf(msg)
return nil, errors.New(msg)
}
for vcServer, vcConfig := range cfg.VirtualCenter {
framework.Logf("Initializing vc server %s", vcServer)
if vcServer == "" {
framework.Logf("vsphere.conf does not have the VirtualCenter IP address specified")
return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified")
}
vcConfig.Hostname = vcServer
if vcConfig.Username == "" {
vcConfig.Username = cfg.Global.User
}
if vcConfig.Password == "" {
vcConfig.Password = cfg.Global.Password
}
if vcConfig.Username == "" {
msg := fmt.Sprintf("vcConfig.User is empty for vc %s!", vcServer)
framework.Logf(msg)
return nil, errors.New(msg)
}
if vcConfig.Password == "" {
msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer)
framework.Logf(msg)
return nil, errors.New(msg)
}
if vcConfig.Port == "" {
vcConfig.Port = cfg.Global.VCenterPort
}
if vcConfig.Datacenters == "" && cfg.Global.Datacenters != "" {
vcConfig.Datacenters = cfg.Global.Datacenters
}
if vcConfig.RoundTripperCount == 0 {
vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount
}
vcConfig.DefaultDatastore = cfg.Workspace.DefaultDatastore
vcConfig.Folder = cfg.Workspace.Folder
vsphereIns := VSphere{
Config: vcConfig,
}
vsphereInstances[vcServer] = &vsphereIns
}
framework.Logf("ConfigFile %v \n vSphere instances %v", cfg, vsphereInstances)
return vsphereInstances, nil
}
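// A minimal usage sketch (illustrative only; it assumes VSPHERE_CONF_FILE
// points at a valid vsphere.conf before the test binary starts):
//
//	instances, err := GetVSphereInstances()
//	if err != nil {
//		framework.Failf("failed to load vSphere instances: %v", err)
//	}
//	for vcHost, vs := range instances {
//		framework.Logf("loaded vCenter %s, datacenters: %q", vcHost, vs.Config.Datacenters)
//	}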

View File

@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
neturl "net/url"
"sync"
"github.com/golang/glog"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
"golang.org/x/net/context"
)
const (
roundTripperDefaultCount = 3
)
var (
clientLock sync.Mutex
)
// Connect establishes a connection to the vSphere instance.
// No action is taken if a connection already exists and is alive; otherwise a new client is created.
func Connect(ctx context.Context, vs *VSphere) error {
var err error
clientLock.Lock()
defer clientLock.Unlock()
if vs.Client == nil {
vs.Client, err = NewClient(ctx, vs)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
manager := session.NewManager(vs.Client.Client)
userSession, err := manager.UserSession(ctx)
if err != nil {
glog.Errorf("Error while obtaining user session. err: %+v", err)
return err
}
if userSession != nil {
return nil
}
glog.Warningf("Creating new client session since the existing session is not valid or not authenticated")
vs.Client.Logout(ctx)
vs.Client, err = NewClient(ctx, vs)
if err != nil {
glog.Errorf("Failed to create govmomi client. err: %+v", err)
return err
}
return nil
}
// NewClient creates a new client for vSphere connection
func NewClient(ctx context.Context, vs *VSphere) (*govmomi.Client, error) {
url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", vs.Config.Hostname, vs.Config.Port))
if err != nil {
glog.Errorf("Failed to parse URL: %s. err: %+v", url, err)
return nil, err
}
url.User = neturl.UserPassword(vs.Config.Username, vs.Config.Password)
client, err := govmomi.NewClient(ctx, url, true)
if err != nil {
glog.Errorf("Failed to create new client. err: %+v", err)
return nil, err
}
if vs.Config.RoundTripperCount == 0 {
vs.Config.RoundTripperCount = roundTripperDefaultCount
}
client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(vs.Config.RoundTripperCount)))
return client, nil
}
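// Illustrative call pattern (not part of the suite): callers re-Connect before
// issuing govmomi requests so an expired session is transparently re-created.
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	if err := Connect(ctx, vs); err != nil {
//		return err
//	}
//	// vs.Client.Client is now an authenticated *vim25.Client.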

View File

@ -0,0 +1,26 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
// VSphereContext holds common information for vSphere tests
type VSphereContext struct {
NodeMapper *NodeMapper
VSphereInstances map[string]*VSphere
}
// TestContext should be used by all tests to access common context data. It should be initialized only once, during bootstrapping the tests.
var TestContext VSphereContext
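// A typical access pattern from a spec body (illustrative sketch; it assumes
// Bootstrap(f) has already populated TestContext, as the specs in this
// package do in their BeforeEach blocks, and nodeName is a mapped node):
//
//	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
//	vs := TestContext.VSphereInstances[nodeInfo.VSphere.Config.Hostname]
//	_ = vs // issue govmomi calls against this instance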

View File

@ -0,0 +1,133 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"errors"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"strings"
"sync"
)
type NodeMapper struct {
}
type NodeInfo struct {
Name string
DataCenterRef types.ManagedObjectReference
VirtualMachineRef types.ManagedObjectReference
VSphere *VSphere
}
var (
nameToNodeInfoLock sync.Mutex
nameToNodeInfo     = make(map[string]*NodeInfo)
)
// GenerateNodeMap populates node name to node info map
func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error {
type VmSearch struct {
vs *VSphere
datacenter *object.Datacenter
}
var wg sync.WaitGroup
var queueChannel []*VmSearch
var datacenters []*object.Datacenter
var err error
for _, vs := range vSphereInstances {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if vs.Config.Datacenters == "" {
datacenters, err = vs.GetAllDatacenter(ctx)
if err != nil {
framework.Logf("NodeMapper error: %v", err)
continue
}
} else {
dcName := strings.Split(vs.Config.Datacenters, ",")
for _, dc := range dcName {
dc = strings.TrimSpace(dc)
if dc == "" {
continue
}
datacenter, err := vs.GetDatacenter(ctx, dc)
if err != nil {
framework.Logf("NodeMapper error dc: %s \n err: %v", dc, err)
continue
}
datacenters = append(datacenters, datacenter)
}
}
for _, dc := range datacenters {
framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
}
}
for _, node := range nodeList.Items {
n := node
wg.Add(1)
go func() {
nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
framework.Logf("Searching for node with UUID: %s", nodeUUID)
for _, res := range queueChannel {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vm, err := res.vs.GetVMByUUID(ctx, nodeUUID, res.datacenter)
if err != nil {
framework.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s",
err, n.Name, res.vs.Config.Hostname, res.datacenter.Name())
continue
}
if vm != nil {
framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s",
n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name())
nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs}
nm.SetNodeInfo(n.Name, nodeInfo)
break
}
}
wg.Done()
}()
}
wg.Wait()
if len(nameToNodeInfo) != len(nodeList.Items) {
return errors.New("not all nodes could be mapped to a vSphere instance")
}
return nil
}
// GetNodeInfo returns the NodeInfo for the given nodeName
func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
return nameToNodeInfo[nodeName]
}
// SetNodeInfo sets the NodeInfo for the given nodeName. It is safe for concurrent use,
// which GenerateNodeMap relies on when mapping nodes in parallel goroutines.
func (nm *NodeMapper) SetNodeInfo(nodeName string, nodeInfo *NodeInfo) {
nameToNodeInfoLock.Lock()
defer nameToNodeInfoLock.Unlock()
nameToNodeInfo[nodeName] = nodeInfo
}
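// Sketch of how the mapper is typically driven (illustrative only; nothing
// below is executed by this file, and f is assumed to be an initialized
// *framework.Framework):
//
//	instances, err := GetVSphereInstances()
//	framework.ExpectNoError(err)
//	nodeMapper := &NodeMapper{}
//	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
//	framework.ExpectNoError(nodeMapper.GenerateNodeMap(instances, *nodes))
//	nodeInfo := nodeMapper.GetNodeInfo(nodes.Items[0].Name)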

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"time"
@ -24,14 +24,13 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// Testing configurations of a single PV/PVC pair attached to a vSphere Disk
var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
var (
c clientset.Interface
ns string
@ -41,11 +40,11 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
clientPod *v1.Pod
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
vsp *vsphere.VSphere
err error
node types.NodeName
node string
volLabel labels.Set
selector *metav1.LabelSelector
nodeInfo *NodeInfo
)
f := framework.NewDefaultFramework("pv")
@ -65,16 +64,17 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
clientPod = nil
pvc = nil
pv = nil
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
if vsp == nil {
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
if volumePath == "" {
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
@ -102,10 +102,10 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
By("Creating the Client Pod")
clientPod, err = framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
node = types.NodeName(clientPod.Spec.NodeName)
node = clientPod.Spec.NodeName
By("Verify disk should be attached to the node")
isAttached, err := verifyVSphereDiskAttached(c, vsp, volumePath, node)
isAttached, err := diskIsAttached(volumePath, node)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
})
@ -133,12 +133,8 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
if len(ns) > 0 && len(volumePath) > 0 {
client, err := framework.LoadClientset()
if err != nil {
return
}
framework.ExpectNoError(waitForVSphereDiskToDetach(client, vsp, volumePath, node))
vsp.DeleteVolume(volumePath)
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
})
@ -182,7 +178,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
3. Verify that written file is accessible after kubelet restart
*/
It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
testKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv)
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv)
})
/*
@ -197,7 +193,7 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
5. Verify that volume mount not to be found.
*/
It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
testVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
})
/*
@ -217,6 +213,6 @@ var _ = SIGDescribe("PersistentVolumes:vsphere", func() {
Expect(err).NotTo(HaveOccurred())
By("Verifying Persistent Disk detaches")
waitForVSphereDiskToDetach(c, vsp, volumePath, node)
waitForVSphereDiskToDetach(volumePath, node)
})
})

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"strconv"
@ -25,13 +25,12 @@ import (
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
f := framework.NewDefaultFramework("persistentvolumereclaim")
var (
c clientset.Interface
@ -39,6 +38,7 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
volumePath string
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
nodeInfo *NodeInfo
)
BeforeEach(func() {
@ -47,18 +47,18 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
})
SIGDescribe("persistentvolumereclaim:vsphere", func() {
utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
pv = nil
pvc = nil
volumePath = ""
})
AfterEach(func() {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
testCleanupVSpherePersistentVolumeReclaim(vsp, c, ns, volumePath, pv, pvc)
testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
})
/*
@ -74,10 +74,8 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
6. Verify PV is deleted automatically.
*/
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
deletePVCAfterBind(c, ns, pvc, pv)
@ -104,10 +102,9 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
9. Verify PV should be detached from the node and automatically deleted.
*/
It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
// Wait for PV and PVC to Bind
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
@ -115,7 +112,6 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
By("Creating the Pod")
pod, err := framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
node := types.NodeName(pod.Spec.NodeName)
By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
@ -127,19 +123,19 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
By("Verify the volume is attached to the node")
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, node)
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
Expect(isVolumeAttached).To(BeTrue())
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}, vsp)
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Verify PV is detached from the node after Pod is deleted")
Expect(waitForVSphereDiskToDetach(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))).NotTo(HaveOccurred())
Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred())
By("Verify PV should be deleted automatically")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
@ -166,11 +162,10 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
*/
It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
var err error
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimRetain)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred())
writeContentToVSpherePV(c, pvc, volumeFileContent)
@ -204,10 +199,10 @@ var _ = SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
})
// Test Setup for persistentvolumereclaim tests for vSphere Provider
func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
By("running testSetupVSpherePersistentVolumeReclaim")
By("creating vmdk")
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil {
return
}
@ -224,10 +219,11 @@ func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.I
}
// Test Cleanup for persistentvolumereclaim tests for vSphere Provider
func testCleanupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePersistentVolumeReclaim")
if len(volumePath) > 0 {
vsp.DeleteVolume(volumePath)
err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"time"
@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -43,7 +44,7 @@ import (
9. delete pvc_vvol
*/
var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
f := framework.NewDefaultFramework("pvclabelselector")
var (
c clientset.Interface
@ -55,11 +56,14 @@ var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
ssdlabels map[string]string
vvollabels map[string]string
err error
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
@ -68,15 +72,15 @@ var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
})
SIGDescribe("Selector-Label Volume Binding:vsphere", func() {
utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() {
AfterEach(func() {
By("Running clean up actions")
if framework.ProviderIs("vsphere") {
testCleanupVSpherePVClabelselector(c, ns, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
}
})
It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, ns, ssdlabels, vvollabels)
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
Expect(err).NotTo(HaveOccurred())
By("wait for the pvc_ssd to bind with pv_ssd")
@ -100,12 +104,11 @@ var _ = SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
})
})
func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
volumePath = ""
By("creating vmdk")
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil {
return
}
@ -130,12 +133,10 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabel
return
}
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
if pvc_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)

View File

@ -0,0 +1,240 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/kubernetes/test/e2e/framework"
"path/filepath"
"strconv"
"strings"
"time"
)
const (
VolDir = "kubevols"
DefaultDiskCapacityKB = 2097152
DefaultDiskFormat = "thin"
DefaultSCSIControllerType = "lsiLogic"
VirtualMachineType = "VirtualMachine"
)
// VSphere represents a vSphere instance on which one or more Kubernetes nodes are running.
type VSphere struct {
Config *Config
Client *govmomi.Client
}
// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
Name string
CapacityKB int
DiskFormat string
SCSIControllerType string
Datastore string
}
// GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) {
Connect(ctx, vs)
finder := find.NewFinder(vs.Client.Client, true)
return finder.Datacenter(ctx, datacenterPath)
}
// GetDatacenterFromObjectReference returns the Datacenter object for the given datacenter reference
func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
Connect(ctx, vs)
return object.NewDatacenter(vs.Client.Client, dc.Reference())
}
// GetAllDatacenter returns all the DataCenter Objects
func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) {
Connect(ctx, vs)
finder := find.NewFinder(vs.Client.Client, true)
return finder.DatacenterList(ctx, "*")
}
// GetVMByUUID gets the VM object Reference from the given vmUUID
func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
Connect(ctx, vs)
datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
s := object.NewSearchIndex(vs.Client.Client)
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
}
// GetFolderByPath gets the Folder Object Reference from the given folder path
// folderPath should be the full path to folder
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return vmFolderMor, err
}
return vmFolder.Reference(), nil
}
// CreateVolume creates a vSphere volume using the given volume parameters specified in VolumeOptions.
// If the volume is created successfully, the canonical disk path is returned; otherwise an error is returned.
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
var (
err error
directoryAlreadyPresent = false
)
if datacenter == nil {
return "", fmt.Errorf("datacenter is nil")
}
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
vs.initVolumeOptions(volumeOptions)
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
if err != nil {
return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
}
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
fileManager := object.NewFileManager(ds.Client())
err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
if err != nil {
if soap.IsSoapFault(err) {
soapFault := soap.ToSoapFault(err)
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
directoryAlreadyPresent = true
framework.Logf("Directory with the path %+q is already present", directoryPath)
}
}
if !directoryAlreadyPresent {
framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
return "", err
}
}
framework.Logf("Created dir with path as %+q", directoryPath)
vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
// Create a virtual disk manager
vdm := object.NewVirtualDiskManager(ds.Client())
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: volumeOptions.SCSIControllerType,
DiskType: volumeOptions.DiskFormat,
},
CapacityKb: int64(volumeOptions.CapacityKB),
}
// Create virtual disk
task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
if err != nil {
framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
return "", err
}
taskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
return "", err
}
volumePath := taskInfo.Result.(string)
canonicalDiskPath, err := getCanonicalVolumePath(ctx, datacenter, volumePath)
if err != nil {
return "", err
}
return canonicalDiskPath, nil
}
// DeleteVolume deletes the vmdk file specified by volumePath.
// If an error is encountered while deleting the volume, it is returned.
func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
diskPath := removeStorageClusterORFolderNameFromVDiskPath(volumePath)
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
err = task.Wait(ctx)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil
}
// IsVMPresent checks whether a VM with the name specified in vmName is present in the vCenter inventory.
// It returns true if the VM is present, false otherwise.
func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
folderMor, err := vs.GetFolderByPath(ctx, dataCenterRef, vs.Config.Folder)
if err != nil {
return
}
vmFolder := object.NewFolder(vs.Client.Client, folderMor)
vmFoldersChildren, err := vmFolder.Children(ctx)
if err != nil {
framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
return
}
for _, vmFoldersChild := range vmFoldersChildren {
if vmFoldersChild.Reference().Type == VirtualMachineType {
if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
return true, nil
}
}
}
return
}
// initVolumeOptions function sets default values for volumeOptions parameters if not set
func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
if volumeOptions.Datastore == "" {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
}
}
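// End-to-end lifecycle sketch for a single volume (illustrative only; it
// mirrors how the specs in this package call these helpers, with nodeName
// standing in for a mapped Kubernetes node):
//
//	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
//	volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
//	Expect(err).NotTo(HaveOccurred())
//	defer nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)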

View File

@ -0,0 +1,66 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
. "github.com/onsi/gomega"
"os"
"strconv"
)
const (
SPBMPolicyName = "VSPHERE_SPBM_POLICY_NAME"
StorageClassDatastoreName = "VSPHERE_DATASTORE"
SecondSharedDatastore = "VSPHERE_SECOND_SHARED_DATASTORE"
KubernetesClusterName = "VSPHERE_KUBERNETES_CLUSTER"
SPBMTagPolicy = "VSPHERE_SPBM_TAG_POLICY"
)
const (
VCPClusterDatastore = "CLUSTER_DATASTORE"
SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER"
)
const (
VCPScaleVolumeCount = "VCP_SCALE_VOLUME_COUNT"
VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD"
VCPScaleInstances = "VCP_SCALE_INSTANCES"
)
const (
VCPStressInstances = "VCP_STRESS_INSTANCES"
VCPStressIterations = "VCP_STRESS_ITERATIONS"
)
const (
VCPPerfVolumeCount = "VCP_PERF_VOLUME_COUNT"
VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD"
VCPPerfIterations = "VCP_PERF_ITERATIONS"
)
func GetAndExpectStringEnvVar(varName string) string {
varValue := os.Getenv(varName)
Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set")
return varValue
}
func GetAndExpectIntEnvVar(varName string) int {
varValue := GetAndExpectStringEnvVar(varName)
varIntValue, err := strconv.Atoi(varValue)
Expect(err).NotTo(HaveOccurred(), "Error Parsing "+varName)
return varIntValue
}
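// Example of wiring test knobs through these helpers (illustrative; the scale
// and stress specs below use this same pattern):
//
//	volumeCount := GetAndExpectIntEnvVar(VCPScaleVolumeCount)
//	policyName := GetAndExpectStringEnvVar(SPBMPolicyName)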

View File

@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
"strconv"
. "github.com/onsi/ginkgo"
@ -26,10 +25,9 @@ import (
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -52,7 +50,7 @@ type NodeSelector struct {
labelValue string
}
var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
f := framework.NewDefaultFramework("vcp-at-scale")
var (
@ -62,42 +60,30 @@ var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
volumeCount int
numberOfInstances int
volumesPerPod int
nodeVolumeMapChan chan map[string][]string
nodes *v1.NodeList
policyName string
datastoreName string
nodeVolumeMapChan chan map[string][]string
nodes *v1.NodeList
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
err error
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeVolumeMapChan = make(chan map[string][]string)
// Read the environment variables
volumeCountStr := os.Getenv("VCP_SCALE_VOLUME_COUNT")
Expect(volumeCountStr).NotTo(BeEmpty(), "ENV VCP_SCALE_VOLUME_COUNT is not set")
volumeCount, err = strconv.Atoi(volumeCountStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_VOLUME_COUNT")
volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
volumesPerPodStr := os.Getenv("VCP_SCALE_VOLUME_PER_POD")
Expect(volumesPerPodStr).NotTo(BeEmpty(), "ENV VCP_SCALE_VOLUME_PER_POD is not set")
volumesPerPod, err = strconv.Atoi(volumesPerPodStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_VOLUME_PER_POD")
numberOfInstancesStr := os.Getenv("VCP_SCALE_INSTANCES")
Expect(numberOfInstancesStr).NotTo(BeEmpty(), "ENV VCP_SCALE_INSTANCES is not set")
numberOfInstances, err = strconv.Atoi(numberOfInstancesStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_INSTANCES")
numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5")
Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count")
policyName = os.Getenv("VSPHERE_SPBM_POLICY_NAME")
datastoreName = os.Getenv("VSPHERE_DATASTORE")
Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_POLICY_NAME is not set")
Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodes = framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) < 2 {
@ -124,7 +110,7 @@ var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
It("vsphere scale tests", func() {
var pvcClaimList []string
nodeVolumeMap := make(map[k8stypes.NodeName][]string)
nodeVolumeMap := make(map[string][]string)
// Volumes will be provisioned with each different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
@ -150,22 +136,19 @@ var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
scArrays[index] = sc
}
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
volumeCountPerInstance := volumeCount / numberOfInstances
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
if instanceCount == numberOfInstances-1 {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan, vsp)
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
}
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
for node, volumeList := range <-nodeVolumeMapChan {
nodeVolumeMap[k8stypes.NodeName(node)] = append(nodeVolumeMap[k8stypes.NodeName(node)], volumeList...)
nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
}
}
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
@ -176,7 +159,7 @@ var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap)
err = waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList {
@ -198,7 +181,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
}
// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string, vsp *vsphere.VSphere) {
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
@ -228,7 +211,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
nodeSelectorIndex++
}
nodeVolumeMapChan <- nodeVolumeMap

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
@ -22,9 +22,9 @@ import (
. "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -49,7 +49,7 @@ const (
storageclassname = "nginx-sc"
)
var _ = SIGDescribe("vsphere statefulset", func() {
var _ = utils.SIGDescribe("vsphere statefulset", func() {
f := framework.NewDefaultFramework("vsphere-statefulset")
var (
namespace string
@ -59,6 +59,7 @@ var _ = SIGDescribe("vsphere statefulset", func() {
framework.SkipUnlessProviderIs("vsphere")
namespace = f.Namespace.Name
client = f.ClientSet
Bootstrap(f)
})
AfterEach(func() {
framework.Logf("Deleting all statefulset in namespace: %v", namespace)
@ -103,9 +104,6 @@ var _ = SIGDescribe("vsphere statefulset", func() {
Expect(scaledownErr).NotTo(HaveOccurred())
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// After scale down, verify vsphere volumes are detached from deleted pods
By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
for _, sspod := range ssPodsBeforeScaleDown.Items {
@ -116,7 +114,7 @@ var _ = SIGDescribe("vsphere statefulset", func() {
if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
Expect(waitForVSphereDiskToDetach(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
Expect(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)).NotTo(HaveOccurred())
}
}
}
@ -145,7 +143,7 @@ var _ = SIGDescribe("vsphere statefulset", func() {
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
// Verify scale up has re-attached the same volumes and not introduced new volume
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
}

View File

@ -14,12 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
"strconv"
"sync"
. "github.com/onsi/ginkgo"
@ -27,10 +25,9 @@ import (
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -43,17 +40,17 @@ import (
4. Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS
5. Each iteration creates 1 PVC, 1 POD using the provisioned PV, Verify disk is attached to the node, Verify pod can access the volume, delete the pod and finally delete the PVC.
*/
var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("vcp-stress")
var (
client clientset.Interface
namespace string
instances int
iterations int
err error
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
policyName string
datastoreName string
err error
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
)
BeforeEach(func() {
@ -67,23 +64,16 @@ var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
// For example, if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS = 10, 12 goroutines run in parallel for 10 iterations each,
// resulting in 120 volume and pod creations. Volumes are provisioned with each of the different types of storage class.
// Each iteration creates a PVC, verifies the PV is provisioned, creates a pod, verifies the volume is attached to the node, and then deletes the pod and the PVC.
instancesStr := os.Getenv("VCP_STRESS_INSTANCES")
Expect(instancesStr).NotTo(BeEmpty(), "ENV VCP_STRESS_INSTANCES is not set")
instances, err = strconv.Atoi(instancesStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP-STRESS-INSTANCES")
instances = GetAndExpectIntEnvVar(VCPStressInstances)
Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
iterationStr := os.Getenv("VCP_STRESS_ITERATIONS")
Expect(instancesStr).NotTo(BeEmpty(), "ENV VCP_STRESS_ITERATIONS is not set")
iterations, err = strconv.Atoi(iterationStr)
iterations = GetAndExpectIntEnvVar(VCPStressIterations)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS")
Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
policyName = os.Getenv("VSPHERE_SPBM_POLICY_NAME")
datastoreName = os.Getenv("VSPHERE_DATASTORE")
Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_POLICY_NAME is not set")
Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
})
It("vsphere stress tests", func() {
@ -134,8 +124,7 @@ var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
defer wg.Done()
defer GinkgoRecover()
vsp, err := getVSphere(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
@ -162,19 +151,19 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))

View File

@ -0,0 +1,757 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"math/rand"
"path/filepath"
"time"
"github.com/golang/glog"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/vmware/govmomi/find"
vimtypes "github.com/vmware/govmomi/vim25/types"
"regexp"
"strings"
)
const (
volumesPerNode = 55
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
DummyDiskName = "kube-dummyDisk.vmdk"
ProviderPrefix = "vsphere://"
)
// volumeState represents the state of a volume.
type volumeState int32
const (
volumeStateDetached volumeState = 1
volumeStateAttached volumeState = 2
)
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
var (
err error
disksAttached = true
detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second
)
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
attachedResult, err := disksAreAttached(nodeVolumes)
if err != nil {
return false, err
}
for nodeName, nodeVolumes := range attachedResult {
for volumePath, attached := range nodeVolumes {
if attached {
framework.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
return false, nil
}
}
}
disksAttached = false
framework.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
return true, nil
})
if err != nil {
return err
}
if disksAttached {
return fmt.Errorf("Gave up waiting for volumes to detach after %v", detachTimeout)
}
return nil
}
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
var (
err error
diskAttached bool
currentState volumeState
timeout = 6 * time.Minute
pollTime = 10 * time.Second
)
var attachedState = map[bool]volumeState{
true: volumeStateAttached,
false: volumeStateDetached,
}
var attachedStateMsg = map[volumeState]string{
volumeStateAttached: "attached to",
volumeStateDetached: "detached from",
}
err = wait.Poll(pollTime, timeout, func() (bool, error) {
diskAttached, err = diskIsAttached(volumePath, nodeName)
if err != nil {
return true, err
}
currentState = attachedState[diskAttached]
if currentState == expectedState {
framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
return true, nil
}
framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
return false, nil
})
if err != nil {
return err
}
if currentState != expectedState {
err = fmt.Errorf("Gave up waiting for Volume %q to be %s %q after %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout)
}
return err
}
// Wait until vsphere vmdk is attached to the given node or time out after 6 minutes
func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
}
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
}
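// Typical detach verification after a pod is deleted (illustrative; f, client,
// pod and volumePath are assumed to be in scope, as in the disruptive specs):
//
//	framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
//	framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, pod.Spec.NodeName))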
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
var (
pvConfig framework.PersistentVolumeConfig
pv *v1.PersistentVolume
claimRef *v1.ObjectReference
)
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
Prebind: nil,
}
pv = &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Annotations: map[string]string{
util.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
PersistentVolumeSource: pvConfig.PVSource,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: claimRef,
},
}
if labels != nil {
pv.Labels = labels
}
return pv
}
// function to get vsphere persistent volume claim spec with given selector labels.
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
var (
pvc *v1.PersistentVolumeClaim
)
pvc = &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
},
},
}
if labels != nil {
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
return pvc
}
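// The two spec helpers above are normally used as a matched pair and then
// bound via framework.WaitOnPVandPVC (illustrative sketch; volumePath, ns and
// labels are assumed to be in scope):
//
//	pv := getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, labels)
//	pvc := getVSpherePersistentVolumeClaimSpec(ns, labels)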
// function to write content to the volume backed by given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
framework.Logf("Done with writing content to volume")
}
// function to verify content is matching on the volume backed for given PVC
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
framework.Logf("Successfully verified content of the volume")
}
func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass {
var sc *storage.StorageClass
sc = &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Provisioner: "kubernetes.io/vsphere-volume",
}
if scParameters != nil {
sc.Parameters = scParameters
}
return sc
}
func getVSphereClaimSpecWithStorageClassAnnotation(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
scAnnotation := make(map[string]string)
scAnnotation[v1.BetaStorageClassAnnotation] = storageclass.Name
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
Annotations: scAnnotation,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
},
},
},
}
return claim
}
// func to get pod spec with given volume claim, node selector labels and command
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pod-pvc-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
if nodeSelectorKV != nil {
pod.Spec.NodeSelector = nodeSelectorKV
}
return pod
}
// func to get pod spec with given volume paths, node selector labels and container commands
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
var volumeMounts []v1.VolumeMount
var volumes []v1.Volume
for index, volumePath := range volumePaths {
name := fmt.Sprintf("volume%v", index+1)
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
vsphereVolume.VolumePath = volumePath
vsphereVolume.FSType = "ext4"
volumes = append(volumes, v1.Volume{Name: name})
volumes[index].VolumeSource.VsphereVolume = vsphereVolume
}
if len(commands) == 0 {
commands = []string{
"/bin/sh",
"-c",
"while true; do sleep 2; done",
}
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vsphere-e2e-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "busybox",
Command: commands,
VolumeMounts: volumeMounts,
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: volumes,
},
}
if keyValuelabel != nil {
pod.Spec.NodeSelector = keyValuelabel
}
return pod
}
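// A rough sketch wiring the helper above to the file helpers defined below; volumePath is assumed
// to reference an existing vmdk, and the first volume is always mounted at /mnt/volume1:
//
//    podSpec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
//    pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
//    Expect(err).NotTo(HaveOccurred())
//    Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
//    createEmptyFilesOnVSphereVolume(namespace, pod.Name, []string{"/mnt/volume1/emptyFile.txt"})
//    verifyFilesExistOnVSphereVolume(namespace, pod.Name, []string{"/mnt/volume1/emptyFile.txt"})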
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
}
}
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
Expect(err).NotTo(HaveOccurred())
}
}
// verify volumes are attached to the node and are accessible in pod
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filePath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filePath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred())
}
}
// Get vSphere Volume Path from PVC
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pv.Spec.VsphereVolume.VolumePath
}
// Get canonical volume path for volume Path.
// Example 1: the canonical path for volume path [vsanDatastore] kubevols/volume.vmdk is [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk.
// Example 2: the canonical path for volume path [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk is the same as the volume path.
func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
var folderID string
canonicalVolumePath := volumePath
dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
if err != nil {
return "", err
}
dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
if len(dsPath) <= 1 {
return canonicalVolumePath, nil
}
datastore := dsPathObj.Datastore
dsFolder := dsPath[0]
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
if !isValidUUID(dsFolder) {
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
// Query a non-existent dummy disk on the datastore folder.
// The query fails, and the error message contains the canonical path from which the folder ID is extracted.
_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
if err != nil {
re := regexp.MustCompile("File (.*?) was not found")
match := re.FindStringSubmatch(err.Error())
canonicalVolumePath = match[1]
}
}
diskPath := getPathFromVMDiskPath(canonicalVolumePath)
if diskPath == "" {
return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
}
folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
return canonicalVolumePath, nil
}
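// Illustrative call, assuming nodeInfo was obtained from TestContext.NodeMapper and the datastore
// path is an example value:
//
//    ctx, cancel := context.WithCancel(context.Background())
//    defer cancel()
//    dc := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
//    canonicalPath, err := getCanonicalVolumePath(ctx, dc, "[vsanDatastore] kubevols/volume.vmdk")
//    Expect(err).NotTo(HaveOccurred())
//    framework.Logf("canonical volume path: %s", canonicalPath)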
// getPathFromVMDiskPath retrieves the path from VM Disk Path.
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
func getPathFromVMDiskPath(vmDiskPath string) string {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
return ""
}
return datastorePathObj.Path
}
// getDatastorePathObjFromVMDiskPath gets the datastorePathObj from the VM disk path.
func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse volPath: %s", vmDiskPath)
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
}
// getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
diskPath += ".vmdk"
}
vdm := object.NewVirtualDiskManager(dc.Client())
// Returns uuid of vmdk virtual disk
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
if err != nil {
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
return diskUUID, nil
}
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
func formatVirtualDiskUUID(uuid string) string {
uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
return strings.ToLower(uuidWithNoHyphens)
}
// isValidUUID checks if the string is a valid UUID.
func isValidUUID(uuid string) bool {
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
return r.MatchString(uuid)
}
// removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath.
// For vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk.
// For vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value remains [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk.
func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
// getVirtualDeviceByPath gets the virtual device by path
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
} else {
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
}
}
}
}
return nil, nil
}
func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
fileExt := ".vmdk"
diskPath = strings.TrimSuffix(diskPath, fileExt)
volPath = strings.TrimSuffix(volPath, fileExt)
return diskPath == volPath
}
// convertVolPathsToDevicePaths removes the cluster or folder path from each volume path and converts it to its canonical path
func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
vmVolumes := make(map[string][]string)
for nodeName, volPaths := range nodeVolumes {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
for i, volPath := range volPaths {
deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
if err != nil {
framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
return nil, err
}
volPaths[i] = deviceVolPath
}
vmVolumes[nodeName] = volPaths
}
return vmVolumes, nil
}
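// A minimal sketch of how the conversion helper above is fed: keys are node names registered with
// TestContext.NodeMapper and values are vmdk paths (nodeName and volumePath are placeholders):
//
//    ctx, cancel := context.WithCancel(context.Background())
//    defer cancel()
//    nodeVolumes := map[string][]string{nodeName: {volumePath}}
//    vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
//    Expect(err).NotTo(HaveOccurred())
//    framework.Logf("canonical device paths per node: %+v", vmVolumes)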
// convertVolPathToDevicePath takes volPath and returns canonical volume path
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
// Get the canonical volume path for volPath.
canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
if err != nil {
framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
return "", err
}
// Check if the volume path contains the .vmdk extension. If not, add the extension.
if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
canonicalVolumePath += ".vmdk"
}
return canonicalVolumePath, nil
}
// get .vmx file path for a virtual machine
func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var nodeVM mo.VirtualMachine
err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
Expect(err).NotTo(HaveOccurred())
Expect(nodeVM.Config).NotTo(BeNil())
vmxPath = nodeVM.Config.Files.VmPathName
framework.Logf("vmx file path is %s", vmxPath)
return vmxPath
}
// verify ready node count. Try up to 3 minutes. Return true if the count matches the expected count
func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
numNodes := 0
for i := 0; i < 36; i++ {
nodeList := framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
numNodes = len(nodeList.Items)
if numNodes == expectedNodes {
break
}
time.Sleep(5 * time.Second)
}
return (numNodes == expectedNodes)
}
// poweroff nodeVM and confirm the poweroff state
func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
framework.Logf("Powering off node VM %s", nodeName)
_, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node")
}
// poweron nodeVM and confirm the poweron state
func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
framework.Logf("Powering on node VM %s", nodeName)
_, err := vm.PowerOn(ctx)
Expect(err).NotTo(HaveOccurred(), "Unable to issue power-on for the node")
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node")
}
// unregister a nodeVM from VC
func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
poweroffNodeVM(nodeName, vm)
framework.Logf("Unregistering node VM %s", nodeName)
err := vm.Unregister(ctx)
Expect(err).NotTo(HaveOccurred(), "Unable to unregister the node")
}
// register a nodeVM into a VC
func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, true)
vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
Expect(err).NotTo(HaveOccurred())
registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
Expect(err).NotTo(HaveOccurred())
err = registerTask.Wait(ctx)
Expect(err).NotTo(HaveOccurred())
vmPath := filepath.Join(workingDir, nodeName)
vm, err := finder.VirtualMachine(ctx, vmPath)
Expect(err).NotTo(HaveOccurred())
poweronNodeVM(nodeName, vm)
}
// disksAreAttached takes a map of node to its volumes and returns a map of node to its volumes and their attachment state
func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disksAttached := make(map[string]map[string]bool)
if len(nodeVolumes) == 0 {
return disksAttached, nil
}
// Convert VolPaths into canonical form so that it can be compared with the VM device path.
vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
if err != nil {
framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
return nil, err
}
for vm, volumes := range vmVolumes {
volumeAttachedMap := make(map[string]bool)
for _, volume := range volumes {
attached, err := diskIsAttached(volume, vm)
if err != nil {
return nil, err
}
volumeAttachedMap[volume] = attached
}
disksAttached[vm] = volumeAttachedMap
}
return disksAttached, nil
}
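// Usage sketch: callers typically build the same node-to-volumes map used above and assert on the
// per-volume attachment state (nodeName and volumePath are placeholders):
//
//    attachedMap, err := disksAreAttached(map[string][]string{nodeName: {volumePath}})
//    Expect(err).NotTo(HaveOccurred())
//    for node, volumes := range attachedMap {
//        for volume, attached := range volumes {
//            Expect(attached).To(BeFalse(), fmt.Sprintf("volume %q is still attached to node %q", volume, node))
//        }
//    }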
// diskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
func diskIsAttached(volPath string, nodeName string) (bool, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
Connect(ctx, nodeInfo.VSphere)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
device, err := getVirtualDeviceByPath(ctx, vm, volPath)
if err != nil {
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
nodeName)
return false, err
}
if device == nil {
return false, nil
}
framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
return true, nil
}
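// A short sketch tying the claim helpers to the attach check above: resolve the backing vmdk path
// from a bound claim, then assert it is attached to the pod's node (pvclaim and pod are assumed to
// exist already):
//
//    volPath := getvSphereVolumePathFromClaim(client, namespace, pvclaim.Name)
//    isAttached, err := diskIsAttached(volPath, pod.Spec.NodeName)
//    Expect(err).NotTo(HaveOccurred())
//    Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %q is not attached to node %q", volPath, pod.Spec.NodeName))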
// getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID
// this gives the VM UUID which can be used to find Node VM from vCenter
func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}
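// Example usage, assuming node is a v1.Node registered by the vSphere cloud provider (its
// Spec.ProviderID has the form "vsphere://<vm-uuid>"):
//
//    vmUUID := getUUIDFromProviderID(node.Spec.ProviderID)
//    framework.Logf("node %s has VM UUID %s", node.Name, vmUUID)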
// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
func GetReadySchedulableNodeInfos() []*NodeInfo {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
var nodesInfo []*NodeInfo
for _, node := range nodeList.Items {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
if nodeInfo != nil {
nodesInfo = append(nodesInfo, nodeInfo)
}
}
return nodesInfo
}
// GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one of the Ready and schedulable nodes.
// If multiple nodes are present with Ready and schedulable state, one of them is selected randomly
// and its associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()
rand.Seed(time.Now().Unix())
Expect(nodesInfo).NotTo(BeEmpty())
return nodesInfo[rand.Int()%len(nodesInfo)]
}
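// A rough end-to-end sketch of how the node info helpers combine with the volume helpers in this
// file: pick a random ready node, create a vmdk in its datacenter, and clean it up afterwards:
//
//    nodeInfo := GetReadySchedulableRandomNodeInfo()
//    volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
//    Expect(err).NotTo(HaveOccurred())
//    defer nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)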

View File

@ -14,19 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -39,21 +36,25 @@ import (
1. CLUSTER_DATASTORE which should be set to clustered datastore
2. VSPHERE_SPBM_POLICY_DS_CLUSTER which should be set to a tag based spbm policy tagged to a clustered datastore
*/
var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-provision")
var client clientset.Interface
var namespace string
var scParameters map[string]string
var clusterDatastore string
var (
client clientset.Interface
namespace string
scParameters map[string]string
clusterDatastore string
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeInfo = GetReadySchedulableRandomNodeInfo()
scParameters = make(map[string]string)
clusterDatastore = os.Getenv("CLUSTER_DATASTORE")
Expect(clusterDatastore).NotTo(BeEmpty(), "Please set CLUSTER_DATASTORE system environment. eg: export CLUSTER_DATASTORE=<cluster_name>/<datastore_name")
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
})
/*
@ -68,21 +69,19 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
It("verify static provisioning on clustered datastore", func() {
var volumePath string
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("creating a test vsphere volume")
volumeOptions := new(vclib.VolumeOptions)
volumeOptions := new(VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + namespace
volumeOptions.Datastore = clusterDatastore
volumePath, err = createVSphereVolume(vsp, volumeOptions)
volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Deleting the vsphere volume")
vsp.DeleteVolume(volumePath)
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
@ -96,10 +95,10 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
nodeName := types.NodeName(pod.Spec.NodeName)
nodeName := pod.Spec.NodeName
By("Verifying volume is attached")
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, nodeName)
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node: %v", volumePath, nodeName))
@ -108,7 +107,7 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
Expect(err).NotTo(HaveOccurred())
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDiskToDetach(client, vsp, volumePath, nodeName)
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
})
@ -128,9 +127,8 @@ var _ = SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere
2. invokeValidPolicyTest - util to do e2e dynamic provision test
*/
It("verify dynamic provision with spbm policy on clustered datastore", func() {
storagePolicy := os.Getenv("VSPHERE_SPBM_POLICY_DS_CLUSTER")
Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_POLICY_DS_CLUSTER system environment")
scParameters[SpbmStoragePolicy] = storagePolicy
policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster)
scParameters[SpbmStoragePolicy] = policyDatastoreCluster
invokeValidPolicyTest(f, client, namespace, scParameters)
})
})

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -44,7 +45,7 @@ const (
4. Verify the error returned on PVC failure is correct.
*/
var _ = SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-datastore")
var (
client clientset.Interface
@ -53,6 +54,7 @@ var _ = SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func()
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
scParameters = make(map[string]string)

View File

@ -14,24 +14,22 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -52,8 +50,11 @@ import (
11. Delete PVC, PV and Storage Class
*/
var _ = SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-disk-format")
const (
NodeLabelKey = "vsphere_e2e_label_volume_diskformat"
)
var (
client clientset.Interface
namespace string
@ -64,26 +65,22 @@ var _ = SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
if !isNodeLabeled {
nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel["vsphere_e2e_label"] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(client, nodeName, "vsphere_e2e_label", nodeLabelValue)
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
isNodeLabeled = true
}
})
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
if len(namespace) > 0 && len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(client, nodeName, "vsphere_e2e_label")
framework.RemoveLabelOffNode(client, nodeName, NodeLabelKey)
}
})
@ -145,37 +142,36 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
verifyVSphereDiskAttached(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
Expect(verifyDiskFormat(nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(isAttached).To(BeTrue())
Expect(err).NotTo(HaveOccurred())
By("Verify Disk Format")
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, nodeName, volumePaths)
deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths)
}
func verifyDiskFormat(nodeName string, pvVolumePath string, diskFormat string) bool {
func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
By("Verifing disk format")
eagerlyScrub := false
thinProvisioned := false
diskFound := false
pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath)
govMoMiClient, err := vsphere.GetgovmomiClient(nil)
Expect(err).NotTo(HaveOccurred())
f := find.NewFinder(govMoMiClient.Client, true)
ctx, _ := context.WithCancel(context.Background())
vm, err := f.VirtualMachine(ctx, os.Getenv("VSPHERE_WORKING_DIR")+nodeName)
Expect(err).NotTo(HaveOccurred())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
vmDevices, err := vm.Device(ctx)
Expect(err).NotTo(HaveOccurred())

View File

@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
"strings"
"time"
@ -28,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -44,7 +44,7 @@ const (
4. Verify the error returned on PVC failure is correct.
*/
var _ = SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-disksize")
var (
client clientset.Interface
@ -54,15 +54,11 @@ var _ = SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
scParameters = make(map[string]string)
datastore = os.Getenv("VSPHERE_DATASTORE")
Expect(datastore).NotTo(BeEmpty())
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
})
It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() {

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"strings"
@ -24,10 +24,9 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -63,7 +62,7 @@ const (
7. Verify if the MountVolume.MountDevice fails because it is unable to find the file system executable file on the node.
*/
var _ = SIGDescribe("Volume FStype [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-fstype")
var (
client clientset.Interface
@ -71,10 +70,10 @@ var _ = SIGDescribe("Volume FStype [Feature:vsphere]", func() {
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).NotTo(BeZero(), "Unable to find ready and schedulable Node")
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
})
It("verify fstype - ext3 formatted volume", func() {
@ -97,28 +96,25 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume
By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes, vsp)
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
_, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred())
// Detach and delete volume
detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace)
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
}
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume
By("Creating Storage Class With Invalid Fstype")
@ -134,8 +130,9 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
// Detach and delete volume
detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace)
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
Expect(eventList.Items).NotTo(BeEmpty())
errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
@ -165,7 +162,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
return pvclaim, persistentvolumes
}
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) *v1.Pod {
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node")
@ -175,18 +172,18 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
// Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
return pod
}
func detachVolume(f *framework.Framework, client clientset.Interface, vsp *vsphere.VSphere, pod *v1.Pod, volPath string) {
// detachVolume deletes the pod passed in the argument and waits until the given volume is detached from the node.
func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(BeNil())
nodeName := pod.Spec.NodeName
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(client, vsp, volPath, k8stype.NodeName(pod.Spec.NodeName))
}
func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {
framework.DeletePersistentVolumeClaim(client, pvclaimName, namespace)
waitForVSphereDiskToDetach(volPath, nodeName)
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
@ -24,10 +24,10 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -41,7 +41,7 @@ import (
6. Delete the pod and wait for the volume to be detached
7. Delete the volume
*/
var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() {
var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() {
f := framework.NewDefaultFramework("restart-master")
const labelKey = "vsphere_e2e_label"
@ -53,9 +53,11 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
numNodes int
nodeKeyValueLabelList []map[string]string
nodeNameList []string
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
@ -65,7 +67,7 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
if numNodes < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
for i := 0; i < numNodes; i++ {
nodeName := nodes.Items[i].Name
nodeNameList = append(nodeNameList, nodeName)
@ -78,15 +80,11 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
})
It("verify volume remains attached after master kubelet restart", func() {
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create pod on each node
for i := 0; i < numNodes; i++ {
By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
@ -103,9 +101,9 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
pods = append(pods, pod)
nodeName := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("Verify volume %s is attached to the pod %v", volumePath, nodeName))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
@ -113,7 +111,7 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22"
err = framework.RestartKubelet(masterAddress)
err := framework.RestartKubelet(masterAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node")
By("Verifying the kubelet on master node is up")
@ -122,23 +120,22 @@ var _ = SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]"
for i, pod := range pods {
volumePath := volumePaths[i]
nodeName := types.NodeName(pod.Spec.NodeName)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePaths[i], types.NodeName(nodeName))
isAttached, err := diskIsAttached(volumePaths[i], nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
By(fmt.Sprintf("Deleting pod on node %v", nodeName))
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %v", volumePath, nodeName))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, types.NodeName(nodeName))
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath))
err = vsp.DeleteVolume(volumePath)
err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
})

View File

@ -0,0 +1,119 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"golang.org/x/net/context"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("node-unregister")
var (
client clientset.Interface
namespace string
workingDir string
err error
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
Expect(err).NotTo(HaveOccurred())
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
})
It("node unregister", func() {
By("Get total Ready nodes")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
totalNodesCount := len(nodeList.Items)
nodeVM := nodeList.Items[0]
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeVM.ObjectMeta.Name)
vmObject := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
// Find VM .vmx file path, host, resource pool.
// They are required to register a node VM to VC
vmxFilePath := getVMXFilePath(vmObject)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vmHost, err := vmObject.HostSystem(ctx)
Expect(err).NotTo(HaveOccurred())
vmPool, err := vmObject.ResourcePool(ctx)
Expect(err).NotTo(HaveOccurred())
// Unregister Node VM
By("Unregister a node VM")
unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject)
// Ready nodes should be 1 less
By("Verifying the ready node counts")
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(BeTrue(), "Unable to verify expected ready node count")
nodeList = framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
var nodeNameList []string
for _, node := range nodeList.Items {
nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
}
Expect(nodeNameList).NotTo(ContainElement(nodeVM.ObjectMeta.Name))
// Register Node VM
By("Register back the node VM")
registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost)
// Ready nodes should be equal to earlier count
By("Verifying the ready node counts")
Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(BeTrue(), "Unable to verify expected ready node count")
nodeList = framework.GetReadySchedulableNodesOrDie(client)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
nodeNameList = nodeNameList[:0]
for _, node := range nodeList.Items {
nodeNameList = append(nodeNameList, node.ObjectMeta.Name)
}
Expect(nodeNameList).To(ContainElement(nodeVM.ObjectMeta.Name))
// Sanity test that pod provisioning works
By("Sanity check for volume lifecycle")
scParameters := make(map[string]string)
storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment")
scParameters[SpbmStoragePolicy] = storagePolicy
invokeValidPolicyTest(f, client, namespace, scParameters)
})
})

View File

@ -14,28 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
"path/filepath"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"golang.org/x/net/context"
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -43,28 +40,22 @@ import (
1. Verify the pod got provisioned on a different node with volume attached to it
2. Verify the volume is detached from the powered off node
*/
var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() {
var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("node-poweroff")
var (
client clientset.Interface
namespace string
vsp *vsphere.VSphere
workingDir string
err error
client clientset.Interface
namespace string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
})
/*
@ -102,31 +93,25 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func(
volumePath := pvs[0].Spec.VsphereVolume.VolumePath
By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, namespace, pvclaims, "")
defer client.Extensions().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
defer client.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployement")
podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
node1 := types.NodeName(pod.Spec.NodeName)
node1 := pod.Spec.NodeName
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, node1)
isAttached, err := diskIsAttached(volumePath, node1)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1))
govMoMiClient, err := vsphere.GetgovmomiClient(nil)
Expect(err).NotTo(HaveOccurred())
f := find.NewFinder(govMoMiClient.Client, true)
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
ctx, _ := context.WithCancel(context.Background())
vmPath := filepath.Join(workingDir, string(node1))
vm, err := f.VirtualMachine(ctx, vmPath)
Expect(err).NotTo(HaveOccurred())
_, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
defer vm.PowerOn(ctx)
@ -139,11 +124,11 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func(
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(client, vsp, volumePath, node2)
err = waitForVSphereDiskToAttach(volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, node1)
err = waitForVSphereDiskToDetach(volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1))
@ -154,10 +139,10 @@ var _ = SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func(
})
// Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode types.NodeName) (types.NodeName, error) {
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode string) (string, error) {
var (
err error
newNode types.NodeName
newNode string
timeout = 3 * time.Minute
pollTime = 10 * time.Second
)
@ -188,10 +173,11 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep
return getNodeForDeployment(client, deployment)
}
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (types.NodeName, error) {
// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err
}
return types.NodeName(podList.Items[0].Spec.NodeName), nil
return podList.Items[0].Spec.NodeName, nil
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
@ -25,10 +25,9 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
@ -47,7 +46,7 @@ import (
10. Delete storage class.
*/
var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-ops-storm")
const DEFAULT_VOLUME_OPS_SCALE = 30
var (
@ -58,16 +57,13 @@ var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
persistentvolumes []*v1.PersistentVolume
err error
volume_ops_scale int
vsp *vsphere.VSphere
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
Expect(err).NotTo(HaveOccurred())
@ -75,8 +71,6 @@ var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
By("Deleting PVCs")
@ -113,14 +107,14 @@ var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Expect(err).NotTo(HaveOccurred())
By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
})
})

View File

@ -14,21 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/* This test calculates latency numbers for volume lifecycle operations
@ -48,46 +46,33 @@ const (
DeleteOp = "DeleteOp"
)
var _ = SIGDescribe("vcp-performance [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("vcp-performance")
var (
client clientset.Interface
namespace string
nodeSelectorList []*NodeSelector
policyName string
datastoreName string
volumeCount int
volumesPerPod int
iterations int
policyName string
datastoreName string
)
BeforeEach(func() {
var err error
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
// Read the environment variables
volumeCountStr := os.Getenv("VCP_PERF_VOLUME_COUNT")
Expect(volumeCountStr).NotTo(BeEmpty(), "ENV VCP_PERF_VOLUME_COUNT is not set")
volumeCount, err = strconv.Atoi(volumeCountStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_PERF_VOLUME_COUNT")
volumeCount = GetAndExpectIntEnvVar(VCPPerfVolumeCount)
volumesPerPod = GetAndExpectIntEnvVar(VCPPerfVolumesPerPod)
iterations = GetAndExpectIntEnvVar(VCPPerfIterations)
volumesPerPodStr := os.Getenv("VCP_PERF_VOLUME_PER_POD")
Expect(volumesPerPodStr).NotTo(BeEmpty(), "ENV VCP_PERF_VOLUME_PER_POD is not set")
volumesPerPod, err = strconv.Atoi(volumesPerPodStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_PERF_VOLUME_PER_POD")
iterationsStr := os.Getenv("VCP_PERF_ITERATIONS")
Expect(iterationsStr).NotTo(BeEmpty(), "ENV VCP_PERF_ITERATIONS is not set")
iterations, err = strconv.Atoi(iterationsStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_PERF_ITERATIONS")
policyName = os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
datastoreName = os.Getenv("VSPHERE_DATASTORE")
Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_GOLD_POLICY is not set")
Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodes := framework.GetReadySchedulableNodesOrDie(client)
Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items))
@ -175,7 +160,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
totalpvs [][]*v1.PersistentVolume
totalpods []*v1.Pod
)
nodeVolumeMap := make(map[types.NodeName][]string)
nodeVolumeMap := make(map[string][]string)
latency = make(map[string]float64)
numPods := volumeCount / volumesPerPod
@ -212,18 +197,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds()
// Verify access to the volumes
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
for i, pod := range totalpods {
verifyVSphereVolumesAccessible(client, pod, totalpvs[i], vsp)
verifyVSphereVolumesAccessible(client, pod, totalpvs[i])
}
By("Deleting pods")
start = time.Now()
for _, pod := range totalpods {
err = framework.DeletePodWithWait(f, client, pod)
err := framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
}
elapsed = time.Since(start)
@ -231,12 +212,11 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
for i, pod := range totalpods {
for _, pv := range totalpvs[i] {
nodeName := types.NodeName(pod.Spec.NodeName)
nodeVolumeMap[nodeName] = append(nodeVolumeMap[nodeName], pv.Spec.VsphereVolume.VolumePath)
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
}
err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap)
err := waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
By("Deleting the PVCs")

View File

@ -14,59 +14,60 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"os"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
var _ = SIGDescribe("Volume Placement", func() {
var _ = utils.SIGDescribe("Volume Placement", func() {
f := framework.NewDefaultFramework("volume-placement")
const (
NodeLabelKey = "vsphere_e2e_label_volume_placement"
)
var (
c clientset.Interface
ns string
vsp *vsphere.VSphere
volumePaths []string
node1Name string
node1KeyValueLabel map[string]string
node2Name string
node2KeyValueLabel map[string]string
isNodeLabeled bool
err error
nodeInfo *NodeInfo
vsp *VSphere
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
}
By("creating vmdk")
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
})
AfterEach(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath)
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
@ -80,10 +81,10 @@ var _ = SIGDescribe("Volume Placement", func() {
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
if len(ns) > 0 {
if len(node1KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node1Name, "vsphere_e2e_label")
framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
}
if len(node2KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node2Name, "vsphere_e2e_label")
framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
}
}
})
@ -103,24 +104,24 @@ var _ = SIGDescribe("Volume Placement", func() {
It("should create and delete pod with the same volume source on the same worker node", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
})
/*
@ -143,23 +144,23 @@ var _ = SIGDescribe("Volume Placement", func() {
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node2Name, node2KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node2Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
})
/*
@ -178,12 +179,12 @@ var _ = SIGDescribe("Volume Placement", func() {
It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
volumeFiles := []string{
@ -191,9 +192,9 @@ var _ = SIGDescribe("Volume Placement", func() {
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
newEmptyFilesNames := []string{
@ -220,17 +221,18 @@ var _ = SIGDescribe("Volume Placement", func() {
*/
It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore")
var volumeOptions *vclib.VolumeOptions
volumeOptions = new(vclib.VolumeOptions)
var volumeOptions *VolumeOptions
volumeOptions = new(VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = os.Getenv("VSPHERE_SECOND_SHARED_DATASTORE")
volumePath, err := createVSphereVolume(vsp, volumeOptions)
volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
@ -239,10 +241,10 @@ var _ = SIGDescribe("Volume Placement", func() {
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify the volume is writable
// Verify that newly and previously created files are present on the volume mounted on the pod
newEmptyFileNames := []string{
@ -252,7 +254,7 @@ var _ = SIGDescribe("Volume Placement", func() {
volumeFiles = append(volumeFiles, newEmptyFileNames[0])
volumeFiles = append(volumeFiles, newEmptyFileNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
})
/*
@ -285,24 +287,24 @@ var _ = SIGDescribe("Volume Placement", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(node1Name)))
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
}
}()
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
// Create another VMDK Volume
By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
for index := 0; index < 5; index++ {
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
podA = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodA)
podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
podB = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
@ -339,17 +341,17 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod
node2Name = nodes.Items[1].Name
node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
node1KeyValueLabel = make(map[string]string)
node1KeyValueLabel["vsphere_e2e_label"] = node1LabelValue
framework.AddOrUpdateLabelOnNode(client, node1Name, "vsphere_e2e_label", node1LabelValue)
node1KeyValueLabel[NodeLabelKey] = node1LabelValue
framework.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue)
node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
node2KeyValueLabel = make(map[string]string)
node2KeyValueLabel["vsphere_e2e_label"] = node2LabelValue
framework.AddOrUpdateLabelOnNode(client, node2Name, "vsphere_e2e_label", node2LabelValue)
node2KeyValueLabel[NodeLabelKey] = node2LabelValue
framework.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue)
return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
var pod *v1.Pod
var err error
By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
@ -362,7 +364,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths {
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
}
@ -379,12 +381,12 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck)
}
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, vsp *vsphere.VSphere, nodeName string, volumePaths []string) {
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(nodeName)))
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
}
}

View File

@ -14,26 +14,22 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
package vsphere
import (
"fmt"
"hash/fnv"
"os"
"time"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/find"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -63,7 +59,7 @@ const (
/*
Test to verify the storage policy based management for dynamic volume provisioning inside kubernetes.
There are 2 ways to achive it:
There are 2 ways to achieve it:
1. Specify VSAN storage capabilities in the storage-class.
2. Use existing vCenter SPBM storage policies.
@ -90,68 +86,77 @@ const (
*/
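// Illustrative sketch (not part of this change): how the two approaches described above map to
// StorageClass parameters, reusing the getVSphereStorageClassSpec helper and the parameter keys
// exercised by the tests below; the SPBM policy name "gold" is hypothetical.
func exampleStorageClassSpecs() {
	// Approach 1: spell out VSAN storage capabilities directly as storage class parameters.
	vsanCapabilitySC := getVSphereStorageClassSpec("vsan-capability-sc", map[string]string{
		Policy_HostFailuresToTolerate: HostFailuresToTolerateCapabilityVal,
		Policy_CacheReservation:       CacheReservationCapabilityVal,
	})
	// Approach 2: reference an existing vCenter SPBM storage policy by name.
	spbmPolicySC := getVSphereStorageClassSpec("spbm-policy-sc", map[string]string{
		SpbmStoragePolicy: "gold",
		DiskFormat:        ThinDisk,
	})
	_, _ = vsanCapabilitySC, spbmPolicySC
}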
var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() {
var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-vsan-policy")
var (
client clientset.Interface
namespace string
scParameters map[string]string
policyName string
tagPolicy string
masterNode string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
framework.Logf("framework: %+v", f)
scParameters = make(map[string]string)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
Expect(masternodes).NotTo(BeEmpty())
masterNode = masternodes.List()[0]
})
// Valid policy.
It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Valid policy.
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = "1"
scParameters[Policy_ObjectSpaceReservation] = "30"
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Valid policy.
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Valid policy.
It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
// Invalid VSAN storage capabilities parameters.
It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
@ -163,10 +168,10 @@ var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]"
// Invalid policy on a VSAN test bed.
// diskStripes value has to be between 1 and 12.
It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_DiskStripes + "."
@ -178,9 +183,9 @@ var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]"
// Invalid policy on a VSAN test bed.
// hostFailuresToTolerate value has to be between 0 and 3 inclusive.
It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
@ -192,11 +197,11 @@ var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]"
// Specify a valid VSAN policy on a non-VSAN test bed.
// The test should fail.
It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VmfsDatastore
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
@ -207,12 +212,10 @@ var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]"
})
It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for SPBM policy: %s", os.Getenv("VSPHERE_SPBM_GOLD_POLICY")))
goldPolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
Expect(goldPolicy).NotTo(BeEmpty())
scParameters[SpbmStoragePolicy] = goldPolicy
By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
scParameters[SpbmStoragePolicy] = policyName
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking Test for SPBM storage policy: %+v", scParameters)
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
@ -220,33 +223,30 @@ var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]"
scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking Test for SPBM storage policy: %+v", scParameters)
clusterName := os.Getenv("VSPHERE_KUBERNETES_CLUSTER")
Expect(clusterName).NotTo(BeEmpty())
invokeStaleDummyVMTestWithStoragePolicy(client, namespace, clusterName, scParameters)
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})
It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for SPBM policy: %s and datastore: %s", os.Getenv("VSPHERE_SPBM_TAG_POLICY"), VsanDatastore))
tagPolicy := os.Getenv("VSPHERE_SPBM_TAG_POLICY")
Expect(tagPolicy).NotTo(BeEmpty())
By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
scParameters[SpbmStoragePolicy] = tagPolicy
scParameters[Datastore] = VsanDatastore
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking Test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + os.Getenv("VSPHERE_SPBM_TAG_POLICY") + "\\\""
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for SPBM policy: %s", BronzeStoragePolicy))
By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking Test for non-existing SPBM storage policy: %+v", scParameters)
framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
@ -256,14 +256,12 @@ var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]"
})
It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() {
By(fmt.Sprintf("Invoking Test for SPBM policy: %s with VSAN storage capabilities", os.Getenv("VSPHERE_SPBM_GOLD_POLICY")))
goldPolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
Expect(goldPolicy).NotTo(BeEmpty())
scParameters[SpbmStoragePolicy] = goldPolicy
By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
scParameters[SpbmStoragePolicy] = policyName
Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty())
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking Test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
Expect(err).To(HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
@ -295,16 +293,14 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
@ -326,7 +322,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespace string, clusterName string, scParameters map[string]string) {
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
@ -353,23 +349,6 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespa
fnvHash.Write([]byte(vmName))
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
Expect(isDummyVMPresent(dummyVMFullName)).NotTo(BeTrue(), errorMsg)
}
func isDummyVMPresent(vmName string) bool {
By("Verifing if the dummy VM is deleted by the vSphere Cloud Provider clean up routine")
govMoMiClient, err := vsphere.GetgovmomiClient(nil)
Expect(err).NotTo(HaveOccurred())
f := find.NewFinder(govMoMiClient.Client, true)
ctx, _ := context.WithCancel(context.Background())
workingDir := os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty())
vmPath := workingDir + vmName
_, err = f.VirtualMachine(ctx, vmPath)
if err != nil {
return false
}
return true
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg)
}

View File

@ -1,459 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"path/filepath"
"strconv"
"time"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
volumesPerNode = 55
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
)
// volumeState represents the state of a volume.
type volumeState int32
const (
volumeStateDetached volumeState = 1
volumeStateAttached volumeState = 2
)
// Sanity check for vSphere testing. Verify the persistent disk is attached to the node.
func verifyVSphereDiskAttached(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
var (
isAttached bool
err error
)
if vsp == nil {
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
isAttached, err = vsp.DiskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
return isAttached, err
}
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, nodeVolumes map[k8stype.NodeName][]string) error {
var (
err error
disksAttached = true
detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second
)
if vsp == nil {
vsp, err = getVSphere(c)
if err != nil {
return err
}
}
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
attachedResult, err := vsp.DisksAreAttached(nodeVolumes)
if err != nil {
return false, err
}
for nodeName, nodeVolumes := range attachedResult {
for volumePath, attached := range nodeVolumes {
if attached {
framework.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
return false, nil
}
}
}
disksAttached = false
framework.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
return true, nil
})
if err != nil {
return err
}
if disksAttached {
return fmt.Errorf("Gave up waiting for volumes to detach after %v", detachTimeout)
}
return nil
}
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName, expectedState volumeState) error {
var (
err error
diskAttached bool
currentState volumeState
timeout = 6 * time.Minute
pollTime = 10 * time.Second
)
var attachedState = map[bool]volumeState{
true: volumeStateAttached,
false: volumeStateDetached,
}
var attachedStateMsg = map[volumeState]string{
volumeStateAttached: "attached to",
volumeStateDetached: "detached from",
}
err = wait.Poll(pollTime, timeout, func() (bool, error) {
diskAttached, err = verifyVSphereDiskAttached(c, vsp, volumePath, nodeName)
if err != nil {
return true, err
}
currentState = attachedState[diskAttached]
if currentState == expectedState {
framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
return true, nil
}
framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
return false, nil
})
if err != nil {
return err
}
if currentState != expectedState {
err = fmt.Errorf("Gave up waiting for Volume %q to be %s %q after %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout)
}
return err
}
// Wait until vsphere vmdk is attached to the given node or time out after 6 minutes
func waitForVSphereDiskToAttach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateAttached)
}
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
func waitForVSphereDiskToDetach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateDetached)
}
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
var (
pvConfig framework.PersistentVolumeConfig
pv *v1.PersistentVolume
claimRef *v1.ObjectReference
)
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
Prebind: nil,
}
pv = &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
PersistentVolumeSource: pvConfig.PVSource,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: claimRef,
},
}
if labels != nil {
pv.Labels = labels
}
return pv
}
// function to get vsphere persistent volume claim spec with given selector labels.
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
var (
pvc *v1.PersistentVolumeClaim
)
pvc = &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
},
},
}
if labels != nil {
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
return pvc
}
// function to create vmdk volume
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
var (
volumePath string
err error
)
if volumeOptions == nil {
volumeOptions = new(vclib.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
volumePath, err = vsp.CreateVolume(volumeOptions)
Expect(err).NotTo(HaveOccurred())
return volumePath, nil
}
// function to write content to the volume backed by the given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
runInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
framework.Logf("Done with writing content to volume")
}
// function to verify the content matches on the volume backed by the given PVC
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
runInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
framework.Logf("Successfully verified content of the volume")
}
func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass {
var sc *storage.StorageClass
sc = &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Provisioner: "kubernetes.io/vsphere-volume",
}
if scParameters != nil {
sc.Parameters = scParameters
}
return sc
}
func getVSphereClaimSpecWithStorageClassAnnotation(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
scAnnotation := make(map[string]string)
scAnnotation[v1.BetaStorageClassAnnotation] = storageclass.Name
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
Annotations: scAnnotation,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
},
},
},
}
return claim
}
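// Illustrative sketch (not part of this change): the same claim expressed with the GA
// spec.storageClassName field instead of the beta annotation used above; it relies only on
// the k8s.io/api/core/v1 and storage/v1 types already imported in this file.
func getVSphereClaimSpecWithStorageClassName(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageclass.Name,
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
				},
			},
		},
	}
}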
// func to get pod spec with given volume claim, node selector labels and command
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pod-pvc-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
if nodeSelectorKV != nil {
pod.Spec.NodeSelector = nodeSelectorKV
}
return pod
}
// func to get pod spec with given volume paths, node selector labels and container commands
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
var volumeMounts []v1.VolumeMount
var volumes []v1.Volume
for index, volumePath := range volumePaths {
name := fmt.Sprintf("volume%v", index+1)
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
vsphereVolume.VolumePath = volumePath
vsphereVolume.FSType = "ext4"
volumes = append(volumes, v1.Volume{Name: name})
volumes[index].VolumeSource.VsphereVolume = vsphereVolume
}
if commands == nil || len(commands) == 0 {
commands = []string{
"/bin/sh",
"-c",
"while true; do sleep 2; done",
}
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vsphere-e2e-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "busybox",
Command: commands,
VolumeMounts: volumeMounts,
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: volumes,
},
}
if keyValuelabel != nil {
pod.Spec.NodeSelector = keyValuelabel
}
return pod
}
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
}
}
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
Expect(err).NotTo(HaveOccurred())
}
}
// verify volumes are attached to the node and are accessible in pod
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred())
}
}
// Get vSphere Volume Path from PVC
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pv.Spec.VsphereVolume.VolumePath
}
func addNodesToVCP(vsp *vsphere.VSphere, c clientset.Interface) error {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
}
for _, node := range nodes.Items {
vsp.NodeAdded(&node)
}
return nil
}
func getVSphere(c clientset.Interface) (*vsphere.VSphere, error) {
vsp, err := vsphere.GetVSphere()
if err != nil {
return nil, err
}
addNodesToVCP(vsp, c)
return vsp, nil
}
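// Illustrative sketch (not part of this change): the pattern that replaces the deleted
// getVSphere/addNodesToVCP helpers in the rewritten tests of this diff. After Bootstrap(f)
// runs in BeforeEach, per-node vCenter connection details come from the NodeMapper, and
// volume operations go through that node's VSphere instance and datacenter reference.
// Bootstrap, TestContext, VolumeOptions and NodeInfo are assumed to come from the new
// test/e2e/storage/vsphere package introduced by this change.
func exampleBootstrapUsage(f *framework.Framework, nodeName string) (string, error) {
	Bootstrap(f)
	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
	// Create a throwaway VMDK on the node's datacenter, as the rewritten tests do.
	return nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
}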