rebase: update kubernetes to v1.20.0

Updated Kubernetes packages to the latest release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Authored by Madhu Rajanna on 2020-12-17 17:58:29 +05:30; committed by mergify[bot]
parent 4abe128bd8
commit 83559144b1
1624 changed files with 247222 additions and 160270 deletions

vendor/k8s.io/kubernetes/test/e2e/storage/utils/BUILD (generated, vendored; 62 lines added)

@@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"create.go",
"deployment.go",
"ebs.go",
"framework.go",
"host_exec.go",
"local.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/exec:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
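
For orientation only: the importpath declared above is what an out-of-tree consumer (for example a CSI driver's e2e suite) would use on the Go side. A minimal, hypothetical import could look like this; the alias and the referenced symbol are arbitrary:

package example

import (
	// The path matches the go_library importpath declared in the BUILD file above.
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// Reference a symbol so the import is used; LoadFromManifests is defined in create.go below.
var _ = storageutils.LoadFromManifests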


@@ -0,0 +1,637 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// LoadFromManifests loads .yaml or .json manifest files and returns
// all items that it finds in them. It supports all items for which
// there is a factory registered in factories and .yaml files with
// multiple items separated by "---". Files are accessed via the
// "testfiles" package, which means they can come from a file system
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
func LoadFromManifests(files ...string) ([]interface{}, error) {
var items []interface{}
err := visitManifests(func(data []byte) error {
// Ignore any additional fields for now, just determine what we have.
var what What
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil {
return errors.Wrap(err, "decode TypeMeta")
}
factory := factories[what]
if factory == nil {
return errors.Errorf("item of type %+v not supported", what)
}
object := factory.New()
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, object); err != nil {
return errors.Wrapf(err, "decode %+v", what)
}
items = append(items, object)
return nil
}, files...)
return items, err
}
func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := e2etestfiles.Read(fileName)
if err != nil {
framework.Failf("reading manifest file: %v", err)
}
// Split at the "---" separator before working on
// individual item. Only works for .yaml.
//
// We need to split ourselves because we need access
// to each original chunk of data for
// runtime.DecodeInto. kubectl has its own
// infrastructure for this, but that is a lot of code
// with many dependencies.
items := bytes.Split(data, []byte("\n---"))
for _, item := range items {
if err := cb(item); err != nil {
return errors.Wrap(err, fileName)
}
}
}
return nil
}
// PatchItems modifies the given items in place such that each test
// gets its own instances, to avoid conflicts between different tests
// and between tests and normal deployments.
//
// This is done by:
// - creating namespaced items inside the test's namespace
// - changing the name of non-namespaced items like ClusterRole
//
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := patchItemRecursively(f, driverNamespace, item); err != nil {
return err
}
}
return nil
}
// CreateItems creates the items. Each of them must be an API object
// of a type that is registered in Factory.
//
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// CreateItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) {
var destructors []func() error
cleanup := func() {
// TODO (?): use same logic as framework.go for determining
// whether we are expected to clean up? This would change the
// meaning of the -delete-namespace and -delete-namespace-on-failure
// command line flags, because they would also start to apply
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
framework.Logf("deleting failed: %s", err)
}
}
}
var result error
for _, item := range items {
// Each factory knows which item(s) it supports, so try each one.
done := false
description := describeItem(item)
// Uncomment this line to get a full dump of the entire item.
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
framework.Logf("creating %s", description)
for _, factory := range factories {
destructor, err := factory.Create(f, ns, item)
if destructor != nil {
destructors = append(destructors, func() error {
framework.Logf("deleting %s", description)
return destructor()
})
}
if err == nil {
done = true
break
} else if errors.Cause(err) != errorItemNotSupported {
result = err
break
}
}
if result == nil && !done {
result = errors.Errorf("item of type %T not supported", item)
break
}
}
if result != nil {
cleanup()
return nil, result
}
return cleanup, nil
}
// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
items, err := LoadFromManifests(files...)
if err != nil {
return nil, errors.Wrap(err, "CreateFromManifests")
}
if err := PatchItems(f, driverNamespace, items...); err != nil {
return nil, err
}
if patch != nil {
for _, item := range items {
if err := patch(item); err != nil {
return nil, err
}
}
}
return CreateItems(f, driverNamespace, items...)
}
// What is a subset of metav1.TypeMeta which (in contrast to
// metav1.TypeMeta itself) satisfies the runtime.Object interface.
type What struct {
Kind string `json:"kind"`
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new What.
func (in *What) DeepCopy() *What {
return &What{Kind: in.Kind}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out.
func (in *What) DeepCopyInto(out *What) {
out.Kind = in.Kind
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *What) DeepCopyObject() runtime.Object {
return &What{Kind: in.Kind}
}
// GetObjectKind returns the ObjectKind schema
func (in *What) GetObjectKind() schema.ObjectKind {
return nil
}
// ItemFactory provides support for creating one particular item.
// The type gets exported because other packages might want to
// extend the set of pre-defined factories.
type ItemFactory interface {
// New returns a new empty item.
New() runtime.Object
// Create is responsible for creating the item. It returns an
// error or a cleanup function for the created item.
// If the item is of an unsupported type, it must return
// an error that has errorItemNotSupported as cause.
Create(f *framework.Framework, ns *v1.Namespace, item interface{}) (func() error, error)
}
// describeItem always returns a string that describes the item,
// usually by calling out to cache.MetaNamespaceKeyFunc which
// concatenates namespace (if set) and name. If that fails, the entire
// item gets converted to a string.
func describeItem(item interface{}) string {
key, err := cache.MetaNamespaceKeyFunc(item)
if err == nil && key != "" {
return fmt.Sprintf("%T: %s", item, key)
}
return fmt.Sprintf("%T: %s", item, item)
}
// errorItemNotSupported is the error that Create methods
// must return or wrap when they don't support the given item.
var errorItemNotSupported = errors.New("not supported")
var factories = map[What]ItemFactory{
{"ClusterRole"}: &clusterRoleFactory{},
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
{"Service"}: &serviceFactory{},
{"ServiceAccount"}: &serviceAccountFactory{},
{"StatefulSet"}: &statefulSetFactory{},
{"StorageClass"}: &storageClassFactory{},
}
// PatchName makes the name of some item unique by appending the
// generated unique name.
func PatchName(f *framework.Framework, item *string) {
if *item != "" {
*item = *item + "-" + f.UniqueName
}
}
// PatchNamespace moves the item into the test's namespace. Not
// all items can be namespaced. For those, the name also needs to be
// patched.
func PatchNamespace(f *framework.Framework, driverNamespace *v1.Namespace, item *string) {
if driverNamespace != nil {
*item = driverNamespace.GetName()
return
}
if f.Namespace != nil {
*item = f.Namespace.GetName()
}
}
func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace, item interface{}) error {
switch item := item.(type) {
case *rbacv1.Subject:
PatchNamespace(f, driverNamespace, &item.Namespace)
case *rbacv1.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
// All those names are exempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
PatchName(f, &item.Name)
}
case *rbacv1.ClusterRole:
PatchName(f, &item.Name)
case *rbacv1.Role:
PatchNamespace(f, driverNamespace, &item.Namespace)
// Roles are namespaced, but because the RoleRef case above always
// renames (we cannot tell whether the referenced role is a ClusterRole
// or a Role), we have to do the same here.
PatchName(f, &item.Name)
case *storagev1.StorageClass:
PatchName(f, &item.Name)
case *storagev1.CSIDriver:
PatchName(f, &item.Name)
case *v1.ServiceAccount:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
case *v1.Secret:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
case *rbacv1.ClusterRoleBinding:
PatchName(f, &item.Name)
for i := range item.Subjects {
if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
}
}
if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
}
case *rbacv1.RoleBinding:
PatchNamespace(f, driverNamespace, &item.Namespace)
for i := range item.Subjects {
if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
}
}
if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
}
case *v1.Service:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
case *appsv1.StatefulSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *appsv1.DaemonSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
default:
return errors.Errorf("missing support for patching item of type %T", item)
}
return nil
}
// The individual factories all follow the same template, but with
// enough differences in types and functions that copy-and-paste
// looked like the least dirty approach. Perhaps one day Go will have
// generics.
type serviceAccountFactory struct{}
func (f *serviceAccountFactory) New() runtime.Object {
return &v1.ServiceAccount{}
}
func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.ServiceAccount)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ServiceAccount")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type clusterRoleFactory struct{}
func (f *clusterRoleFactory) New() runtime.Object {
return &rbacv1.ClusterRole{}
}
func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.ClusterRole)
if !ok {
return nil, errorItemNotSupported
}
framework.Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type clusterRoleBindingFactory struct{}
func (f *clusterRoleBindingFactory) New() runtime.Object {
return &rbacv1.ClusterRoleBinding{}
}
func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.ClusterRoleBinding)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().ClusterRoleBindings()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ClusterRoleBinding")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type roleFactory struct{}
func (f *roleFactory) New() runtime.Object {
return &rbacv1.Role{}
}
func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.Role)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().Roles(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Role")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type roleBindingFactory struct{}
func (f *roleBindingFactory) New() runtime.Object {
return &rbacv1.RoleBinding{}
}
func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.RoleBinding)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create RoleBinding")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type serviceFactory struct{}
func (f *serviceFactory) New() runtime.Object {
return &v1.Service{}
}
func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.Service)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Services(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Service")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type statefulSetFactory struct{}
func (f *statefulSetFactory) New() runtime.Object {
return &appsv1.StatefulSet{}
}
func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.StatefulSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create StatefulSet")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type daemonSetFactory struct{}
func (f *daemonSetFactory) New() runtime.Object {
return &appsv1.DaemonSet{}
}
func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.DaemonSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create DaemonSet")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type storageClassFactory struct{}
func (f *storageClassFactory) New() runtime.Object {
return &storagev1.StorageClass{}
}
func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*storagev1.StorageClass)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().StorageClasses()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create StorageClass")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type csiDriverFactory struct{}
func (f *csiDriverFactory) New() runtime.Object {
return &storagev1.CSIDriver{}
}
func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*storagev1.CSIDriver)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().CSIDrivers()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create CSIDriver")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type secretFactory struct{}
func (f *secretFactory) New() runtime.Object {
return &v1.Secret{}
}
func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.Secret)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Secrets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Secret")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
// PrettyPrint returns a human-readable representation of an item.
func PrettyPrint(item interface{}) string {
data, err := json.MarshalIndent(item, "", " ")
if err == nil {
return string(data)
}
return fmt.Sprintf("%+v", item)
}
// patchContainerImages replaces the specified Container Registry with a custom
// one provided via the KUBE_TEST_REPO_LIST env variable
func patchContainerImages(containers []v1.Container) error {
var err error
for i, c := range containers {
containers[i].Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
if err != nil {
return err
}
}
return nil
}
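
To illustrate the intended call pattern of the helpers above, here is a minimal, hedged sketch: it assumes a Ginkgo e2e suite with an existing framework.Framework value f, and the manifest path is a placeholder, not something added by this commit. The returned cleanup function is meant to be invoked in AfterEach, per the CreateItems comment.

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// deployExampleDriver loads, patches and creates the objects from a (hypothetical)
// manifest, returning the cleanup callback produced by CreateItems.
func deployExampleDriver(f *framework.Framework) (func(), error) {
	return storageutils.CreateFromManifests(f, f.Namespace,
		func(item interface{}) error {
			// Optional per-item patch hook; a no-op here.
			return nil
		},
		"test/e2e/testing-manifests/storage-csi/example/driver.yaml", // placeholder path
	)
}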


@@ -0,0 +1,204 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"path"
"strings"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// PatchCSIDeployment modifies the CSI driver deployment:
// - replaces the provisioner name
// - forces pods onto a specific host
//
// All of that is optional, see PatchCSIOptions. Just beware
// that not renaming the CSI driver deployment can be problematic:
// - when multiple tests deploy the driver, they need
// to run sequentially
// - might conflict with manual deployments
//
// This function is written so that it works for CSI driver deployments
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the provisioner binary accepts a --provisioner parameter
// - the paths inside the container are either fixed
// and don't need to be patched (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
//
// Driver deployments that are different will have to do the patching
// without this function, or skip patching entirely.
func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
o.OldDriverName != o.NewDriverName
patchVolumes := func(volumes []v1.Volume) {
if !rename {
return
}
for i := range volumes {
volume := &volumes[i]
if volume.HostPath != nil {
// Update paths like /var/lib/kubelet/plugins/<provisioner>.
p := &volume.HostPath.Path
dir, file := path.Split(*p)
if file == o.OldDriverName {
*p = path.Join(dir, o.NewDriverName)
}
}
}
}
patchContainers := func(containers []v1.Container) {
for i := range containers {
container := &containers[i]
if rename {
for e := range container.Args {
// Inject test-specific provider name into paths like this one:
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
container.Args[e] = strings.Replace(container.Args[e], "/"+o.OldDriverName+"/", "/"+o.NewDriverName+"/", 1)
}
}
// Overwrite the driver name or the provisioner name, respectively,
// by appending a parameter with the right value.
switch container.Name {
case o.DriverContainerName:
container.Args = append(container.Args, o.DriverContainerArguments...)
case o.ProvisionerContainerName:
// Driver name is expected to be the same
// as the provisioner here.
container.Args = append(container.Args, "--provisioner="+o.NewDriverName)
}
}
}
patchPodSpec := func(spec *v1.PodSpec) {
patchContainers(spec.Containers)
patchVolumes(spec.Volumes)
if o.NodeName != "" {
e2epod.SetNodeSelection(spec, e2epod.NodeSelection{Name: o.NodeName})
}
}
switch object := object.(type) {
case *appsv1.ReplicaSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.DaemonSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.StatefulSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.Deployment:
patchPodSpec(&object.Spec.Template.Spec)
case *storagev1.StorageClass:
if o.NewDriverName != "" {
// Driver name is expected to be the same
// as the provisioner name here.
object.Provisioner = o.NewDriverName
}
case *storagev1.CSIDriver:
if o.NewDriverName != "" {
object.Name = o.NewDriverName
}
if o.PodInfo != nil {
object.Spec.PodInfoOnMount = o.PodInfo
}
if o.StorageCapacity != nil {
object.Spec.StorageCapacity = o.StorageCapacity
}
if o.CanAttach != nil {
object.Spec.AttachRequired = o.CanAttach
}
if o.VolumeLifecycleModes != nil {
object.Spec.VolumeLifecycleModes = *o.VolumeLifecycleModes
}
if o.TokenRequests != nil {
object.Spec.TokenRequests = o.TokenRequests
}
if o.RequiresRepublish != nil {
object.Spec.RequiresRepublish = o.RequiresRepublish
}
if o.FSGroupPolicy != nil {
object.Spec.FSGroupPolicy = o.FSGroupPolicy
}
}
return nil
}
// PatchCSIOptions controls how PatchCSIDeployment patches the objects.
type PatchCSIOptions struct {
// The original driver name.
OldDriverName string
// The driver name that replaces the original name.
// Can be empty (not used at all) or equal to OldDriverName
// (then it will be added where appropriate without renaming
// in existing fields).
NewDriverName string
// The name of the container which has the CSI driver binary.
// If non-empty, DriverContainerArguments are added to argument
// list in container with that name.
DriverContainerName string
// List of arguments to add to container with
// DriverContainerName.
DriverContainerArguments []string
// The name of the container which has the provisioner binary.
// If non-empty, --provisioner with new name will be appended
// to the argument list.
ProvisionerContainerName string
// The name of the container which has the snapshotter binary.
// If non-empty, --snapshotter with new name will be appended
// to the argument list.
SnapshotterContainerName string
// If non-empty, all pods are forced to run on this node.
NodeName string
// If not nil, the value to use for the CSIDriver.Spec.PodInfo
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
PodInfo *bool
// If not nil, the value to use for the CSIDriver.Spec.CanAttach
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
CanAttach *bool
// If not nil, the value to use for the CSIDriver.Spec.StorageCapacity
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
StorageCapacity *bool
// If not nil, the value to use for the CSIDriver.Spec.VolumeLifecycleModes
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
VolumeLifecycleModes *[]storagev1.VolumeLifecycleMode
// If not nil, the value to use for the CSIDriver.Spec.TokenRequests
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
TokenRequests []storagev1.TokenRequest
// If not nil, the value to use for the CSIDriver.Spec.RequiresRepublish
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
RequiresRepublish *bool
// If not nil, the value to use for the CSIDriver.Spec.FSGroupPolicy
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
FSGroupPolicy *storagev1.FSGroupPolicy
}
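
A hedged sketch of how PatchCSIDeployment is commonly wired into the patch callback of CreateFromManifests. The driver and container names are illustrative assumptions, not values from this commit; f.UniqueName is the per-test suffix also used by PatchName above.

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// patchExampleCSIObjects renames a hypothetical CSI driver so each test gets its own instance.
func patchExampleCSIObjects(f *framework.Framework, item interface{}) error {
	o := storageutils.PatchCSIOptions{
		OldDriverName:            "csi-example",                 // assumed name in the manifests
		NewDriverName:            "csi-example-" + f.UniqueName, // unique per test
		DriverContainerName:      "example-driver",              // assumed container name
		DriverContainerArguments: []string{"--drivername=csi-example-" + f.UniqueName},
		ProvisionerContainerName: "csi-provisioner",
	}
	return storageutils.PatchCSIDeployment(f, o, item)
}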

vendor/k8s.io/kubernetes/test/e2e/storage/utils/ebs.go (generated, vendored; 263 lines added)

@@ -0,0 +1,263 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
const (
volumeAttachmentStatusPollDelay = 2 * time.Second
volumeAttachmentStatusFactor = 2
volumeAttachmentStatusSteps = 6
// represents expected attachment status of a volume after attach
volumeAttachedStatus = "attached"
// represents expected attachment status of a volume after detach
volumeDetachedStatus = "detached"
)
// EBSUtil provides functions to interact with EBS volumes
type EBSUtil struct {
client *ec2.EC2
validDevices []string
}
// NewEBSUtil returns an instance of EBSUtil which can be used
// to interact with EBS volumes
func NewEBSUtil(client *ec2.EC2) *EBSUtil {
ebsUtil := &EBSUtil{client: client}
validDevices := []string{}
for _, firstChar := range []rune{'b', 'c'} {
for i := 'a'; i <= 'z'; i++ {
dev := string([]rune{firstChar, i})
validDevices = append(validDevices, dev)
}
}
ebsUtil.validDevices = validDevices
return ebsUtil
}
// AttachDisk attaches an EBS volume to a node.
func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
instance, err := findInstanceByNodeName(nodeName, ebs.client)
if err != nil {
return fmt.Errorf("error finding node %s: %v", nodeName, err)
}
err = ebs.waitForAvailable(volumeID)
if err != nil {
return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err)
}
device, err := ebs.findFreeDevice(instance)
if err != nil {
return fmt.Errorf("error finding free device on node %s: %v", nodeName, err)
}
hostDevice := "/dev/xvd" + string(device)
attachInput := &ec2.AttachVolumeInput{
VolumeId: &volumeID,
InstanceId: instance.InstanceId,
Device: &hostDevice,
}
_, err = ebs.client.AttachVolume(attachInput)
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err)
}
return ebs.waitForAttach(volumeID)
}
func (ebs *EBSUtil) findFreeDevice(instance *ec2.Instance) (string, error) {
deviceMappings := map[string]string{}
for _, blockDevice := range instance.BlockDeviceMappings {
name := aws.StringValue(blockDevice.DeviceName)
name = strings.TrimPrefix(name, "/dev/sd")
name = strings.TrimPrefix(name, "/dev/xvd")
if len(name) < 1 || len(name) > 2 {
klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
}
deviceMappings[name] = aws.StringValue(blockDevice.Ebs.VolumeId)
}
for _, device := range ebs.validDevices {
if _, found := deviceMappings[device]; !found {
return device, nil
}
}
return "", fmt.Errorf("no available device")
}
func (ebs *EBSUtil) waitForAttach(volumeID string) error {
backoff := wait.Backoff{
Duration: volumeAttachmentStatusPollDelay,
Factor: volumeAttachmentStatusFactor,
Steps: volumeAttachmentStatusSteps,
}
time.Sleep(volumeAttachmentStatusPollDelay)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := ebs.describeVolume(volumeID)
if err != nil {
return false, err
}
if len(info.Attachments) > 1 {
// Shouldn't happen; log so we know if it is
klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
}
attachmentStatus := ""
for _, a := range info.Attachments {
if attachmentStatus != "" {
// Shouldn't happen; log so we know if it is
klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
}
if a.State != nil {
attachmentStatus = *a.State
} else {
// Shouldn't happen; log so we know if it is
klog.Warningf("Ignoring nil attachment state for volume %q: %v", volumeID, a)
}
}
if attachmentStatus == "" {
attachmentStatus = volumeDetachedStatus
}
if attachmentStatus == volumeAttachedStatus {
// Attachment is in requested state, finish waiting
return true, nil
}
return false, nil
})
return err
}
func (ebs *EBSUtil) waitForAvailable(volumeID string) error {
backoff := wait.Backoff{
Duration: volumeAttachmentStatusPollDelay,
Factor: volumeAttachmentStatusFactor,
Steps: volumeAttachmentStatusSteps,
}
time.Sleep(volumeAttachmentStatusPollDelay)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := ebs.describeVolume(volumeID)
if err != nil {
return false, err
}
volumeState := aws.StringValue(info.State)
if volumeState != ec2.VolumeStateAvailable {
return false, nil
}
return true, nil
})
return err
}
// Gets the full information about this volume from the EC2 API
func (ebs *EBSUtil) describeVolume(volumeID string) (*ec2.Volume, error) {
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
results := []*ec2.Volume{}
var nextToken *string
for {
response, err := ebs.client.DescribeVolumes(request)
if err != nil {
return nil, err
}
results = append(results, response.Volumes...)
nextToken = response.NextToken
if aws.StringValue(nextToken) == "" {
break
}
request.NextToken = nextToken
}
if len(results) == 0 {
return nil, fmt.Errorf("no volumes found")
}
if len(results) > 1 {
return nil, fmt.Errorf("multiple volumes found")
}
return results[0], nil
}
func newEc2Filter(name string, value string) *ec2.Filter {
filter := &ec2.Filter{
Name: aws.String(name),
Values: []*string{
aws.String(value),
},
}
return filter
}
func findInstanceByNodeName(nodeName string, cloud *ec2.EC2) (*ec2.Instance, error) {
filters := []*ec2.Filter{
newEc2Filter("private-dns-name", nodeName),
}
request := &ec2.DescribeInstancesInput{
Filters: filters,
}
instances, err := describeInstances(request, cloud)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, nil
}
if len(instances) > 1 {
return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
}
return instances[0], nil
}
func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*ec2.Instance, error) {
// Instances are paged
results := []*ec2.Instance{}
var nextToken *string
for {
response, err := cloud.DescribeInstances(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err)
}
for _, reservation := range response.Reservations {
results = append(results, reservation.Instances...)
}
nextToken = response.NextToken
if nextToken == nil || len(*nextToken) == 0 {
break
}
request.NextToken = nextToken
}
return results, nil
}
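
A minimal sketch of driving EBSUtil directly. The AWS session setup, region and identifiers are assumptions for illustration; AttachDisk resolves the node by its private DNS name via findInstanceByNodeName above.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"

	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// attachExampleVolume attaches a pre-provisioned EBS volume to a node.
// Volume ID, node name and region are placeholders.
func attachExampleVolume() error {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	ebs := storageutils.NewEBSUtil(ec2.New(sess))
	return ebs.AttachDisk("vol-0123456789abcdef0", "ip-10-0-0-1.ec2.internal")
}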


@@ -0,0 +1,24 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import "github.com/onsi/ginkgo"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-storage] "+text, body)
}
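
A short hypothetical use of SIGDescribe; the spec text and body are placeholders:

package example

import (
	"github.com/onsi/ginkgo"

	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// The resulting spec description reads "[sig-storage] Example volumes ...".
var _ = storageutils.SIGDescribe("Example volumes", func() {
	ginkgo.It("should carry the storage SIG label", func() {
		// test body elided
	})
})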


@@ -0,0 +1,190 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// Result holds the execution result of remote execution command.
type Result struct {
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// LogResult logs the result of a remote command execution.
func LogResult(result Result) {
remote := result.Host
framework.Logf("exec %s: command: %s", remote, result.Cmd)
framework.Logf("exec %s: stdout: %q", remote, result.Stdout)
framework.Logf("exec %s: stderr: %q", remote, result.Stderr)
framework.Logf("exec %s: exit code: %d", remote, result.Code)
}
// HostExec represents the interface we require to execute commands on a remote host.
type HostExec interface {
Execute(cmd string, node *v1.Node) (Result, error)
IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
IssueCommand(cmd string, node *v1.Node) error
Cleanup()
}
// hostExecutor implements HostExec
type hostExecutor struct {
*framework.Framework
nodeExecPods map[string]*v1.Pod
}
// NewHostExec returns a HostExec
func NewHostExec(framework *framework.Framework) HostExec {
return &hostExecutor{
Framework: framework,
nodeExecPods: make(map[string]*v1.Pod),
}
}
// launchNodeExecPod launches a hostexec pod for local PV and waits
// until it's Running.
func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
f := h.Framework
cs := f.ClientSet
ns := f.Namespace
hostExecPod := e2epod.NewExecPodSpec(ns.Name, "", true)
hostExecPod.GenerateName = fmt.Sprintf("hostexec-%s-", node)
// Use NodeAffinity instead of NodeName so that pods will not
// be immediately Failed by kubelet if it's out of space. Instead
// Pods will be pending in the scheduler until there is space freed
// up.
e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
hostExecPod.Spec.Volumes = []v1.Volume{
{
// Required to enter into host mount namespace via nsenter.
Name: "rootfs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
ReadOnly: true,
},
}
hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: func(privileged bool) *bool {
return &privileged
}(true),
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
framework.ExpectNoError(err)
return pod
}
// Execute executes the command on the given node. If there is no error
// performing the remote command execution, the stdout, stderr and exit code
// are returned.
// This works like ssh.SSH(...) utility.
func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
result, err := h.exec(cmd, node)
if codeExitErr, ok := err.(exec.CodeExitError); ok {
// extract the exit code of remote command and silence the command
// non-zero exit code error
result.Code = codeExitErr.ExitStatus()
err = nil
}
return result, err
}
func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
result := Result{
Host: node.Name,
Cmd: cmd,
}
pod, ok := h.nodeExecPods[node.Name]
if !ok {
pod = h.launchNodeExecPod(node.Name)
if pod == nil {
return result, fmt.Errorf("failed to create hostexec pod for node %q", node)
}
h.nodeExecPods[node.Name] = pod
}
args := []string{
"nsenter",
"--mount=/rootfs/proc/1/ns/mnt",
"--",
"sh",
"-c",
cmd,
}
containerName := pod.Spec.Containers[0].Name
var err error
result.Stdout, result.Stderr, err = h.Framework.ExecWithOptions(framework.ExecOptions{
Command: args,
Namespace: pod.Namespace,
PodName: pod.Name,
ContainerName: containerName,
Stdin: nil,
CaptureStdout: true,
CaptureStderr: true,
PreserveWhitespace: true,
})
return result, err
}
// IssueCommandWithResult issues command on the given node and returns stdout as
// result. It returns error if there are some issues executing the command or
// the command exits non-zero.
func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
result, err := h.exec(cmd, node)
if err != nil {
LogResult(result)
}
return result.Stdout, err
}
// IssueCommand works like IssueCommandWithResult, but discards result.
func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
_, err := h.IssueCommandWithResult(cmd, node)
return err
}
// Cleanup cleans up the resources created during the test.
// Note that in most cases it is not necessary to call this because we create
// pods under test namespace which will be destroyed in teardown phase.
func (h *hostExecutor) Cleanup() {
for _, pod := range h.nodeExecPods {
e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
}
h.nodeExecPods = make(map[string]*v1.Pod)
}
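
A hedged sketch of the intended HostExec flow inside a test. The command is illustrative; calling Cleanup is optional because the hostexec pods live in the test namespace and are removed with it.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// readHostUptime runs a command in the host mount namespace of the given node
// and returns its stdout.
func readHostUptime(f *framework.Framework, node *v1.Node) (string, error) {
	hostExec := storageutils.NewHostExec(f)
	defer hostExec.Cleanup() // optional; see the Cleanup comment above
	return hostExec.IssueCommandWithResult("cat /proc/uptime", node)
}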


@@ -0,0 +1,344 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
/*
* Various local test resource implementations.
*/
import (
"fmt"
"path/filepath"
"strings"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
// LocalVolumeType represents type of local volume, e.g. tmpfs, directory,
// block, etc.
type LocalVolumeType string
const (
// LocalVolumeDirectory represents a simple directory as local volume
LocalVolumeDirectory LocalVolumeType = "dir"
// LocalVolumeDirectoryLink is like LocalVolumeDirectory but it's a symbolic link to directory
LocalVolumeDirectoryLink LocalVolumeType = "dir-link"
// LocalVolumeDirectoryBindMounted is like LocalVolumeDirectory but bind mounted
LocalVolumeDirectoryBindMounted LocalVolumeType = "dir-bindmounted"
// LocalVolumeDirectoryLinkBindMounted is like LocalVolumeDirectory but it's a symbolic link to self bind mounted directory
// Note that bind mounting at symbolic link actually mounts at directory it
// links to
LocalVolumeDirectoryLinkBindMounted LocalVolumeType = "dir-link-bindmounted"
// LocalVolumeTmpfs represents a temporary filesystem to be used as local volume
LocalVolumeTmpfs LocalVolumeType = "tmpfs"
// LocalVolumeBlock represents a Block device, creates a local file, and maps it as a block device
LocalVolumeBlock LocalVolumeType = "block"
// LocalVolumeBlockFS represents a filesystem backed by a block device
LocalVolumeBlockFS LocalVolumeType = "blockfs"
// LocalVolumeGCELocalSSD represents a Filesystem backed by GCE Local SSD as local volume
LocalVolumeGCELocalSSD LocalVolumeType = "gce-localssd-scsi-fs"
)
// LocalTestResource represents test resource of a local volume.
type LocalTestResource struct {
VolumeType LocalVolumeType
Node *v1.Node
// Volume path, path to filesystem or block device on the node
Path string
// If volume is backed by a loop device, we create loop device storage file
// under this directory.
loopDir string
}
// LocalTestResourceManager represents interface to create/destroy local test resources on node
type LocalTestResourceManager interface {
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
Remove(ltr *LocalTestResource)
}
// ltrMgr implements LocalTestResourceManager
type ltrMgr struct {
prefix string
hostExec HostExec
// hostBase represents a writable directory on the host under which we
// create test directories
hostBase string
}
// NewLocalResourceManager returns an instance of LocalTestResourceManager
func NewLocalResourceManager(prefix string, hostExec HostExec, hostBase string) LocalTestResourceManager {
return &ltrMgr{
prefix: prefix,
hostExec: hostExec,
hostBase: hostBase,
}
}
// getTestDir returns a test dir with a random name under the `hostBase` directory.
func (l *ltrMgr) getTestDir() string {
testDirName := fmt.Sprintf("%s-%s", l.prefix, string(uuid.NewUUID()))
return filepath.Join(l.hostBase, testDirName)
}
func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
err := l.hostExec.IssueCommand(fmt.Sprintf("umount %q", ltr.Path), ltr.Node)
framework.ExpectNoError(err)
ginkgo.By("Removing the test directory")
err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
framework.ExpectNoError(err)
}
// createAndSetupLoopDevice creates an empty file and associates a loop device with it.
func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
count := size / 4096
// xfs requires at least 4096 blocks
if count < 4096 {
count = 4096
}
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
losetupCmd := fmt.Sprintf("losetup -f %s/file", dir)
err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
framework.ExpectNoError(err)
}
// findLoopDevice finds loop device path by its associated storage directory.
func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
cmd := fmt.Sprintf("E2E_LOOP_DEV=$(losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
framework.ExpectNoError(err)
return strings.TrimSpace(loopDevResult)
}
func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource {
loopDir := l.getTestDir()
l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024)
loopDev := l.findLoopDevice(loopDir, node)
return &LocalTestResource{
Node: node,
Path: loopDev,
loopDir: loopDir,
}
}
// teardownLoopDevice tears down loop device by its associated storage directory.
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
loopDev := l.findLoopDevice(dir, node)
ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("losetup -d %s", loopDev)
err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
framework.ExpectNoError(err)
return
}
func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
l.teardownLoopDevice(ltr.loopDir, ltr.Node)
ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
ltr := l.setupLocalVolumeBlock(node, parameters)
loopDev := ltr.Path
loopDir := ltr.loopDir
// Format and mount at loopDir and give others rwx for read/write testing
cmd := fmt.Sprintf("mkfs -t ext4 %s && mount -t ext4 %s %s && chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: loopDir,
loopDir: loopDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
umountCmd := fmt.Sprintf("umount %s", ltr.Path)
err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
framework.ExpectNoError(err)
l.cleanupLocalVolumeBlock(ltr)
}
func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
err := l.hostExec.IssueCommand(mkdirCmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s", hostDir, hostDir, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
removeCmd := fmt.Sprintf("umount %s && rm -r %s", hostDir, hostDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm %s && umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
framework.ExpectNoError(err)
dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
// This filesystem is attached in cluster initialization, we clean all files to make it reusable.
removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
case LocalVolumeDirectory:
ltr = l.setupLocalVolumeDirectory(node, parameters)
case LocalVolumeDirectoryLink:
ltr = l.setupLocalVolumeDirectoryLink(node, parameters)
case LocalVolumeDirectoryBindMounted:
ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters)
case LocalVolumeDirectoryLinkBindMounted:
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters)
case LocalVolumeTmpfs:
ltr = l.setupLocalVolumeTmpfs(node, parameters)
case LocalVolumeBlock:
ltr = l.setupLocalVolumeBlock(node, parameters)
case LocalVolumeBlockFS:
ltr = l.setupLocalVolumeBlockFS(node, parameters)
case LocalVolumeGCELocalSSD:
ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
default:
framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
return nil
}
if ltr == nil {
framework.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters)
}
ltr.VolumeType = volumeType
return ltr
}
func (l *ltrMgr) Remove(ltr *LocalTestResource) {
switch ltr.VolumeType {
case LocalVolumeDirectory:
l.cleanupLocalVolumeDirectory(ltr)
case LocalVolumeDirectoryLink:
l.cleanupLocalVolumeDirectoryLink(ltr)
case LocalVolumeDirectoryBindMounted:
l.cleanupLocalVolumeDirectoryBindMounted(ltr)
case LocalVolumeDirectoryLinkBindMounted:
l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr)
case LocalVolumeTmpfs:
l.cleanupLocalVolumeTmpfs(ltr)
case LocalVolumeBlock:
l.cleanupLocalVolumeBlock(ltr)
case LocalVolumeBlockFS:
l.cleanupLocalVolumeBlockFS(ltr)
case LocalVolumeGCELocalSSD:
l.cleanupLocalVolumeGCELocalSSD(ltr)
default:
framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
}
return
}
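
A minimal sketch of creating and removing one local test resource (tmpfs) on a node. The prefix, the host base directory and the callback are assumptions for illustration:

package example

import (
	v1 "k8s.io/api/core/v1"

	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// withTmpfsVolume provisions a tmpfs-backed local volume on the node, hands its
// host path to fn, and tears the resource down again afterwards.
func withTmpfsVolume(hostExec storageutils.HostExec, node *v1.Node, fn func(path string)) {
	mgr := storageutils.NewLocalResourceManager("local-e2e", hostExec, "/tmp") // prefix and base dir are placeholders
	ltr := mgr.Create(node, storageutils.LocalVolumeTmpfs, nil)
	defer mgr.Remove(ltr)
	fn(ltr.Path)
}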


@@ -0,0 +1,883 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"math/rand"
"path/filepath"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
// KubeletOpt type definition
type KubeletOpt string
const (
// NodeStateTimeout defines the timeout for waiting for a node to change state
NodeStateTimeout = 1 * time.Minute
// KStart defines start value
KStart KubeletOpt = "start"
// KStop defines stop value
KStop KubeletOpt = "stop"
// KRestart defines restart value
KRestart KubeletOpt = "restart"
)
const (
// podSecurityPolicyPrivilegedClusterRoleName is the ClusterRole name for the e2e test privileged Pod Security Policy user
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell command in the target pod
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
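// examplePodExecUsage is an illustrative sketch, not part of the upstream
// file: it shows how PodExec is typically called from a test, assuming a
// framework instance and a running pod with at least one container.
func examplePodExecUsage(f *framework.Framework, pod *v1.Pod) {
stdout, stderr, err := PodExec(f, pod, "cat /proc/mounts")
framework.ExpectNoError(err, "exec failed, stdout: %s, stderr: %s", stdout, stderr)
framework.Logf("mounts in pod %s/%s:\n%s", pod.Namespace, pod.Name, stdout)
}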
// VerifyExecInPodSucceed verifies that a shell command in the target pod succeeds
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
framework.ExpectEqual(expectedFSGroup, fsGroupResult,
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// VerifyExecInPodFail verifies that a shell command in the target pod fails with the given exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
return !strings.Contains(sshResult.Stderr, "command not found")
}
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for up to a minute to check whether the kubelet PID has changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until the node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
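// exampleKubeletRestartUsage is an illustrative sketch, not part of the
// upstream file: it shows the typical call pattern for KubeletCommand when a
// test needs to bounce the kubelet on the node that runs a given pod.
func exampleKubeletRestartUsage(c clientset.Interface, pod *v1.Pod) {
// KRestart blocks until the kubelet PID changes and the node is Ready again,
// or fails the test on timeout.
KubeletCommand(KRestart, c, pod)
framework.Logf("kubelet restarted on node %s", pod.Spec.NodeName)
}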
// getKubeletMainPid returns the main PID of the kubelet process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
} else {
command = "service kubelet status | grep 'Main PID'"
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be Empty")
return sshResult.Stdout
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
}
// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
}
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcefully deleted.
// checkSubpath indicates whether the subpath should be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
ginkgo.By("Expecting the volume mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
// This deferred call makes sure the kubelet is started again after the test finishes, whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err)
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
// so wait some time to finish
time.Sleep(30 * time.Second)
}
ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
// Creating command to check whether path exists
podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd)
}
// Directories in the global directory have unpredictable names; however, device symlinks
// have the same name as pod.UID. So just find anything with the pod.UID name.
globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd)
}
ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This deferred call makes sure the kubelet is started again after the test finishes, whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err, "Failed to delete pod.")
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
// so wait some time to finish
time.Sleep(30 * time.Second)
}
ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")
ginkgo.By("Expecting the symlinks from global map path not to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.")
framework.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName)
}
// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
}
// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
}
// RunInPodWithVolume runs a command in a pod with the given claim mounted to the /mnt/test directory.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
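// exampleRunInPodWithVolumeUsage is an illustrative sketch, not part of the
// upstream file; the claim name is a placeholder. The helper mounts the claim
// at /mnt/test, so commands should reference that path.
func exampleRunInPodWithVolumeUsage(c clientset.Interface, ns string) {
RunInPodWithVolume(c, ns, "my-claim", "echo hello > /mnt/test/data && cat /mnt/test/data")
}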
// StartExternalProvisioner creates an external provisioner pod
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "external-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: imageutils.GetE2EImage(imageutils.NFSProvisioner),
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
},
},
Args: []string{
"-provisioner=" + externalPluginName,
"-grace-period=0",
},
Ports: []v1.ContainerPort{
{Name: "nfs", ContainerPort: 2049},
{Name: "mountd", ContainerPort: 20048},
{Name: "rpcbind", ContainerPort: 111},
{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: []v1.VolumeMount{
{
Name: "export-volume",
MountPath: "/export",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "export-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
},
}
provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))
ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
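// exampleStartExternalProvisionerUsage is an illustrative sketch, not part of
// the upstream file; the plugin name is a placeholder and must match the
// provisioner named in the StorageClass under test.
func exampleStartExternalProvisionerUsage(c clientset.Interface, ns string) {
provisionerPod := StartExternalProvisioner(c, ns, "example.com/nfs")
framework.Logf("external provisioner pod %s is running in namespace %s", provisionerPod.Name, ns)
}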
// PrivilegedTestPSPClusterRoleBinding tests Pod Security Policy role bindings
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
namespace string,
teardown bool,
saNames []string) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
roleBindingClient := client.RbacV1().RoleBindings(namespace)
for _, saName := range saNames {
ginkgo.By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName))
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "psp-" + saName,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: podSecurityPolicyPrivilegedClusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
roleBindingClient.Delete(context.TODO(), binding.GetName(), metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
_, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err)
if teardown {
continue
}
_, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
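// examplePSPBindingUsage is an illustrative sketch, not part of the upstream
// file: bind the privileged PSP ClusterRole to the default service account for
// the duration of a test and unbind it again on teardown.
func examplePSPBindingUsage(client clientset.Interface, ns string) {
saNames := []string{"default"}
PrivilegedTestPSPClusterRoleBinding(client, ns, false /* teardown */, saNames)
defer PrivilegedTestPSPClusterRoleBinding(client, ns, true /* teardown */, saNames)
// ... test code that needs privileged pods would run here ...
}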
// CheckVolumeModeOfPath checks the mode of the volume at the given path
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
}
}
// CheckReadWriteToPath checks that the path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
// Check that writing to directory as block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
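// exampleVolumeModeChecksUsage is an illustrative sketch, not part of the
// upstream file: it pairs CheckVolumeModeOfPath with CheckReadWriteToPath for
// a filesystem-mode volume; the mount path is an assumed value.
func exampleVolumeModeChecksUsage(f *framework.Framework, pod *v1.Pod) {
path := "/mnt/volume1" // assumed mount path inside the pod
CheckVolumeModeOfPath(f, pod, v1.PersistentVolumeFilesystem, path)
CheckReadWriteToPath(f, pod, v1.PersistentVolumeFilesystem, path)
}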
// readFile returns a shell command that checks file1.txt under path for the given content.
func readFile(content, path string) string {
if framework.NodeOSDistroIs("windows") {
return fmt.Sprintf("Select-String '%s' %s/file1.txt", content, path)
}
return fmt.Sprintf("grep '%s' %s/file1.txt", content, path)
}
// genBinDataFromSeed generates len bytes of deterministic pseudo-random data from the given seed
func genBinDataFromSeed(len int, seed int64) []byte {
binData := make([]byte, len)
rand.Seed(seed)
_, err := rand.Read(binData)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
return binData
}
// CheckReadFromPath validates that the file can be properly read.
//
// Note: directIO does not work with (default) BusyBox Pods. A requirement for
// directIO to function correctly is to read whole sector(s) for Block-mode
// PVCs (normally a sector is 512 bytes), or memory pages for files (commonly
// 4096 bytes).
func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
var pathForVolMode string
var iflag string
if volMode == v1.PersistentVolumeBlock {
pathForVolMode = path
} else {
pathForVolMode = filepath.Join(path, "file1.txt")
}
if directIO {
iflag = "iflag=direct"
}
sum := sha256.Sum256(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}
// CheckWriteToPath validates that the file can be properly written.
//
// Note: nocache does not work with (default) BusyBox Pods. To read without
// caching, enable directIO with CheckReadFromPath and check the hints about
// the len requirements.
func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
var pathForVolMode string
var oflag string
if volMode == v1.PersistentVolumeBlock {
pathForVolMode = path
} else {
pathForVolMode = filepath.Join(path, "file1.txt")
}
if nocache {
oflag = "oflag=nocache"
}
encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}
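// exampleWriteThenReadUsage is an illustrative sketch, not part of the
// upstream file: write deterministic data derived from a seed and verify that
// the same data reads back, mirroring the kubelet-restart tests above.
func exampleWriteThenReadUsage(f *framework.Framework, pod *v1.Pod) {
path := "/mnt/volume1" // assumed mount path inside the pod
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, pod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
CheckReadFromPath(f, pod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
}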
// findMountPoints returns all mount points on given node under specified directory.
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
framework.ExpectNoError(err, "Encountered HostExec error.")
var mountPoints []string
if err == nil {
for _, line := range strings.Split(result, "\n") {
if line == "" {
continue
}
mountPoints = append(mountPoints, strings.TrimSuffix(line, " is a mountpoint"))
}
}
return mountPoints
}
// FindVolumeGlobalMountPoints returns all volume global mount points on the node of given pod.
func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
}
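// exampleGlobalMountPointDiffUsage is an illustrative sketch, not part of the
// upstream file: snapshot the global mount points before and after an
// operation and report anything left behind.
func exampleGlobalMountPointDiffUsage(hostExec HostExec, node *v1.Node) {
before := FindVolumeGlobalMountPoints(hostExec, node)
// ... run the volume operation under test here ...
after := FindVolumeGlobalMountPoints(hostExec, node)
framework.Logf("mount points left behind on node %s: %v", node.Name, after.Difference(before).List())
}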
// CreateDriverNamespace creates a namespace for CSI driver installation.
// The namespace is tracked and ensured to be deleted when the test terminates.
func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.Namespace.Name))
// The driver namespace name uses the test namespace name as its basename, tying the two together
namespace, err := f.CreateNamespace(f.Namespace.Name, map[string]string{
"e2e-framework": f.BaseName,
"e2e-test-namespace": f.Namespace.Name,
})
framework.ExpectNoError(err)
if framework.TestContext.VerifyServiceAccount {
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
framework.ExpectNoError(err)
} else {
framework.Logf("Skipping waiting for service account")
}
return namespace
}
// WaitForGVRDeletion waits until a non-namespaced object has been deleted
func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Get(context.TODO(), objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %v is not found and has been deleted", gvr.Resource, objectName)
return true
} else if err != nil {
framework.Logf("Get %s returned an error: %v", objectName, err.Error())
} else {
framework.Logf("%s %v has been found and is not deleted", gvr.Resource, objectName)
}
return false
}); successful {
return nil
}
return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout)
}
// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Namespace(ns).Get(context.TODO(), objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %s is not found in namespace %s and has been deleted", gvr.Resource, objectName, ns)
return true
} else if err != nil {
framework.Logf("Get %s in namespace %s returned an error: %v", objectName, ns, err.Error())
} else {
framework.Logf("%s %s has been found in namespace %s and is not deleted", gvr.Resource, objectName, ns)
}
return false
}); successful {
return nil
}
return fmt.Errorf("%s %s in namespace %s is not deleted within %v", gvr.Resource, objectName, ns, timeout)
}
// WaitUntil runs checkDone until a timeout is reached
func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
if checkDone() {
framework.Logf("WaitUntil finished successfully after %v", time.Since(start))
return true
}
}
framework.Logf("WaitUntil failed after reaching the timeout %v", timeout)
return false
}
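// exampleWaitForGVRDeletionUsage is an illustrative sketch, not part of the
// upstream file; the GroupVersionResource is a placeholder for whatever
// cluster-scoped resource the caller is waiting on.
func exampleWaitForGVRDeletionUsage(dc dynamic.Interface, objectName string) error {
gvr := schema.GroupVersionResource{Group: "snapshot.storage.k8s.io", Version: "v1beta1", Resource: "volumesnapshotcontents"}
return WaitForGVRDeletion(dc, gvr, objectName, 2*time.Second, 2*time.Minute)
}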
// WaitForGVRFinalizer waits until an object from a given GVR contains a finalizer
// If namespace is empty, assume it is a non-namespaced object
func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName, objectNamespace, finalizer string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for object %s %s of resource %s to contain finalizer %s", timeout, objectNamespace, objectName, gvr.Resource, finalizer)
var (
err error
resource *unstructured.Unstructured
)
if successful := WaitUntil(poll, timeout, func() bool {
switch objectNamespace {
case "":
resource, err = c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
default:
resource, err = c.Resource(gvr).Namespace(objectNamespace).Get(ctx, objectName, metav1.GetOptions{})
}
if err != nil {
framework.Logf("Failed to get object %s %s with err: %v. Will retry in %v", objectNamespace, objectName, err, timeout)
return false
}
for _, f := range resource.GetFinalizers() {
if f == finalizer {
return true
}
}
return false
}); successful {
return nil
}
if err == nil {
err = fmt.Errorf("finalizer %s not added to object %s %s of resource %s", finalizer, objectNamespace, objectName, gvr)
}
return err
}
// VerifyFilePathGidInPod verifies the expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
framework.ExpectEqual(ll[3], expectedGid)
}
// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := PodExec(f, pod, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
}
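// exampleChangeGidUsage is an illustrative sketch, not part of the upstream
// file: change the group of an assumed file inside the pod; the helper then
// verifies the change with VerifyFilePathGidInPod.
func exampleChangeGidUsage(f *framework.Framework, pod *v1.Pod) {
ChangeFilePathGidInPod(f, "/mnt/volume1/file1.txt", "1000", pod)
}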