rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2
Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.

- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by mergify[bot]. Parent: 0e79135419. Commit: 07b05616a0.
vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go (generated, vendored): 159 changed lines
@@ -23,12 +23,13 @@ import (
 	"errors"
 	"fmt"

 	"github.com/onsi/ginkgo/v2"

 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -49,7 +50,7 @@ import (
 //
 // LoadFromManifests has some limitations:
 //   - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
-//     https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
+//     https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1)
 //     and silently ignored
 //   - the latest stable API version for each item is used, regardless of what
 //     is specified in the manifest files
@@ -140,21 +141,7 @@ func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...
 // PatchItems has the some limitations as LoadFromManifests:
 //   - only some common items are supported, unknown ones trigger an error
 //   - only the latest stable API version for each item is supported
-func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) {
-	var destructors []func() error
-	cleanup := func() {
-		// TODO (?): use same logic as framework.go for determining
-		// whether we are expected to clean up? This would change the
-		// meaning of the -delete-namespace and -delete-namespace-on-failure
-		// command line flags, because they would also start to apply
-		// to non-namespaced items.
-		for _, destructor := range destructors {
-			if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
-				framework.Logf("deleting failed: %s", err)
-			}
-		}
-	}
-
+func CreateItems(ctx context.Context, f *framework.Framework, ns *v1.Namespace, items ...interface{}) error {
 	var result error
 	for _, item := range items {
 		// Each factory knows which item(s) it supports, so try each one.
@@ -164,12 +151,9 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
 		// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
 		framework.Logf("creating %s", description)
 		for _, factory := range factories {
-			destructor, err := factory.Create(f, ns, item)
+			destructor, err := factory.Create(ctx, f, ns, item)
 			if destructor != nil {
-				destructors = append(destructors, func() error {
-					framework.Logf("deleting %s", description)
-					return destructor()
-				})
+				ginkgo.DeferCleanup(framework.IgnoreNotFound(destructor), framework.AnnotatedLocation(fmt.Sprintf("deleting %s", description)))
 			}
 			if err == nil {
 				done = true
@@ -185,33 +169,28 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
 		}
 	}

-	if result != nil {
-		cleanup()
-		return nil, result
-	}
-
-	return cleanup, nil
+	return result
 }

 // CreateFromManifests is a combination of LoadFromManifests,
 // PatchItems, patching with an optional custom function,
 // and CreateItems.
-func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
+func CreateFromManifests(ctx context.Context, f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) error {
 	items, err := LoadFromManifests(files...)
 	if err != nil {
-		return nil, fmt.Errorf("CreateFromManifests: %w", err)
+		return fmt.Errorf("CreateFromManifests: %w", err)
 	}
 	if err := PatchItems(f, driverNamespace, items...); err != nil {
-		return nil, err
+		return err
 	}
 	if patch != nil {
 		for _, item := range items {
 			if err := patch(item); err != nil {
-				return nil, err
+				return err
 			}
 		}
 	}
-	return CreateItems(f, driverNamespace, items...)
+	return CreateItems(ctx, f, driverNamespace, items...)
 }

 // What is a subset of metav1.TypeMeta which (in contrast to
@@ -251,7 +230,7 @@ type ItemFactory interface {
 	// error or a cleanup function for the created item.
 	// If the item is of an unsupported type, it must return
 	// an error that has errorItemNotSupported as cause.
-	Create(f *framework.Framework, ns *v1.Namespace, item interface{}) (func() error, error)
+	Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, item interface{}) (func(ctx context.Context) error, error)
 }

 // describeItem always returns a string that describes the item,
@@ -410,17 +389,17 @@ func (f *serviceAccountFactory) New() runtime.Object {
 	return &v1.ServiceAccount{}
 }

-func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*serviceAccountFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*v1.ServiceAccount)
 	if !ok {
 		return nil, errorItemNotSupported
 	}
 	client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create ServiceAccount: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -430,7 +409,7 @@ func (f *clusterRoleFactory) New() runtime.Object {
 	return &rbacv1.ClusterRole{}
 }

-func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*clusterRoleFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*rbacv1.ClusterRole)
 	if !ok {
 		return nil, errorItemNotSupported
@@ -438,11 +417,11 @@ func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i in

 	framework.Logf("Define cluster role %v", item.GetName())
 	client := f.ClientSet.RbacV1().ClusterRoles()
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create ClusterRole: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -452,18 +431,18 @@ func (f *clusterRoleBindingFactory) New() runtime.Object {
 	return &rbacv1.ClusterRoleBinding{}
 }

-func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*clusterRoleBindingFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*rbacv1.ClusterRoleBinding)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.RbacV1().ClusterRoleBindings()
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create ClusterRoleBinding: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -473,18 +452,18 @@ func (f *roleFactory) New() runtime.Object {
 	return &rbacv1.Role{}
 }

-func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*roleFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*rbacv1.Role)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.RbacV1().Roles(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create Role: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -494,18 +473,18 @@ func (f *roleBindingFactory) New() runtime.Object {
 	return &rbacv1.RoleBinding{}
 }

-func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*roleBindingFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*rbacv1.RoleBinding)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create RoleBinding: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -515,18 +494,18 @@ func (f *serviceFactory) New() runtime.Object {
 	return &v1.Service{}
 }

-func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*serviceFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*v1.Service)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.CoreV1().Services(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create Service: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -536,18 +515,18 @@ func (f *statefulSetFactory) New() runtime.Object {
 	return &appsv1.StatefulSet{}
 }

-func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*statefulSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*appsv1.StatefulSet)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create StatefulSet: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -557,18 +536,18 @@ func (f *deploymentFactory) New() runtime.Object {
 	return &appsv1.Deployment{}
 }

-func (*deploymentFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*deploymentFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*appsv1.Deployment)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.AppsV1().Deployments(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create Deployment: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -578,18 +557,18 @@ func (f *daemonSetFactory) New() runtime.Object {
 	return &appsv1.DaemonSet{}
 }

-func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*daemonSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*appsv1.DaemonSet)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create DaemonSet: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -599,18 +578,18 @@ func (f *replicaSetFactory) New() runtime.Object {
 	return &appsv1.ReplicaSet{}
 }

-func (*replicaSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*replicaSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*appsv1.ReplicaSet)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.AppsV1().ReplicaSets(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create ReplicaSet: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -620,18 +599,18 @@ func (f *storageClassFactory) New() runtime.Object {
 	return &storagev1.StorageClass{}
 }

-func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*storageClassFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*storagev1.StorageClass)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.StorageV1().StorageClasses()
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create StorageClass: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -641,18 +620,18 @@ func (f *csiDriverFactory) New() runtime.Object {
 	return &storagev1.CSIDriver{}
 }

-func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*csiDriverFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*storagev1.CSIDriver)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.StorageV1().CSIDrivers()
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create CSIDriver: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -662,18 +641,18 @@ func (f *secretFactory) New() runtime.Object {
 	return &v1.Secret{}
 }

-func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*secretFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	item, ok := i.(*v1.Secret)
 	if !ok {
 		return nil, errorItemNotSupported
 	}

 	client := f.ClientSet.CoreV1().Secrets(ns.Name)
-	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+	if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create Secret: %w", err)
 	}
-	return func() error {
-		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }

@@ -683,7 +662,7 @@ func (f *customResourceDefinitionFactory) New() runtime.Object {
 	return &apiextensionsv1.CustomResourceDefinition{}
 }

-func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+func (*customResourceDefinitionFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
 	var err error
 	unstructCRD := &unstructured.Unstructured{}
 	gvr := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
@@ -698,11 +677,11 @@ func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Na
 		return nil, err
 	}

-	if _, err = f.DynamicClient.Resource(gvr).Create(context.TODO(), unstructCRD, metav1.CreateOptions{}); err != nil {
+	if _, err = f.DynamicClient.Resource(gvr).Create(ctx, unstructCRD, metav1.CreateOptions{}); err != nil {
 		return nil, fmt.Errorf("create CustomResourceDefinition: %w", err)
 	}
-	return func() error {
-		return f.DynamicClient.Resource(gvr).Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+	return func(ctx context.Context) error {
+		return f.DynamicClient.Resource(gvr).Delete(ctx, item.GetName(), metav1.DeleteOptions{})
 	}, nil
 }
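Note (not part of the vendored diff): with k8s.io/kubernetes 1.27 the manifest helpers above stop returning cleanup closures and instead take a context and register deletion through ginkgo.DeferCleanup. A minimal sketch of how a caller might migrate is below; the manifest path and helper name are hypothetical.

package example

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// deployExampleDriver is a hypothetical helper showing the 1.27-style call.
func deployExampleDriver(ctx context.Context, f *framework.Framework) {
	// Old (1.26.x): cleanup, err := utils.CreateFromManifests(f, f.Namespace, nil, "manifests/driver.yaml")
	//               framework.ExpectNoError(err); defer cleanup()
	// New (1.27.x): pass ctx and check the error; each created item is deleted
	// by the ginkgo.DeferCleanup registration made inside CreateItems.
	err := utils.CreateFromManifests(ctx, f, f.Namespace, nil, "manifests/driver.yaml")
	framework.ExpectNoError(err)
}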
vendor/k8s.io/kubernetes/test/e2e/storage/utils/deployment.go (generated, vendored): 12 changed lines
@@ -17,6 +17,7 @@ limitations under the License.
 package utils

 import (
+	"fmt"
 	"path"
 	"strings"

@@ -94,6 +95,11 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
 				container.VolumeMounts[e].MountPath = substKubeletRootDir(container.VolumeMounts[e].MountPath)
 			}

+			if len(o.Features) > 0 && len(o.Features[container.Name]) > 0 {
+				featuregateString := strings.Join(o.Features[container.Name], ",")
+				container.Args = append(container.Args, fmt.Sprintf("--feature-gates=%s", featuregateString))
+			}
+
 			// Overwrite driver name resp. provider name
 			// by appending a parameter with the right
 			// value.
@@ -218,4 +224,10 @@ type PatchCSIOptions struct {
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
 	SELinuxMount *bool
+	// If not nil, the values will be used for setting feature arguments to
+	// specific sidecar.
+	// Feature is a map - where key is sidecar name such as:
+	// -- key: resizer
+	// -- value: []string{feature-gates}
+	Features map[string][]string
 }
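Note (not part of the vendored diff): the new PatchCSIOptions.Features field maps a sidecar container name to a list of feature gates, which PatchCSIDeployment joins with commas and appends as a --feature-gates argument. A hedged sketch of how such options might be populated; the container name and the gate string are made up for illustration.

package example

import (
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// examplePatchOptions sets only the new Features field.
func examplePatchOptions() utils.PatchCSIOptions {
	return utils.PatchCSIOptions{
		Features: map[string][]string{
			// Rendered by PatchCSIDeployment as:
			//   --feature-gates=RecoverVolumeExpansionFailure=true
			"csi-resizer": {"RecoverVolumeExpansionFailure=true"},
		},
	}
}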
vendor/k8s.io/kubernetes/test/e2e/storage/utils/ebs.go (generated, vendored): 263 changed lines (file deleted)
@@ -1,263 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/ec2"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/klog/v2"
-)
-
-const (
-	volumeAttachmentStatusPollDelay = 2 * time.Second
-	volumeAttachmentStatusFactor    = 2
-	volumeAttachmentStatusSteps     = 6
-
-	// represents expected attachment status of a volume after attach
-	volumeAttachedStatus = "attached"
-
-	// represents expected attachment status of a volume after detach
-	volumeDetachedStatus = "detached"
-)
-
-// EBSUtil provides functions to interact with EBS volumes
-type EBSUtil struct {
-	client       *ec2.EC2
-	validDevices []string
-}
-
-// NewEBSUtil returns an instance of EBSUtil which can be used to
-// to interact with EBS volumes
-func NewEBSUtil(client *ec2.EC2) *EBSUtil {
-	ebsUtil := &EBSUtil{client: client}
-	validDevices := []string{}
-	for _, firstChar := range []rune{'b', 'c'} {
-		for i := 'a'; i <= 'z'; i++ {
-			dev := string([]rune{firstChar, i})
-			validDevices = append(validDevices, dev)
-		}
-	}
-	ebsUtil.validDevices = validDevices
-	return ebsUtil
-}
-
-// AttachDisk attaches an EBS volume to a node.
-func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
-	instance, err := findInstanceByNodeName(nodeName, ebs.client)
-	if err != nil {
-		return fmt.Errorf("error finding node %s: %v", nodeName, err)
-	}
-	err = ebs.waitForAvailable(volumeID)
-	if err != nil {
-		return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err)
-	}
-
-	device, err := ebs.findFreeDevice(instance)
-	if err != nil {
-		return fmt.Errorf("error finding free device on node %s: %v", nodeName, err)
-	}
-	hostDevice := "/dev/xvd" + string(device)
-	attachInput := &ec2.AttachVolumeInput{
-		VolumeId:   &volumeID,
-		InstanceId: instance.InstanceId,
-		Device:     &hostDevice,
-	}
-	_, err = ebs.client.AttachVolume(attachInput)
-	if err != nil {
-		return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err)
-	}
-	return ebs.waitForAttach(volumeID)
-}
-
-func (ebs *EBSUtil) findFreeDevice(instance *ec2.Instance) (string, error) {
-	deviceMappings := map[string]string{}
-
-	for _, blockDevice := range instance.BlockDeviceMappings {
-		name := aws.StringValue(blockDevice.DeviceName)
-		name = strings.TrimPrefix(name, "/dev/sd")
-		name = strings.TrimPrefix(name, "/dev/xvd")
-		if len(name) < 1 || len(name) > 2 {
-			klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
-		}
-
-		deviceMappings[name] = aws.StringValue(blockDevice.Ebs.VolumeId)
-	}
-
-	for _, device := range ebs.validDevices {
-		if _, found := deviceMappings[device]; !found {
-			return device, nil
-		}
-	}
-	return "", fmt.Errorf("no available device")
-}
-
-func (ebs *EBSUtil) waitForAttach(volumeID string) error {
-	backoff := wait.Backoff{
-		Duration: volumeAttachmentStatusPollDelay,
-		Factor:   volumeAttachmentStatusFactor,
-		Steps:    volumeAttachmentStatusSteps,
-	}
-	time.Sleep(volumeAttachmentStatusPollDelay)
-	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
-		info, err := ebs.describeVolume(volumeID)
-		if err != nil {
-			return false, err
-		}
-
-		if len(info.Attachments) > 1 {
-			// Shouldn't happen; log so we know if it is
-			klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
-		}
-		attachmentStatus := ""
-		for _, a := range info.Attachments {
-			if attachmentStatus != "" {
-				// Shouldn't happen; log so we know if it is
-				klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
-			}
-			if a.State != nil {
-				attachmentStatus = *a.State
-			} else {
-				// Shouldn't happen; log so we know if it is
-				klog.Warningf("Ignoring nil attachment state for volume %q: %v", volumeID, a)
-			}
-		}
-		if attachmentStatus == "" {
-			attachmentStatus = volumeDetachedStatus
-		}
-		if attachmentStatus == volumeAttachedStatus {
-			// Attachment is in requested state, finish waiting
-			return true, nil
-		}
-		return false, nil
-	})
-	return err
-}
-
-func (ebs *EBSUtil) waitForAvailable(volumeID string) error {
-	backoff := wait.Backoff{
-		Duration: volumeAttachmentStatusPollDelay,
-		Factor:   volumeAttachmentStatusFactor,
-		Steps:    volumeAttachmentStatusSteps,
-	}
-	time.Sleep(volumeAttachmentStatusPollDelay)
-	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
-		info, err := ebs.describeVolume(volumeID)
-		if err != nil {
-			return false, err
-		}
-		volumeState := aws.StringValue(info.State)
-		if volumeState != ec2.VolumeStateAvailable {
-			return false, nil
-		}
-		return true, nil
-	})
-	return err
-}
-
-// Gets the full information about this volume from the EC2 API
-func (ebs *EBSUtil) describeVolume(volumeID string) (*ec2.Volume, error) {
-	request := &ec2.DescribeVolumesInput{
-		VolumeIds: []*string{&volumeID},
-	}
-
-	results := []*ec2.Volume{}
-	var nextToken *string
-	for {
-		response, err := ebs.client.DescribeVolumes(request)
-		if err != nil {
-			return nil, err
-		}
-
-		results = append(results, response.Volumes...)
-
-		nextToken = response.NextToken
-		if aws.StringValue(nextToken) == "" {
-			break
-		}
-		request.NextToken = nextToken
-	}
-
-	if len(results) == 0 {
-		return nil, fmt.Errorf("no volumes found")
-	}
-	if len(results) > 1 {
-		return nil, fmt.Errorf("multiple volumes found")
-	}
-	return results[0], nil
-}
-
-func newEc2Filter(name string, value string) *ec2.Filter {
-	filter := &ec2.Filter{
-		Name: aws.String(name),
-		Values: []*string{
-			aws.String(value),
-		},
-	}
-	return filter
-}
-
-func findInstanceByNodeName(nodeName string, cloud *ec2.EC2) (*ec2.Instance, error) {
-	filters := []*ec2.Filter{
-		newEc2Filter("private-dns-name", nodeName),
-	}
-
-	request := &ec2.DescribeInstancesInput{
-		Filters: filters,
-	}
-
-	instances, err := describeInstances(request, cloud)
-	if err != nil {
-		return nil, err
-	}
-	if len(instances) == 0 {
-		return nil, nil
-	}
-	if len(instances) > 1 {
-		return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
-	}
-	return instances[0], nil
-}
-
-func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*ec2.Instance, error) {
-	// Instances are paged
-	results := []*ec2.Instance{}
-	var nextToken *string
-
-	for {
-		response, err := cloud.DescribeInstances(request)
-		if err != nil {
-			return nil, fmt.Errorf("error listing AWS instances: %v", err)
-		}
-
-		for _, reservation := range response.Reservations {
-			results = append(results, reservation.Instances...)
-		}
-
-		nextToken = response.NextToken
-		if nextToken == nil || len(*nextToken) == 0 {
-			break
-		}
-		request.NextToken = nextToken
-	}
-
-	return results, nil
-}
vendor/k8s.io/kubernetes/test/e2e/storage/utils/host_exec.go (generated, vendored): 51 changed lines
@@ -47,10 +47,10 @@ func LogResult(result Result) {

 // HostExec represents interface we require to execute commands on remote host.
 type HostExec interface {
-	Execute(cmd string, node *v1.Node) (Result, error)
-	IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
-	IssueCommand(cmd string, node *v1.Node) error
-	Cleanup()
+	Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error)
+	IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error)
+	IssueCommand(ctx context.Context, cmd string, node *v1.Node) error
+	Cleanup(ctx context.Context)
 }

 // hostExecutor implements HostExec
@@ -69,18 +69,25 @@ func NewHostExec(framework *framework.Framework) HostExec {

 // launchNodeExecPod launches a hostexec pod for local PV and waits
 // until it's Running.
-func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
+func (h *hostExecutor) launchNodeExecPod(ctx context.Context, node string) *v1.Pod {
 	f := h.Framework
 	cs := f.ClientSet
 	ns := f.Namespace

 	hostExecPod := e2epod.NewExecPodSpec(ns.Name, "", true)
 	hostExecPod.GenerateName = fmt.Sprintf("hostexec-%s-", node)
-	// Use NodeAffinity instead of NodeName so that pods will not
-	// be immediately Failed by kubelet if it's out of space. Instead
-	// Pods will be pending in the scheduler until there is space freed
-	// up.
-	e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
+
+	if framework.TestContext.NodeE2E {
+		// E2E node tests do not run a scheduler, so set the node name directly
+		hostExecPod.Spec.NodeName = node
+	} else {
+		// Use NodeAffinity instead of NodeName so that pods will not
+		// be immediately Failed by kubelet if it's out of space. Instead
+		// Pods will be pending in the scheduler until there is space freed
+		// up.
+		e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
+
+	}
 	hostExecPod.Spec.Volumes = []v1.Volume{
 		{
 			// Required to enter into host mount namespace via nsenter.
@@ -104,9 +111,9 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
 			return &privileged
 		}(true),
 	}
-	pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
+	pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, hostExecPod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
-	err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
+	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
 	framework.ExpectNoError(err)
 	return pod
 }
@@ -115,8 +122,8 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
 // performing the remote command execution, the stdout, stderr and exit code
 // are returned.
 // This works like ssh.SSH(...) utility.
-func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
-	result, err := h.exec(cmd, node)
+func (h *hostExecutor) Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
+	result, err := h.exec(ctx, cmd, node)
 	if codeExitErr, ok := err.(exec.CodeExitError); ok {
 		// extract the exit code of remote command and silence the command
 		// non-zero exit code error
@@ -126,14 +133,14 @@ func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
 	return result, err
 }

-func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
+func (h *hostExecutor) exec(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
 	result := Result{
 		Host: node.Name,
 		Cmd:  cmd,
 	}
 	pod, ok := h.nodeExecPods[node.Name]
 	if !ok {
-		pod = h.launchNodeExecPod(node.Name)
+		pod = h.launchNodeExecPod(ctx, node.Name)
 		if pod == nil {
 			return result, fmt.Errorf("failed to create hostexec pod for node %q", node)
 		}
@@ -165,8 +172,8 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
 // IssueCommandWithResult issues command on the given node and returns stdout as
 // result. It returns error if there are some issues executing the command or
 // the command exits non-zero.
-func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
-	result, err := h.exec(cmd, node)
+func (h *hostExecutor) IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error) {
+	result, err := h.exec(ctx, cmd, node)
 	if err != nil {
 		LogResult(result)
 	}
@@ -174,17 +181,17 @@ func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string
 }

 // IssueCommand works like IssueCommandWithResult, but discards result.
-func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
-	_, err := h.IssueCommandWithResult(cmd, node)
+func (h *hostExecutor) IssueCommand(ctx context.Context, cmd string, node *v1.Node) error {
+	_, err := h.IssueCommandWithResult(ctx, cmd, node)
 	return err
 }

 // Cleanup cleanup resources it created during test.
 // Note that in most cases it is not necessary to call this because we create
 // pods under test namespace which will be destroyed in teardown phase.
-func (h *hostExecutor) Cleanup() {
+func (h *hostExecutor) Cleanup(ctx context.Context) {
 	for _, pod := range h.nodeExecPods {
-		e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
+		e2epod.DeletePodOrFail(ctx, h.Framework.ClientSet, pod.Namespace, pod.Name)
 	}
 	h.nodeExecPods = make(map[string]*v1.Pod)
 }
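Note (not part of the vendored diff): every HostExec method now takes a context as its first argument, and so does Cleanup. A small sketch of a hypothetical caller under the new interface; the command string is made up.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// listTmpOnNode is a hypothetical caller of the context-aware HostExec.
func listTmpOnNode(ctx context.Context, f *framework.Framework, node *v1.Node) (string, error) {
	hostExec := utils.NewHostExec(f)
	// Cleanup now also takes the context.
	defer hostExec.Cleanup(ctx)
	// IssueCommandWithResult(ctx, cmd, node) replaces IssueCommandWithResult(cmd, node).
	return hostExec.IssueCommandWithResult(ctx, "ls /tmp", node)
}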
vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go (generated, vendored): 141 changed lines
@ -21,6 +21,7 @@ package utils
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -69,9 +70,9 @@ type LocalTestResource struct {
|
||||
|
||||
// LocalTestResourceManager represents interface to create/destroy local test resources on node
|
||||
type LocalTestResourceManager interface {
|
||||
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
|
||||
ExpandBlockDevice(ltr *LocalTestResource, mbToAdd int) error
|
||||
Remove(ltr *LocalTestResource)
|
||||
Create(ctx context.Context, node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
|
||||
ExpandBlockDevice(ctx context.Context, ltr *LocalTestResource, mbToAdd int) error
|
||||
Remove(ctx context.Context, ltr *LocalTestResource)
|
||||
}
|
||||
|
||||
// ltrMgr implements LocalTestResourceManager
|
||||
@ -98,10 +99,10 @@ func (l *ltrMgr) getTestDir() string {
|
||||
return filepath.Join(l.hostBase, testDirName)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
func (l *ltrMgr) setupLocalVolumeTmpfs(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
hostDir := l.getTestDir()
|
||||
ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
|
||||
err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
|
||||
err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
|
||||
framework.ExpectNoError(err)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
@ -109,18 +110,18 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ctx context.Context, ltr *LocalTestResource) {
|
||||
ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
|
||||
err := l.hostExec.IssueCommand(fmt.Sprintf("umount %q", ltr.Path), ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("umount %q", ltr.Path), ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Removing the test directory")
|
||||
err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
|
||||
err = l.hostExec.IssueCommand(ctx, fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// createAndSetupLoopDevice creates an empty file and associates a loop devie with it.
|
||||
func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
|
||||
func (l *ltrMgr) createAndSetupLoopDevice(ctx context.Context, dir string, node *v1.Node, size int) {
|
||||
ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
|
||||
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
|
||||
count := size / 4096
|
||||
@ -130,22 +131,22 @@ func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
|
||||
}
|
||||
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
|
||||
losetupCmd := fmt.Sprintf("losetup -f %s/file", dir)
|
||||
err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
|
||||
err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// findLoopDevice finds loop device path by its associated storage directory.
|
||||
func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
|
||||
func (l *ltrMgr) findLoopDevice(ctx context.Context, dir string, node *v1.Node) string {
|
||||
cmd := fmt.Sprintf("E2E_LOOP_DEV=$(losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
|
||||
loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
|
||||
loopDevResult, err := l.hostExec.IssueCommandWithResult(ctx, cmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return strings.TrimSpace(loopDevResult)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
func (l *ltrMgr) setupLocalVolumeBlock(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
loopDir := l.getTestDir()
|
||||
l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024)
|
||||
loopDev := l.findLoopDevice(loopDir, node)
|
||||
l.createAndSetupLoopDevice(ctx, loopDir, node, 20*1024*1024)
|
||||
loopDev := l.findLoopDevice(ctx, loopDir, node)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
Path: loopDev,
|
||||
@ -154,30 +155,30 @@ func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]stri
|
||||
}
|
||||
|
||||
// teardownLoopDevice tears down loop device by its associated storage directory.
|
||||
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
|
||||
loopDev := l.findLoopDevice(dir, node)
|
||||
func (l *ltrMgr) teardownLoopDevice(ctx context.Context, dir string, node *v1.Node) {
|
||||
loopDev := l.findLoopDevice(ctx, dir, node)
|
||||
ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
|
||||
losetupDeleteCmd := fmt.Sprintf("losetup -d %s", loopDev)
|
||||
err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
|
||||
err := l.hostExec.IssueCommand(ctx, losetupDeleteCmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
|
||||
l.teardownLoopDevice(ltr.loopDir, ltr.Node)
|
||||
func (l *ltrMgr) cleanupLocalVolumeBlock(ctx context.Context, ltr *LocalTestResource) {
|
||||
l.teardownLoopDevice(ctx, ltr.loopDir, ltr.Node)
|
||||
ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
|
||||
removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
|
||||
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
ltr := l.setupLocalVolumeBlock(node, parameters)
|
||||
func (l *ltrMgr) setupLocalVolumeBlockFS(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
ltr := l.setupLocalVolumeBlock(ctx, node, parameters)
|
||||
loopDev := ltr.Path
|
||||
loopDir := ltr.loopDir
|
||||
// Format and mount at loopDir and give others rwx for read/write testing
|
||||
cmd := fmt.Sprintf("mkfs -t ext4 %s && mount -t ext4 %s %s && chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
|
||||
err := l.hostExec.IssueCommand(cmd, node)
|
||||
err := l.hostExec.IssueCommand(ctx, cmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
@ -186,17 +187,17 @@ func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]st
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ctx context.Context, ltr *LocalTestResource) {
|
||||
umountCmd := fmt.Sprintf("umount %s", ltr.Path)
|
||||
err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, umountCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
l.cleanupLocalVolumeBlock(ltr)
|
||||
l.cleanupLocalVolumeBlock(ctx, ltr)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
func (l *ltrMgr) setupLocalVolumeDirectory(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
hostDir := l.getTestDir()
|
||||
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
|
||||
err := l.hostExec.IssueCommand(mkdirCmd, node)
|
||||
err := l.hostExec.IssueCommand(ctx, mkdirCmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
@ -204,18 +205,18 @@ func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectory(ctx context.Context, ltr *LocalTestResource) {
|
||||
ginkgo.By("Removing the test directory")
|
||||
removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
|
||||
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
func (l *ltrMgr) setupLocalVolumeDirectoryLink(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
hostDir := l.getTestDir()
|
||||
hostDirBackend := hostDir + "-backend"
|
||||
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
|
||||
err := l.hostExec.IssueCommand(cmd, node)
|
||||
err := l.hostExec.IssueCommand(ctx, cmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
@ -223,19 +224,19 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[str
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ctx context.Context, ltr *LocalTestResource) {
|
||||
ginkgo.By("Removing the test directory")
|
||||
hostDir := ltr.Path
|
||||
hostDirBackend := hostDir + "-backend"
|
||||
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
|
||||
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
hostDir := l.getTestDir()
|
||||
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s", hostDir, hostDir, hostDir)
|
||||
err := l.hostExec.IssueCommand(cmd, node)
|
||||
err := l.hostExec.IssueCommand(ctx, cmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
@ -243,19 +244,19 @@ func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ctx context.Context, ltr *LocalTestResource) {
|
||||
ginkgo.By("Removing the test directory")
|
||||
hostDir := ltr.Path
|
||||
removeCmd := fmt.Sprintf("umount %s && rm -r %s", hostDir, hostDir)
|
||||
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
hostDir := l.getTestDir()
|
||||
hostDirBackend := hostDir + "-backend"
|
||||
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
|
||||
err := l.hostExec.IssueCommand(cmd, node)
|
||||
err := l.hostExec.IssueCommand(ctx, cmd, node)
|
||||
framework.ExpectNoError(err)
|
||||
return &LocalTestResource{
|
||||
Node: node,
|
||||
@ -263,17 +264,17 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, paramet
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, ltr *LocalTestResource) {
|
||||
ginkgo.By("Removing the test directory")
|
||||
hostDir := ltr.Path
|
||||
hostDirBackend := hostDir + "-backend"
|
||||
removeCmd := fmt.Sprintf("rm %s && umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
|
||||
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
|
||||
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
|
||||
res, err := l.hostExec.IssueCommandWithResult(ctx, "ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
|
||||
framework.ExpectNoError(err)
|
||||
dirName := strings.Fields(res)[0]
|
||||
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
|
||||
@ -283,47 +284,47 @@ func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[strin
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
|
||||
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ctx context.Context, ltr *LocalTestResource) {
|
||||
// This filesystem is attached in cluster initialization, we clean all files to make it reusable.
|
||||
removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
|
||||
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
|
||||
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) expandLocalVolumeBlockFS(ltr *LocalTestResource, mbToAdd int) error {
|
||||
func (l *ltrMgr) expandLocalVolumeBlockFS(ctx context.Context, ltr *LocalTestResource, mbToAdd int) error {
|
||||
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file conv=notrunc oflag=append bs=1M count=%d", ltr.loopDir, mbToAdd)
|
||||
loopDev := l.findLoopDevice(ltr.loopDir, ltr.Node)
|
||||
loopDev := l.findLoopDevice(ctx, ltr.loopDir, ltr.Node)
|
||||
losetupCmd := fmt.Sprintf("losetup -c %s", loopDev)
|
||||
return l.hostExec.IssueCommand(fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
|
||||
return l.hostExec.IssueCommand(ctx, fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
|
||||
}
|
||||
|
||||
func (l *ltrMgr) ExpandBlockDevice(ltr *LocalTestResource, mbtoAdd int) error {
|
||||
func (l *ltrMgr) ExpandBlockDevice(ctx context.Context, ltr *LocalTestResource, mbtoAdd int) error {
switch ltr.VolumeType {
case LocalVolumeBlockFS:
return l.expandLocalVolumeBlockFS(ltr, mbtoAdd)
return l.expandLocalVolumeBlockFS(ctx, ltr, mbtoAdd)
}
return fmt.Errorf("Failed to expand local test resource, unsupported volume type: %s", ltr.VolumeType)
}

func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) Create(ctx context.Context, node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
case LocalVolumeDirectory:
ltr = l.setupLocalVolumeDirectory(node, parameters)
ltr = l.setupLocalVolumeDirectory(ctx, node, parameters)
case LocalVolumeDirectoryLink:
ltr = l.setupLocalVolumeDirectoryLink(node, parameters)
ltr = l.setupLocalVolumeDirectoryLink(ctx, node, parameters)
case LocalVolumeDirectoryBindMounted:
ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters)
ltr = l.setupLocalVolumeDirectoryBindMounted(ctx, node, parameters)
case LocalVolumeDirectoryLinkBindMounted:
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters)
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(ctx, node, parameters)
case LocalVolumeTmpfs:
ltr = l.setupLocalVolumeTmpfs(node, parameters)
ltr = l.setupLocalVolumeTmpfs(ctx, node, parameters)
case LocalVolumeBlock:
ltr = l.setupLocalVolumeBlock(node, parameters)
ltr = l.setupLocalVolumeBlock(ctx, node, parameters)
case LocalVolumeBlockFS:
ltr = l.setupLocalVolumeBlockFS(node, parameters)
ltr = l.setupLocalVolumeBlockFS(ctx, node, parameters)
case LocalVolumeGCELocalSSD:
ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
ltr = l.setupLocalVolumeGCELocalSSD(ctx, node, parameters)
default:
framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
return nil
@ -335,24 +336,24 @@ func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters ma
return ltr
}

func (l *ltrMgr) Remove(ltr *LocalTestResource) {
func (l *ltrMgr) Remove(ctx context.Context, ltr *LocalTestResource) {
switch ltr.VolumeType {
case LocalVolumeDirectory:
l.cleanupLocalVolumeDirectory(ltr)
l.cleanupLocalVolumeDirectory(ctx, ltr)
case LocalVolumeDirectoryLink:
l.cleanupLocalVolumeDirectoryLink(ltr)
l.cleanupLocalVolumeDirectoryLink(ctx, ltr)
case LocalVolumeDirectoryBindMounted:
l.cleanupLocalVolumeDirectoryBindMounted(ltr)
l.cleanupLocalVolumeDirectoryBindMounted(ctx, ltr)
case LocalVolumeDirectoryLinkBindMounted:
l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr)
l.cleanupLocalVolumeDirectoryLinkBindMounted(ctx, ltr)
case LocalVolumeTmpfs:
l.cleanupLocalVolumeTmpfs(ltr)
l.cleanupLocalVolumeTmpfs(ctx, ltr)
case LocalVolumeBlock:
l.cleanupLocalVolumeBlock(ltr)
l.cleanupLocalVolumeBlock(ctx, ltr)
case LocalVolumeBlockFS:
l.cleanupLocalVolumeBlockFS(ltr)
l.cleanupLocalVolumeBlockFS(ctx, ltr)
case LocalVolumeGCELocalSSD:
l.cleanupLocalVolumeGCELocalSSD(ltr)
l.cleanupLocalVolumeGCELocalSSD(ctx, ltr)
default:
framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
}
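The hunks above thread a caller-supplied context.Context through every ltrMgr setup and cleanup helper instead of having the helpers create contexts internally. A minimal, hypothetical caller sketch (the spec text, the ltrMgr value, and the node variable are assumptions for illustration, not code from this commit) showing how a Ginkgo v2 spec would drive the new signatures:

// Sketch only: Ginkgo passes a per-spec context into the spec body, and
// DeferCleanup supplies its own context to Remove because the function's
// first parameter is a context.Context (the same mechanism used below for
// KubeletCommand).
ginkgo.It("creates and removes a local test resource", func(ctx context.Context) {
    ltr := ltrMgr.Create(ctx, node, LocalVolumeDirectory, nil)
    ginkgo.DeferCleanup(ltrMgr.Remove, ltr)
    // ... exercise ltr.Path on the node here ...
})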
29
vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go
generated
vendored
@ -43,8 +43,8 @@ import (
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
func StartPodLogs(ctx context.Context, f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(ctx)
cs := f.ClientSet

ns := driverNamespace.Name
@ -103,17 +103,17 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
// - If `systemctl` returns stderr "command not found, issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
func KubeletCommand(ctx context.Context, kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""

nodeIP, err := getHostAddress(c, pod)
nodeIP, err := getHostAddress(ctx, c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"

framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
sshResult, err := e2essh.SSH(ctx, "systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
@ -122,23 +122,23 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}

sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
sudoPresent := isSudoPresent(ctx, nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}

if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
kubeletPid = getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent)
}

framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
sshResult, err = e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
if ok := e2enode.WaitForNodeToBeNotReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
@ -146,7 +146,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if ctx.Err() != nil {
framework.Fail("timed out waiting for Kubelet POD change")
}
kubeletPidAfterRestart := getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
@ -161,7 +164,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
if ok := e2enode.WaitForNodeToBeReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
@ -170,8 +173,8 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
func getHostAddress(ctx context.Context, client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(ctx, p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
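The new branch in KubeletCommand's restart loop checks ctx.Err() so a cancelled or timed-out spec context aborts the one-minute PID poll instead of letting it run to completion. A standalone sketch of that polling shape (the function and variable names here are illustrative, not the vendored API; it assumes the standard "context" and "time" imports):

// Illustrative helper: poll every two seconds for up to a minute, but stop
// as soon as the caller's context is done.
func pidChanged(ctx context.Context, oldPid string, currentPid func() string) (bool, error) {
    for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(2 * time.Second) {
        if err := ctx.Err(); err != nil {
            return false, err // spec context cancelled or deadline exceeded
        }
        if currentPid() != oldPid {
            return true, nil
        }
    }
    return false, nil
}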
22
vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go
generated
vendored
@ -48,11 +48,11 @@ var (
)

// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
func WaitForSnapshotReady(ctx context.Context, c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)

if successful := WaitUntil(poll, timeout, func() bool {
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(ctx, snapshotName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
return false
@ -80,12 +80,12 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p

// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
func GetSnapshotContentFromSnapshot(ctx context.Context, dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
err := WaitForSnapshotReady(ctx, dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
framework.ExpectNoError(err)

vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(ctx, snapshot.GetName(), metav1.GetOptions{})

snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
@ -93,7 +93,7 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured
framework.Logf("snapshotContentName %s", snapshotContentName)
framework.ExpectNoError(err)

vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
vscontent, err := dc.Resource(SnapshotContentGVR).Get(ctx, snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)

return vscontent
@ -101,9 +101,9 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured
}

// DeleteSnapshotWithoutWaiting deletes a VolumeSnapshot and return directly without waiting
func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName string) error {
func DeleteSnapshotWithoutWaiting(ctx context.Context, dc dynamic.Interface, ns string, snapshotName string) error {
ginkgo.By("deleting the snapshot")
err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(ctx, snapshotName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@ -111,15 +111,15 @@ func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName
}

// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
func DeleteAndWaitSnapshot(ctx context.Context, dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
var err error
err = DeleteSnapshotWithoutWaiting(dc, ns, snapshotName)
err = DeleteSnapshotWithoutWaiting(ctx, dc, ns, snapshotName)
if err != nil {
return err
}

ginkgo.By("checking the Snapshot has been deleted")
err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout)
err = WaitForNamespacedGVRDeletion(ctx, dc, SnapshotGVR, ns, snapshotName, poll, timeout)

return err
}
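Every context.TODO() in the snapshot helpers' dynamic-client calls is replaced by the caller's ctx, so cancellation reaches the API requests inside the poll closures. A hedged sketch of the kind of readiness check such a closure can perform (error handling trimmed; dc, ns, name, and SnapshotGVR are assumed to be in scope as above):

// Sketch: fetch the VolumeSnapshot with the caller's ctx and read status.readyToUse.
snapshot, err := dc.Resource(SnapshotGVR).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
    return false
}
ready, found, _ := unstructured.NestedBool(snapshot.Object, "status", "readyToUse")
return found && ready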
139
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
@ -75,7 +75,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
}

// getKubeletMainPid return the Main PID of the Kubelet Process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
func getKubeletMainPid(ctx context.Context, nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
@ -86,7 +86,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
sshResult, err := e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
@ -95,7 +95,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}

// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()

@ -103,7 +103,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
KubeletCommand(ctx, KRestart, c, clientPod)

ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
@ -112,7 +112,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
}

// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()

@ -120,7 +120,7 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
KubeletCommand(ctx, KRestart, c, clientPod)

ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
@ -132,20 +132,20 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
// forceDelete is true indicating whether the pod is forcefully deleted.
// checkSubpath is true indicating whether the subpath should be checked.
// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(c, clientPod)
func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(ctx, c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"

ginkgo.By("Expecting the volume mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
@ -157,11 +157,9 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
KubeletCommand(ctx, KStop, c, clientPod)

if secondPod != nil {
ginkgo.By("Starting the second pod")
@ -171,15 +169,15 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f

ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err)

ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
KubeletCommand(ctx, KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
if err != nil {
framework.ExpectNoError(err, "Expected pod to be not found.")
}
@ -192,7 +190,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f

if secondPod != nil {
ginkgo.By("Waiting for the second pod.")
err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
err = e2epod.WaitForPodRunningInNamespace(ctx, c, secondPod)
framework.ExpectNoError(err, "while waiting for the second pod Running")

ginkgo.By("Getting the second pod uuid.")
@ -200,7 +198,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectNoError(err, "getting the second UID")

ginkgo.By("Expecting the volume mount to be found in the second pod.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
@ -209,12 +207,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "when waiting for the second pod to disappear")
}

ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
@ -222,7 +220,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f

if checkSubpath {
ginkgo.By("Expecting the volume subpath mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
@ -232,64 +230,62 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
}

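The removed defer block and the added ginkgo.DeferCleanup line serve the same purpose: restart the kubelet even if the test fails. DeferCleanup takes the function plus its remaining arguments and, because KubeletCommand's first parameter is now a context.Context, Ginkgo injects a cleanup context itself, which is why no ctx appears in the call:

// As added above; the context argument is supplied by Ginkgo at cleanup time,
// and KStart, c, clientPod are forwarded as the remaining parameters.
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)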
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath)
func TestVolumeUnmountsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, false, nil, volumePath)
}

// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath)
func TestVolumeUnmountsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, false, nil, volumePath)
}

// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(c, clientPod)
func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(ctx, c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"

// Creating command to check whether path exists
podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) {
podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd)
}
// Directories in the global directory have unpredictable names, however, device symlinks
// have the same name as pod.UID. So just find anything with pod.UID name.
globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) {
globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd)
}

ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))

// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
KubeletCommand(ctx, KStop, c, clientPod)

ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err, "Failed to delete pod.")

ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
KubeletCommand(ctx, KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "Expected pod to be not found.")

if forceDelete {
@ -299,13 +295,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}

ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")

ginkgo.By("Expecting the symlinks from global map path not to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.")
@ -314,17 +310,17 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}

// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath)
func TestVolumeUnmapsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, devicePath)
}

// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath)
func TestVolumeUnmapsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, devicePath)
}

// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -362,16 +358,14 @@ func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns,
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow))
}

// StartExternalProvisioner create external provisioner pod
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
func StartExternalProvisioner(ctx context.Context, c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)

provisionerPod := &v1.Pod{
@ -432,21 +426,21 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa
},
},
}
provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
provisionerPod, err := podClient.Create(ctx, provisionerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)

framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, c, provisionerPod))

ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
pod, err := podClient.Get(ctx, provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)

return pod
}

func isSudoPresent(nodeIP string, provider string) bool {
func isSudoPresent(ctx context.Context, nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
sshResult, err := e2essh.SSH(ctx, "sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
@ -562,8 +556,8 @@ func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
}

// findMountPoints returns all mount points on given node under specified directory.
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
func findMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(ctx, fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
framework.ExpectNoError(err, "Encountered HostExec error.")
var mountPoints []string
if err != nil {
@ -578,16 +572,16 @@ func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
}

// FindVolumeGlobalMountPoints returns all volume global mount points on the node of given pod.
func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
func FindVolumeGlobalMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(ctx, hostExec, node, "/var/lib/kubelet/plugins")...)
}

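FindVolumeGlobalMountPoints keeps its behavior but now forwards the caller's ctx to the host-exec helper. A possible usage sketch in a spec, comparing the global mount points before and after a scenario (hostExec and node are assumed fixtures; this is illustrative, not code from the diff):

// Sketch: record mount points, run the scenario, then assert nothing leaked.
mountsBefore := FindVolumeGlobalMountPoints(ctx, hostExec, node)
// ... run the volume scenario under test ...
mountsAfter := FindVolumeGlobalMountPoints(ctx, hostExec, node)
gomega.Expect(mountsAfter).To(gomega.Equal(mountsBefore), "expected volume global mount points to be cleaned up")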
// CreateDriverNamespace creates a namespace for CSI driver installation.
// The namespace is still tracked and ensured that gets deleted when test terminates.
func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
func CreateDriverNamespace(ctx context.Context, f *framework.Framework) *v1.Namespace {
ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.Namespace.Name))
// The driver namespace will be bound to the test namespace in the prefix
namespace, err := f.CreateNamespace(f.Namespace.Name, map[string]string{
namespace, err := f.CreateNamespace(ctx, f.Namespace.Name, map[string]string{
"e2e-framework": f.BaseName,
"e2e-test-namespace": f.Namespace.Name,
})
@ -595,7 +589,7 @@ func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {

if framework.TestContext.VerifyServiceAccount {
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
framework.ExpectNoError(err)
} else {
framework.Logf("Skipping waiting for service account")
@ -604,11 +598,11 @@ func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
}

// WaitForGVRDeletion waits until a non-namespaced object has been deleted
func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)

if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Get(context.TODO(), objectName, metav1.GetOptions{})
_, err := c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %v is not found and has been deleted", gvr.Resource, objectName)
return true
@ -627,11 +621,11 @@ func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ob
}

// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)

if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Namespace(ns).Get(context.TODO(), objectName, metav1.GetOptions{})
_, err := c.Resource(gvr).Namespace(ns).Get(ctx, objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %s is not found in namespace %s and has been deleted", gvr.Resource, objectName, ns)
return true
@ -651,6 +645,7 @@ func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionRe

// WaitUntil runs checkDone until a timeout is reached
func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool {
// TODO (pohly): replace with gomega.Eventually
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
if checkDone() {
framework.Logf("WaitUntil finished successfully after %v", time.Since(start))
@ -716,8 +711,8 @@ func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string,
}

// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func DeleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
func DeleteStorageClass(ctx context.Context, cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(ctx, className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
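DeleteStorageClass keeps its idempotent shape with the new ctx parameter: a NotFound error from the delete is swallowed so cleanup can run repeatedly without failing. The same pattern, spelled out as a generic sketch (cs and className are assumed to be in scope):

// Generic idempotent-delete sketch mirroring the function above.
err := cs.StorageV1().StorageClasses().Delete(ctx, className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
    return err
}
return nil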