Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13)
rebase: update kubernetes and libraries to v1.22.0 version

Kubernetes v1.22 has been released, and this updates the Ceph CSI
dependencies to use the same version.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

Committed by: mergify[bot]
Parent: e077c1fdf5
Commit: aa698bc3e1
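
Most of the vendored changes below replace github.com/pkg/errors with the standard library's error wrapping (Go 1.13+). A minimal sketch of the migration pattern; the decode function and sentinel here are illustrative stand-ins, not code from this diff:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotSupported = errors.New("not supported")

    func decode() error { return errNotSupported }

    func main() {
        // Before (github.com/pkg/errors):
        //   return errors.Wrap(err, "decode TypeMeta")
        //   if errors.Cause(err) != errNotSupported { ... }

        // After (standard library): %w wraps the error so that
        // errors.Is and errors.As can inspect the chain.
        if err := decode(); err != nil {
            wrapped := fmt.Errorf("decode TypeMeta: %w", err)
            fmt.Println(errors.Is(wrapped, errNotSupported)) // true
        }
    }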
88 changes: vendor/k8s.io/kubernetes/test/e2e/storage/podlogs/podlogs.go (generated, vendored)
@@ -33,8 +33,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
-
-	"github.com/pkg/errors"
+	"time"
 
 	v1 "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -57,11 +56,18 @@ type LogOutput struct {
 // Matches harmless errors from pkg/kubelet/kubelet_pods.go.
 var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|waiting to start|not available)|the server could not find the requested resource`)
 
-// CopyAllLogs follows the logs of all containers in all pods,
+// CopyAllLogs is basically CopyPodLogs for all current or future pods in the given namespace ns
+func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
+	return CopyPodLogs(ctx, cs, ns, "", to)
+}
+
+// CopyPodLogs follows the logs of all containers in pod with the given podName,
 // including those that get created in the future, and writes each log
 // line as configured in the output options. It does that until the
 // context is done or until an error occurs.
 //
+// If podName is empty, it will follow all pods in the given namespace ns.
+//
 // Beware that there is currently no way to force log collection
 // before removing pods, which means that there is a known race
 // between "stop pod" and "collecting log entries". The alternative
@@ -79,10 +85,17 @@ var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|w
 // But it turned out to be rather confusing, so now a heuristic is used: if
 // log output of a container was already captured, then capturing does not
 // resume if the pod is marked for deletion.
-func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
-	watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
+func CopyPodLogs(ctx context.Context, cs clientset.Interface, ns, podName string, to LogOutput) error {
+	options := meta.ListOptions{}
+	if podName != "" {
+		options = meta.ListOptions{
+			FieldSelector: fmt.Sprintf("metadata.name=%s", podName),
+		}
+	}
+	watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), options)
+
 	if err != nil {
-		return errors.Wrap(err, "cannot create Pod event watcher")
+		return fmt.Errorf("cannot create Pod event watcher: %w", err)
 	}
 
 	go func() {
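
The hunk above narrows the pod watch with a field selector when a pod name is given. A standalone sketch of that technique against client-go; clientset construction is omitted and the function name is ours:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // watchOnePod watches a single pod by name instead of every pod in
    // the namespace, mirroring the FieldSelector used by CopyPodLogs.
    func watchOnePod(ctx context.Context, cs kubernetes.Interface, ns, podName string) error {
        options := metav1.ListOptions{
            FieldSelector: fmt.Sprintf("metadata.name=%s", podName),
        }
        watcher, err := cs.CoreV1().Pods(ns).Watch(ctx, options)
        if err != nil {
            return fmt.Errorf("cannot create Pod event watcher: %w", err)
        }
        defer watcher.Stop()
        for {
            select {
            case e := <-watcher.ResultChan():
                // A real consumer would check for channel closure and
                // inspect e.Object; this sketch only logs the event type.
                fmt.Printf("event %s for pod %s/%s\n", e.Type, ns, podName)
            case <-ctx.Done():
                return nil
            }
        }
    }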
@@ -96,7 +109,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 		m.Lock()
 		defer m.Unlock()
 
-		pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), meta.ListOptions{})
+		pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), options)
 		if err != nil {
 			if to.StatusWriter != nil {
 				fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err)
@@ -252,18 +265,42 @@ func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opt
 }
 
 // WatchPods prints pod status events for a certain namespace or all namespaces
-// when namespace name is empty.
-func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
-	watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
+// when namespace name is empty. The closer can be nil if the caller doesn't want
+// the file to be closed when watching stops.
+func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer, toCloser io.Closer) (finalErr error) {
+	defer func() {
+		if finalErr != nil && toCloser != nil {
+			toCloser.Close()
+		}
+	}()
+
+	pods, err := cs.CoreV1().Pods(ns).Watch(context.Background(), meta.ListOptions{})
 	if err != nil {
-		return errors.Wrap(err, "cannot create Pod event watcher")
+		return fmt.Errorf("cannot create Pod watcher: %w", err)
 	}
+	defer func() {
+		if finalErr != nil {
+			pods.Stop()
+		}
+	}()
+
+	events, err := cs.CoreV1().Events(ns).Watch(context.Background(), meta.ListOptions{})
+	if err != nil {
+		return fmt.Errorf("cannot create Event watcher: %w", err)
+	}
 
 	go func() {
-		defer watcher.Stop()
+		defer func() {
+			pods.Stop()
+			events.Stop()
+			if toCloser != nil {
+				toCloser.Close()
+			}
+		}()
 		timeFormat := "15:04:05.000"
 		for {
 			select {
-			case e := <-watcher.ResultChan():
+			case e := <-pods.ResultChan():
 				if e.Object == nil {
 					continue
 				}
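
The rewritten WatchPods uses a named return value so that deferred cleanups fire only on failure. A distilled sketch of that ownership pattern, with assumed openA/openB placeholders standing in for the pod and event watchers:

    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    // setup acquires two resources; the deferred check releases the
    // first one only if a later step fails, as WatchPods does with the
    // watchers and the optional closer.
    func setup(openA, openB func() (io.Closer, error)) (finalErr error) {
        a, err := openA()
        if err != nil {
            return fmt.Errorf("open A: %w", err)
        }
        defer func() {
            if finalErr != nil {
                a.Close() // on success, ownership passes to the caller
            }
        }()

        if _, err = openB(); err != nil {
            return fmt.Errorf("open B: %w", err)
        }
        return nil
    }

    func main() {
        good := func() (io.Closer, error) { return io.NopCloser(nil), nil }
        bad := func() (io.Closer, error) { return nil, errors.New("boom") }
        fmt.Println(setup(good, bad)) // open B: boom
    }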
@@ -274,7 +311,8 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
 			}
 			buffer := new(bytes.Buffer)
 			fmt.Fprintf(buffer,
-				"pod event: %s: %s/%s %s: %s %s\n",
+				"%s pod: %s: %s/%s %s: %s %s\n",
 				time.Now().Format(timeFormat),
+				e.Type,
 				pod.Namespace,
 				pod.Name,
@@ -300,7 +338,29 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
 				fmt.Fprintf(buffer, "\n")
 			}
 			to.Write(buffer.Bytes())
+		case e := <-events.ResultChan():
+			if e.Object == nil {
+				continue
+			}
+
+			event, ok := e.Object.(*v1.Event)
+			if !ok {
+				continue
+			}
+			to.Write([]byte(fmt.Sprintf("%s event: %s/%s %s: %s %s: %s (%v - %v)\n",
+				time.Now().Format(timeFormat),
+				event.InvolvedObject.APIVersion,
+				event.InvolvedObject.Kind,
+				event.InvolvedObject.Name,
+				event.Source.Component,
+				event.Type,
+				event.Message,
+				event.FirstTimestamp,
+				event.LastTimestamp,
+			)))
 		case <-ctx.Done():
 			to.Write([]byte(fmt.Sprintf("%s ==== stopping pod watch ====\n",
 				time.Now().Format(timeFormat))))
 			return
 		}
 	}
49 changes: vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go (generated, vendored)
@@ -20,10 +20,9 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 
-	"github.com/pkg/errors"
-
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -58,17 +57,17 @@ func LoadFromManifests(files ...string) ([]interface{}, error) {
 		// Ignore any additional fields for now, just determine what we have.
 		var what What
 		if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil {
-			return errors.Wrap(err, "decode TypeMeta")
+			return fmt.Errorf("decode TypeMeta: %w", err)
 		}
 
 		factory := factories[what]
 		if factory == nil {
-			return errors.Errorf("item of type %+v not supported", what)
+			return fmt.Errorf("item of type %+v not supported", what)
 		}
 
 		object := factory.New()
 		if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, object); err != nil {
-			return errors.Wrapf(err, "decode %+v", what)
+			return fmt.Errorf("decode %+v: %w", what, err)
 		}
 		items = append(items, object)
 		return nil
@@ -96,7 +95,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
 
 		for _, item := range items {
 			if err := cb(item); err != nil {
-				return errors.Wrap(err, fileName)
+				return fmt.Errorf("%s: %w", fileName, err)
 			}
 		}
 	}
@@ -173,13 +172,13 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
 			if err == nil {
 				done = true
 				break
-			} else if errors.Cause(err) != errorItemNotSupported {
+			} else if !errors.Is(err, errorItemNotSupported) {
 				result = err
 				break
 			}
 		}
 		if result == nil && !done {
-			result = errors.Errorf("item of type %T not supported", item)
+			result = fmt.Errorf("item of type %T not supported", item)
 			break
 		}
 	}
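
This hunk swaps errors.Cause for the standard errors.Is, which unwraps through %w chains. A small illustration with a stand-in sentinel and factory:

    package main

    import (
        "errors"
        "fmt"
    )

    // errorItemNotSupported mirrors the sentinel CreateItems checks for.
    var errorItemNotSupported = errors.New("not supported")

    func create() error {
        // A factory that rejects the item wraps the sentinel with context.
        return fmt.Errorf("serviceAccountFactory: %w", errorItemNotSupported)
    }

    func main() {
        err := create()
        // errors.Is walks the %w chain, so the sentinel is detected even
        // after wrapping; errors.Cause only understood pkg/errors wrappers.
        fmt.Println(errors.Is(err, errorItemNotSupported)) // true
    }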
@@ -198,7 +197,7 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
 func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
 	items, err := LoadFromManifests(files...)
 	if err != nil {
-		return nil, errors.Wrap(err, "CreateFromManifests")
+		return nil, fmt.Errorf("CreateFromManifests: %w", err)
 	}
 	if err := PatchItems(f, driverNamespace, items...); err != nil {
 		return nil, err
@@ -337,21 +336,21 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
 		PatchName(f, &item.Name)
 		for i := range item.Subjects {
 			if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
-				return errors.Wrapf(err, "%T", f)
+				return fmt.Errorf("%T: %w", f, err)
 			}
 		}
 		if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
-			return errors.Wrapf(err, "%T", f)
+			return fmt.Errorf("%T: %w", f, err)
 		}
 	case *rbacv1.RoleBinding:
 		PatchNamespace(f, driverNamespace, &item.Namespace)
 		for i := range item.Subjects {
 			if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
-				return errors.Wrapf(err, "%T", f)
+				return fmt.Errorf("%T: %w", f, err)
 			}
 		}
 		if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
-			return errors.Wrapf(err, "%T", f)
+			return fmt.Errorf("%T: %w", f, err)
 		}
 	case *v1.Service:
 		PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
@@ -372,7 +371,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
 			return err
 		}
 	default:
-		return errors.Errorf("missing support for patching item of type %T", item)
+		return fmt.Errorf("missing support for patching item of type %T", item)
 	}
 	return nil
 }
@@ -395,7 +394,7 @@ func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i
 	}
 	client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create ServiceAccount")
+		return nil, fmt.Errorf("create ServiceAccount: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -417,7 +416,7 @@ func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i in
 	framework.Logf("Define cluster role %v", item.GetName())
 	client := f.ClientSet.RbacV1().ClusterRoles()
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create ClusterRole")
+		return nil, fmt.Errorf("create ClusterRole: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -438,7 +437,7 @@ func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespac
 
 	client := f.ClientSet.RbacV1().ClusterRoleBindings()
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create ClusterRoleBinding")
+		return nil, fmt.Errorf("create ClusterRoleBinding: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -459,7 +458,7 @@ func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface
 
 	client := f.ClientSet.RbacV1().Roles(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create Role")
+		return nil, fmt.Errorf("create Role: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -480,7 +479,7 @@ func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i in
 
 	client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create RoleBinding")
+		return nil, fmt.Errorf("create RoleBinding: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -501,7 +500,7 @@ func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interf
 
 	client := f.ClientSet.CoreV1().Services(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create Service")
+		return nil, fmt.Errorf("create Service: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -522,7 +521,7 @@ func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i in
 
 	client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create StatefulSet")
+		return nil, fmt.Errorf("create StatefulSet: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -543,7 +542,7 @@ func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte
 
 	client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create DaemonSet")
+		return nil, fmt.Errorf("create DaemonSet: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -564,7 +563,7 @@ func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i i
 
 	client := f.ClientSet.StorageV1().StorageClasses()
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create StorageClass")
+		return nil, fmt.Errorf("create StorageClass: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -585,7 +584,7 @@ func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte
 
 	client := f.ClientSet.StorageV1().CSIDrivers()
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create CSIDriver")
+		return nil, fmt.Errorf("create CSIDriver: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@@ -606,7 +605,7 @@ func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interfa
 
 	client := f.ClientSet.CoreV1().Secrets(ns.Name)
 	if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
-		return nil, errors.Wrap(err, "create Secret")
+		return nil, fmt.Errorf("create Secret: %w", err)
 	}
 	return func() error {
 		return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
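
Each factory above returns a cleanup closure next to its error, and the caller collects and runs those closures to tear the items down again. A condensed sketch of that create/cleanup contract; the item names are placeholders:

    package main

    import "fmt"

    // create makes one item and returns its destructor, following the
    // (func() error, error) shape of the *Factory.Create methods.
    func create(name string) (func() error, error) {
        fmt.Println("create", name)
        return func() error {
            fmt.Println("delete", name)
            return nil
        }, nil
    }

    func main() {
        var cleanups []func() error
        for _, name := range []string{"ServiceAccount", "ClusterRole", "Service"} {
            destructor, err := create(name)
            if err != nil {
                break
            }
            cleanups = append(cleanups, destructor)
        }
        // Tear down in reverse creation order.
        for i := len(cleanups) - 1; i >= 0; i-- {
            _ = cleanups[i]()
        }
    }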
26 changes: vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go (generated, vendored)
@@ -19,6 +19,9 @@ package utils
 import (
 	"context"
 	"fmt"
+	"io"
+	"os"
+	"path"
 	"regexp"
 	"strings"
 	"time"
@@ -46,6 +49,8 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
 
 	ns := driverNamespace.Name
 
+	podEventLog := ginkgo.GinkgoWriter
+	var podEventLogCloser io.Closer
 	to := podlogs.LogOutput{
 		StatusWriter: ginkgo.GinkgoWriter,
 	}
@@ -69,17 +74,22 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
 		// keeps each directory name smaller (the full test
 		// name at one point exceeded 256 characters, which was
 		// too much for some filesystems).
-		to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
-			strings.Join(components, "/") + "/"
+		logDir := framework.TestContext.ReportDir + "/" + strings.Join(components, "/")
+		to.LogPathPrefix = logDir + "/"
+
+		err := os.MkdirAll(logDir, 0755)
+		framework.ExpectNoError(err, "create pod log directory")
+		f, err := os.Create(path.Join(logDir, "pod-event.log"))
+		framework.ExpectNoError(err, "create pod events log file")
+		podEventLog = f
+		podEventLogCloser = f
 	}
 	podlogs.CopyAllLogs(ctx, cs, ns, to)
 
-	// pod events are something that the framework already collects itself
-	// after a failed test. Logging them live is only useful for interactive
-	// debugging, not when we collect reports.
-	if framework.TestContext.ReportDir == "" {
-		podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
-	}
+	// The framework doesn't know about the driver pods because of
+	// the separate namespace. Therefore we always capture the
+	// events ourselves.
+	podlogs.WatchPods(ctx, cs, ns, podEventLog, podEventLogCloser)
 
 	return cancel
 }
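
StartPodLogs now routes pod events either to the Ginkgo writer or to a per-test file whose closer is handed to WatchPods. A compact sketch of that writer/closer handoff; eventSink and its paths are ours, not part of the diff:

    package main

    import (
        "fmt"
        "io"
        "os"
        "path"
    )

    // eventSink returns the destination for pod events plus an optional
    // closer: a file under reportDir when one is configured (the watcher
    // closes it when it stops), otherwise stdout with nothing to close.
    func eventSink(reportDir string) (io.Writer, io.Closer, error) {
        if reportDir == "" {
            return os.Stdout, nil, nil
        }
        if err := os.MkdirAll(reportDir, 0755); err != nil {
            return nil, nil, fmt.Errorf("create pod log directory: %w", err)
        }
        f, err := os.Create(path.Join(reportDir, "pod-event.log"))
        if err != nil {
            return nil, nil, fmt.Errorf("create pod events log file: %w", err)
        }
        return f, f, nil
    }

    func main() {
        w, c, err := eventSink("") // stdout, no closer
        if err != nil {
            panic(err)
        }
        fmt.Fprintln(w, "pod event: example")
        if c != nil {
            c.Close()
        }
    }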
4 changes: vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go (generated, vendored)
@@ -130,7 +130,6 @@ func GenerateSnapshotClassSpec(
 	snapshotter string,
 	parameters map[string]string,
 	ns string,
-	suffix string,
 ) *unstructured.Unstructured {
 	snapshotClass := &unstructured.Unstructured{
 		Object: map[string]interface{}{
@@ -138,8 +137,7 @@ func GenerateSnapshotClassSpec(
 			"apiVersion": SnapshotAPIVersion,
 			"metadata": map[string]interface{}{
 				// Name must be unique, so let's base it on namespace name and use GenerateName
-				// TODO(#96234): Remove unnecessary suffix.
-				"name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
+				"name": names.SimpleNameGenerator.GenerateName(ns),
 			},
 			"driver":     snapshotter,
 			"parameters": parameters,
11 changes: vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go (generated, vendored)
@@ -24,6 +24,7 @@ import (
 	"math"
 	"math/rand"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"time"
 
@@ -571,6 +572,16 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent
 	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
 }
 
+// GetSectorSize returns the sector size of the device.
+func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
+	stdout, _, err := e2evolume.PodExec(f, pod, fmt.Sprintf("blockdev --getss %s", device))
+	framework.ExpectNoError(err, "Failed to get sector size of %s", device)
+	ss, err := strconv.Atoi(stdout)
+	framework.ExpectNoError(err, "Sector size returned by blockdev command isn't integer value.")
+
+	return ss
+}
+
 // findMountPoints returns all mount points on given node under specified directory.
 func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
 	result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)