rebase: update kubernetes to v1.21.2

Updated Kubernetes packages to the latest release.
The resizefs package has been included in the k8s.io/mount-utils
package; updated the code to use it.

Updates: #1968

Signed-off-by: Rakshith R <rar@redhat.com>
Author: Rakshith R, 2021-06-25 10:29:51 +05:30 (committed by mergify[bot])
parent 8ce5ae16c1, commit 1b23d78113
1115 changed files with 98825 additions and 12365 deletions
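
As a rough illustration of the resizefs change described above (not taken from this
diff; the package name and helper function below are assumptions), callers now build
the resizer from k8s.io/mount-utils:

// Sketch only: using the ResizeFs helper that ships with k8s.io/mount-utils.
package util // hypothetical package name

import (
	"fmt"

	mount "k8s.io/mount-utils"
	utilexec "k8s.io/utils/exec"
)

// expandFilesystem grows the filesystem on devicePath mounted at mountPath.
func expandFilesystem(devicePath, mountPath string) error {
	// With k8s.io/mount-utils the resizer is created from an exec interface.
	resizer := mount.NewResizeFs(utilexec.New())
	if _, err := resizer.Resize(devicePath, mountPath); err != nil {
		return fmt.Errorf("failed to resize filesystem on %s: %w", devicePath, err)
	}
	return nil
}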


@@ -0,0 +1,310 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package podlogs enables live capturing of all events and log
// messages for some or all pods in a namespace as they get generated.
// This helps debugging both a running test (what is currently going
// on?) and the output of a CI run (events appear in chronological
// order and output that normally isn't available like the command
// stdout messages are available).
package podlogs
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"path"
"regexp"
"strings"
"sync"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
)
// LogOutput determines where output from CopyAllLogs goes.
type LogOutput struct {
// If not nil, errors will be logged here.
StatusWriter io.Writer
// If not nil, all output goes to this writer with "<pod>/<container>:" as prefix.
LogWriter io.Writer
// Base directory for one log file per container.
// The full path of each log file will be <log path prefix><pod>-<container>.log.
LogPathPrefix string
}
// Matches harmless errors from pkg/kubelet/kubelet_pods.go.
var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|waiting to start|not available)|the server could not find the requested resource`)
// CopyAllLogs follows the logs of all containers in all pods,
// including those that get created in the future, and writes each log
// line as configured in the output options. It does that until the
// context is done or until an error occurs.
//
// Beware that there is currently no way to force log collection
// before removing pods, which means that there is a known race
// between "stop pod" and "collecting log entries". The alternative
// would be a blocking function which collects logs from all currently
// running pods, but that then would have the disadvantage that
// already deleted pods aren't covered.
//
// Another race occurs when a pod shuts down. Logging stops, but if
// then the pod is not removed from the apiserver quickly enough, logging
// resumes and dumps the old log again. Previously, this was allowed based
// on the assumption that it is better to log twice than miss log messages
// of pods that started and immediately terminated or when logging temporarily
// stopped.
//
// But it turned out to be rather confusing, so now a heuristic is used: if
// log output of a container was already captured, then capturing does not
// resume if the pod is marked for deletion.
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
}
go func() {
var m sync.Mutex
// Key is pod/container name, true if currently logging it.
active := map[string]bool{}
// Key is pod/container/container-id, true if we have ever started to capture its output.
started := map[string]bool{}
check := func() {
m.Lock()
defer m.Unlock()
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), meta.ListOptions{})
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err)
}
return
}
for _, pod := range pods.Items {
for i, c := range pod.Spec.Containers {
// sanity check, array should have entry for each container
if len(pod.Status.ContainerStatuses) <= i {
continue
}
name := pod.ObjectMeta.Name + "/" + c.Name
id := name + "/" + pod.Status.ContainerStatuses[i].ContainerID
if active[name] ||
// If we have worked on a container before and it has now terminated, then
// there cannot be any new output and we can ignore it.
(pod.Status.ContainerStatuses[i].State.Terminated != nil &&
started[id]) ||
// State.Terminated might not have been updated although the container already
// stopped running. Also check whether the pod is deleted.
(pod.DeletionTimestamp != nil && started[id]) ||
// Don't attempt to get logs for a container unless it is running or has terminated.
// Trying to get a log would just end up with an error that we would have to suppress.
(pod.Status.ContainerStatuses[i].State.Running == nil &&
pod.Status.ContainerStatuses[i].State.Terminated == nil) {
continue
}
readCloser, err := logsForPod(ctx, cs, ns, pod.ObjectMeta.Name,
&v1.PodLogOptions{
Container: c.Name,
Follow: true,
})
if err != nil {
// We do get "normal" errors here, like trying to read too early.
// We can ignore those.
if to.StatusWriter != nil &&
expectedErrors.FindStringIndex(err.Error()) == nil {
fmt.Fprintf(to.StatusWriter, "WARNING: pod log: %s: %s\n", name, err)
}
continue
}
// Determine where we write. If this fails, we intentionally return without clearing
// the active[name] flag, which prevents trying over and over again to
// create the output file.
var out io.Writer
var closer io.Closer
var prefix string
if to.LogWriter != nil {
out = to.LogWriter
nodeName := pod.Spec.NodeName
if len(nodeName) > 10 {
nodeName = nodeName[0:4] + ".." + nodeName[len(nodeName)-4:]
}
prefix = name + "@" + nodeName + ": "
} else {
var err error
filename := to.LogPathPrefix + pod.ObjectMeta.Name + "-" + c.Name + ".log"
err = os.MkdirAll(path.Dir(filename), 0755)
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: pod log: create directory for %s: %s\n", filename, err)
}
return
}
// The test suite might run the same test multiple times,
// so we have to append here.
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: pod log: create file %s: %s\n", filename, err)
}
return
}
closer = file
out = file
}
go func() {
if closer != nil {
defer closer.Close()
}
first := true
defer func() {
m.Lock()
// If we never printed anything, then also skip the final message.
if !first {
if prefix != "" {
fmt.Fprintf(out, "%s==== end of pod log ====\n", prefix)
} else {
fmt.Fprintf(out, "==== end of pod log for container %s ====\n", name)
}
}
active[name] = false
m.Unlock()
readCloser.Close()
}()
scanner := bufio.NewScanner(readCloser)
for scanner.Scan() {
line := scanner.Text()
// Filter out the expected "end of stream" error message,
// it would just confuse developers who don't know about it.
// Same for attempts to read logs from a container that
// isn't ready (yet?!).
if !strings.HasPrefix(line, "rpc error: code = Unknown desc = Error: No such container:") &&
!strings.HasPrefix(line, "unable to retrieve container logs for ") &&
!strings.HasPrefix(line, "Unable to retrieve container logs for ") {
if first {
// Because the same log might be written to multiple times
// in different test instances, log an extra line to separate them.
// Also provides some useful extra information.
if prefix == "" {
fmt.Fprintf(out, "==== start of pod log for container %s ====\n", name)
} else {
fmt.Fprintf(out, "%s==== start of pod log ====\n", prefix)
}
first = false
}
fmt.Fprintf(out, "%s%s\n", prefix, line)
}
}
}()
active[name] = true
started[id] = true
}
}
}
// Watch events to see whether we can start logging
// and log interesting ones.
check()
for {
select {
case <-watcher.ResultChan():
check()
case <-ctx.Done():
return
}
}
}()
return nil
}
// logsForPod starts reading the logs for a certain pod. If the pod has more than one
// container, opts.Container must be set. Reading stops when the context is done.
// The stream includes formatted error messages and ends with
// rpc error: code = Unknown desc = Error: No such container: 41a...
// when the pod gets deleted while streaming.
func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) {
return cs.CoreV1().Pods(ns).GetLogs(pod, opts).Stream(ctx)
}
// WatchPods prints pod status events for a certain namespace or all namespaces
// when namespace name is empty.
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
}
go func() {
defer watcher.Stop()
for {
select {
case e := <-watcher.ResultChan():
if e.Object == nil {
continue
}
pod, ok := e.Object.(*v1.Pod)
if !ok {
continue
}
buffer := new(bytes.Buffer)
fmt.Fprintf(buffer,
"pod event: %s: %s/%s %s: %s %s\n",
e.Type,
pod.Namespace,
pod.Name,
pod.Status.Phase,
pod.Status.Reason,
pod.Status.Conditions,
)
for _, cst := range pod.Status.ContainerStatuses {
fmt.Fprintf(buffer, " %s: ", cst.Name)
if cst.State.Waiting != nil {
fmt.Fprintf(buffer, "WAITING: %s - %s",
cst.State.Waiting.Reason,
cst.State.Waiting.Message,
)
} else if cst.State.Running != nil {
fmt.Fprintf(buffer, "RUNNING")
} else if cst.State.Terminated != nil {
fmt.Fprintf(buffer, "TERMINATED: %s - %s",
cst.State.Terminated.Reason,
cst.State.Terminated.Message,
)
}
fmt.Fprintf(buffer, "\n")
}
to.Write(buffer.Bytes())
case <-ctx.Done():
return
}
}
}()
return nil
}
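
A minimal, hypothetical standalone caller of the package above (the kubeconfig
handling and the fixed "default" namespace are assumptions, not part of this file)
could wire the output up like this:

// Sketch: stream container logs and pod events from one namespace until the
// context expires.
package main

import (
	"context"
	"os"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"k8s.io/kubernetes/test/e2e/storage/podlogs"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	// All container output goes to stdout, status messages and errors to stderr.
	to := podlogs.LogOutput{StatusWriter: os.Stderr, LogWriter: os.Stdout}
	if err := podlogs.CopyAllLogs(ctx, cs, "default", to); err != nil {
		panic(err)
	}
	if err := podlogs.WatchPods(ctx, cs, "default", os.Stdout); err != nil {
		panic(err)
	}
	<-ctx.Done()
}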


@@ -1,62 +0,0 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"create.go",
"deployment.go",
"ebs.go",
"framework.go",
"host_exec.go",
"local.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/exec:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -41,7 +41,6 @@ import (
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the provisioner binary accepts a --provisioner parameter
// - the paths inside the container are either fixed
// and don't need to be patched (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
@@ -86,10 +85,6 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
switch container.Name {
case o.DriverContainerName:
container.Args = append(container.Args, o.DriverContainerArguments...)
case o.ProvisionerContainerName:
// Driver name is expected to be the same
// as the provisioner here.
container.Args = append(container.Args, "--provisioner="+o.NewDriverName)
}
}
}


@@ -106,7 +106,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)
return pod
}

vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go (generated, vendored, new file, 182 lines)

@@ -0,0 +1,182 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
)
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet
ns := driverNamespace.Name
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
// Clean up each individual component text such that
// it contains only characters that are valid as file
// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// Each component name maps to a directory. This
// avoids cluttering the root artifact directory and
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
strings.Join(components, "/") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
}
return cancel
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
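
In an e2e suite, the helpers above are typically driven from a ginkgo spec; a
small hypothetical sketch (the spec name and structure are assumptions, not part
of this file):

// Sketch: capture pod logs for the duration of a test using StartPodLogs.
package storage

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

var _ = ginkgo.Describe("log capture example", func() {
	f := framework.NewDefaultFramework("log-capture")

	ginkgo.It("captures pod logs while the test runs", func() {
		// Stream logs and events from the test namespace; the returned
		// function stops the capture when the test body finishes.
		cleanup := utils.StartPodLogs(f, f.Namespace)
		defer cleanup()

		// ... the test would create its pods and assertions here ...
	})
})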


@@ -0,0 +1,151 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// SnapshotGroup is the snapshot CRD api group
SnapshotGroup = "snapshot.storage.k8s.io"
// SnapshotAPIVersion is the snapshot CRD api version
SnapshotAPIVersion = "snapshot.storage.k8s.io/v1"
)
var (
// SnapshotGVR is GroupVersionResource for volumesnapshots
SnapshotGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshots"}
// SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses
SnapshotClassGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotclasses"}
// SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents
SnapshotContentGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotcontents"}
)
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
if successful := WaitUntil(poll, timeout, func() bool {
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
return false
}
status := snapshot.Object["status"]
if status == nil {
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
return false
}
value := status.(map[string]interface{})
if value["readyToUse"] == true {
framework.Logf("VolumeSnapshot %s found and is ready", snapshotName)
return true
}
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
return false
}); successful {
return nil
}
return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
}
// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
framework.ExpectNoError(err)
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
framework.Logf("received snapshotStatus %v", snapshotStatus)
framework.Logf("snapshotContentName %s", snapshotContentName)
framework.ExpectNoError(err)
vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)
return vscontent
}
// DeleteSnapshotWithoutWaiting deletes a VolumeSnapshot and returns directly without waiting
func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName string) error {
ginkgo.By("deleting the snapshot")
err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
var err error
err = DeleteSnapshotWithoutWaiting(dc, ns, snapshotName)
if err != nil {
return err
}
ginkgo.By("checking the Snapshot has been deleted")
err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout)
return err
}
// GenerateSnapshotClassSpec constructs a new SnapshotClass instance spec
// with a unique name that is based on namespace + suffix.
func GenerateSnapshotClassSpec(
snapshotter string,
parameters map[string]string,
ns string,
suffix string,
) *unstructured.Unstructured {
snapshotClass := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeSnapshotClass",
"apiVersion": SnapshotAPIVersion,
"metadata": map[string]interface{}{
// Name must be unique, so let's base it on namespace name and use GenerateName
// TODO(#96234): Remove unnecessary suffix.
"name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
},
"driver": snapshotter,
"parameters": parameters,
"deletionPolicy": "Delete",
},
}
return snapshotClass
}
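
A hypothetical standalone use of the snapshot helpers above (the driver name,
namespace, snapshot name, and kubeconfig handling are assumptions):

// Sketch: create a VolumeSnapshotClass and wait for an existing snapshot to
// become ready using the dynamic client.
package main

import (
	"context"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"

	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	dc, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Build a VolumeSnapshotClass spec for a hypothetical CSI driver and create it.
	class := utils.GenerateSnapshotClassSpec("rbd.csi.ceph.com", map[string]string{}, "default", "example")
	created, err := dc.Resource(utils.SnapshotClassGVR).Create(context.TODO(), class, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	defer dc.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), created.GetName(), metav1.DeleteOptions{})

	// Wait for a snapshot named "snap-1" in "default" to report readyToUse.
	if err := utils.WaitForSnapshotReady(dc, "default", "snap-1", 2*time.Second, 5*time.Minute); err != nil {
		panic(err)
	}
}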


@@ -21,6 +21,7 @@ import (
"crypto/sha256"
"encoding/base64"
"fmt"
"math"
"math/rand"
"path/filepath"
"strings"
@@ -32,6 +33,7 @@ import (
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -39,13 +41,11 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
// KubeletOpt type definition
@@ -59,7 +59,9 @@ const (
// KStop defines stop value
KStop KubeletOpt = "stop"
// KRestart defines restart value
KRestart KubeletOpt = "restart"
KRestart KubeletOpt = "restart"
minValidSize string = "1Ki"
maxValidSize string = "10Ei"
)
const (
@@ -67,37 +69,10 @@ const (
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
@@ -105,131 +80,6 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
// getKubeletMainPid return the Main PID of the Kubelet Process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
@@ -325,7 +175,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
if err != nil {
framework.ExpectNoError(err, "Expected pod to be not found.")
}
@@ -411,7 +261,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
@@ -446,7 +296,7 @@ func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Fra
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -489,7 +339,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
}
// StartExternalProvisioner create external provisioner pod
@@ -614,46 +464,39 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
}
}
// CheckVolumeModeOfPath check mode of volume
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}
// CheckReadWriteToPath checks that the path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
// Check that writing to directory as block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
@@ -699,8 +542,8 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten
sum := sha256.Sum256(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}
// CheckWriteToPath that file can be properly written.
@@ -724,8 +567,8 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent
encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}
// findMountPoints returns all mount points on given node under specified directory.
@@ -866,7 +709,7 @@ func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.Gr
// VerifyFilePathGidInPod verifies the expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
@@ -877,7 +720,90 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := PodExec(f, pod, cmd)
_, _, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
}
// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func DeleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}
// CreateVolumeSource creates a volume source object
func CreateVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
return &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: readOnly,
},
}
}
// TryFunc try to execute the function and return err if there is any
func TryFunc(f func()) error {
var err error
if f == nil {
return nil
}
defer func() {
if recoverError := recover(); recoverError != nil {
err = fmt.Errorf("%v", recoverError)
}
}()
f()
return err
}
// GetSizeRangesIntersection takes two instances of storage size ranges and determines the
// intersection of the intervals (if it exists) and return the minimum of the intersection
// to be used as the claim size for the test.
// If a value is not set, there is no minimum or maximum size limitation, and a default size is applied.
func GetSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error
//if SizeRange is not set, assign a minimum or maximum size
if len(first.Min) == 0 {
first.Min = minValidSize
}
if len(first.Max) == 0 {
first.Max = maxValidSize
}
if len(second.Min) == 0 {
second.Min = minValidSize
}
if len(second.Max) == 0 {
second.Max = maxValidSize
}
if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
return "", err
}
if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
return "", err
}
if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
return "", err
}
if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
return "", err
}
interSectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
// the minimum of the intersection shall be returned as the claim size
var intersectionMin resource.Quantity
if intersectionEnd-interSectionStart >= 0 { //have intersection
intersectionMin = *resource.NewQuantity(int64(interSectionStart), "BinarySI") //convert value to BinarySI format. E.g. 5Gi
// return the minimum of the intersection as the claim size
return intersectionMin.String(), nil
}
return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
}