rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies
to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author:    Madhu Rajanna
Date:      2023-02-01 18:06:36 +01:00
Committer: mergify[bot]
Parent:    e9e33fb851
Commit:    9c8de9471e
937 changed files with 75539 additions and 33050 deletions

@@ -275,6 +275,7 @@ var factories = map[What]ItemFactory{
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"ReplicaSet"}: &replicaSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
@@ -315,7 +316,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
case *rbacv1.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
-// All those names are excempt from renaming. That list could be populated by querying
+// All those names are exempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
PatchName(f, &item.Name)
@@ -382,6 +383,14 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
+case *appsv1.ReplicaSet:
+PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
+if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
+return err
+}
+if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
+return err
+}
case *apiextensionsv1.CustomResourceDefinition:
// Do nothing. Patching name to all CRDs won't always be the expected behavior.
default:
@@ -584,6 +593,27 @@ func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte
}, nil
}
+type replicaSetFactory struct{}
+func (f *replicaSetFactory) New() runtime.Object {
+return &appsv1.ReplicaSet{}
+}
+func (*replicaSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
+item, ok := i.(*appsv1.ReplicaSet)
+if !ok {
+return nil, errorItemNotSupported
+}
+client := f.ClientSet.AppsV1().ReplicaSets(ns.Name)
+if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
+return nil, fmt.Errorf("create ReplicaSet: %w", err)
+}
+return func() error {
+return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
+}, nil
+}
type storageClassFactory struct{}
func (f *storageClassFactory) New() runtime.Object {
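
Note: for readers outside the diff, a minimal sketch of the create-and-clean-up contract the new replicaSetFactory follows, written directly against client-go. The package name, function name, and parameters below are illustrative assumptions, not part of this commit.

package sketch

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createReplicaSet mirrors replicaSetFactory.Create: create the object in the
// test namespace and hand back a cleanup function with the same
// (func() error, error) shape the factory interface expects.
func createReplicaSet(cs kubernetes.Interface, ns string, rs *appsv1.ReplicaSet) (func() error, error) {
	client := cs.AppsV1().ReplicaSets(ns)
	if _, err := client.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil {
		return nil, fmt.Errorf("create ReplicaSet: %w", err)
	}
	return func() error {
		return client.Delete(context.TODO(), rs.GetName(), metav1.DeleteOptions{})
	}, nil
}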

@@ -152,6 +152,9 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
if o.FSGroupPolicy != nil {
object.Spec.FSGroupPolicy = o.FSGroupPolicy
}
+if o.SELinuxMount != nil {
+object.Spec.SELinuxMount = o.SELinuxMount
+}
}
return nil
@@ -211,4 +214,8 @@ type PatchCSIOptions struct {
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
FSGroupPolicy *storagev1.FSGroupPolicy
+// If not nil, the value to use for the CSIDriver.Spec.SELinuxMount
+// field *if* the driver deploys a CSIDriver object. Ignored
+// otherwise.
+SELinuxMount *bool
}
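
Note: the new option is copied onto the deployed CSIDriver object the same way FSGroupPolicy already is. A minimal sketch of that branch in isolation; the helper name and package are assumptions, the spec field is the real storagev1 API field:

package sketch

import storagev1 "k8s.io/api/storage/v1"

// applySELinuxMount mirrors the added branch in PatchCSIDeployment: only
// overwrite the CSIDriver spec when the caller explicitly set the option.
func applySELinuxMount(driver *storagev1.CSIDriver, seLinuxMount *bool) {
	if seLinuxMount != nil {
		driver.Spec.SELinuxMount = seLinuxMount
	}
}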

@@ -149,7 +149,7 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
}
containerName := pod.Spec.Containers[0].Name
var err error
-result.Stdout, result.Stderr, err = h.Framework.ExecWithOptions(framework.ExecOptions{
+result.Stdout, result.Stderr, err = e2epod.ExecWithOptions(h.Framework, e2epod.ExecOptions{
Command: args,
Namespace: pod.Namespace,
PodName: pod.Name,

@@ -26,7 +26,7 @@ import (
"strings"
"github.com/onsi/ginkgo/v2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)

@@ -152,7 +152,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
break
}
}
-framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
+if !isPidChanged {
+framework.Fail("Kubelet PID remained unchanged after restarting Kubelet")
+}
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}

@@ -80,9 +80,9 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p
// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
-func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
+func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
-err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
+err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
framework.ExpectNoError(err)
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
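
Note: callers of GetSnapshotContentFromSnapshot now choose the readiness timeout themselves instead of the fixed framework.SnapshotCreateTimeout. A plausible caller-side update, assuming the helper still lives in the e2e storage utils package and the timeout comes from the framework's TimeoutContext:

package sketch

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// snapshotContent passes the per-suite snapshot-create timeout explicitly.
func snapshotContent(dc dynamic.Interface, snapshot *unstructured.Unstructured, f *framework.Framework) *unstructured.Unstructured {
	return utils.GetSnapshotContentFromSnapshot(dc, snapshot, f.Timeouts.SnapshotCreate)
}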

@@ -95,45 +95,44 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
-CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written file is accessible.")
-CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
-framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
+framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}
// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
-func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-path := "/mnt/volume1"
+func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
-CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
+CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written pv is accessible.")
-CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
+CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
-framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
+framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
// checkSubpath is true indicating whether the subpath should be checked.
-func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
+// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
+func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
@@ -152,6 +151,11 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
ginkgo.By("Writing to the volume.")
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
@@ -159,6 +163,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
+if secondPod != nil {
+ginkgo.By("Starting the second pod")
+_, err = c.CoreV1().Pods(clientPod.Namespace).Create(context.TODO(), secondPod, metav1.CreateOptions{})
+framework.ExpectNoError(err, "when starting the second pod")
+}
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
@@ -180,6 +190,29 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
time.Sleep(30 * time.Second)
}
+if secondPod != nil {
+ginkgo.By("Waiting for the second pod.")
+err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
+framework.ExpectNoError(err, "while waiting for the second pod Running")
+ginkgo.By("Getting the second pod uuid.")
+secondPod, err := c.CoreV1().Pods(secondPod.Namespace).Get(context.TODO(), secondPod.Name, metav1.GetOptions{})
+framework.ExpectNoError(err, "getting the second UID")
+ginkgo.By("Expecting the volume mount to be found in the second pod.")
+result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
+e2essh.LogResult(result)
+framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
+framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+ginkgo.By("Testing that written file is accessible in the second pod.")
+CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
+err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
+framework.ExpectNoError(err, "when deleting the second pod")
+err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
+framework.ExpectNoError(err, "when waiting for the second pod to disappear")
+}
ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
@@ -195,21 +228,22 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
-func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
+func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
+TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
-func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
+func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
+TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath)
}
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
-func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
+func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
@@ -280,13 +314,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}
// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
-func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
+func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
+TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath)
}
// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
-func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
+func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
+TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath)
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
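
Note: tests that call these helpers now have to pass the mount or device path explicitly. A sketch of the caller-side update, assuming the helpers stay in the e2e storage utils package; "/mnt/volume1" matches the value the filesystem helpers previously hard-coded, and secondPod is a pod spec prepared by the caller:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func exerciseKubeletRestartHelpers(c clientset.Interface, f *framework.Framework, clientPod, secondPod *v1.Pod) {
	// Filesystem volume: the path was previously hard-coded inside the helper.
	utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, "/mnt/volume1")
	utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, "/mnt/volume1")
	// New in this change: start a second pod while kubelet is down to verify
	// the volume stays usable during the old pod's deletion.
	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, secondPod, "/mnt/volume1")
}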