From 29ddfb501b5ae7d1c613f8adebb7330e00a5edb6 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Thu, 23 Jun 2022 08:50:35 +0530 Subject: [PATCH 01/10] rebase: update minikube to v1.26.0 A new stable release of minikube is available, lets switch to it. https://github.com/kubernetes/minikube/releases/tag/v1.26.0 Signed-off-by: Prasanna Kumar Kalever --- build.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.env b/build.env index 87a7ee343..88d98bdd0 100644 --- a/build.env +++ b/build.env @@ -38,7 +38,7 @@ SNAPSHOT_VERSION=v6.0.1 HELM_VERSION=v3.9.0 # minikube settings -MINIKUBE_VERSION=v1.26.0-beta.1 +MINIKUBE_VERSION=v1.26.0 VM_DRIVER=none CHANGE_MINIKUBE_NONE_USER=true From 2df55a55a3f276ffeaac3dc36450702ce47f60f8 Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Wed, 29 Jun 2022 09:37:50 +0200 Subject: [PATCH 02/10] e2e: use `exclusive-lock` together with `lock_on_read` When using `lock_on_read`, the RBD image needs to have the `exclusive-lock` feature enabled too. Fixes: #3221 Signed-off-by: Niels de Vos --- e2e/rbd.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index cafff25b9..595131ab8 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -3747,8 +3747,9 @@ var _ = Describe("RBD", func() { e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{ - "mapOptions": "lock_on_read,queue_depth=1024", - "unmapOptions": "force", + "imageFeatures": "exclusive-lock", + "mapOptions": "lock_on_read,queue_depth=1024", + "unmapOptions": "force", }, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass: %v", err) From dbbda5473b75b277246cd9e90d2abf9eea404d4c Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Fri, 24 Jun 2022 18:25:20 +0200 Subject: [PATCH 03/10] e2e: pass non-empty Namespace/Name in deletePVCAndPV() When getting the PVC or PV failed, the returned object may contain empty values. 
If that happens, a retry uses the empty values for Namespace and Name, which will never be successful. Instead, use the Namespace and Name attributes from the original object, and not from the object returned by the Get() call. Signed-off-by: Niels de Vos --- e2e/pvc.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e/pvc.go b/e2e/pvc.go index f87a153cd..41bcb4845 100644 --- a/e2e/pvc.go +++ b/e2e/pvc.go @@ -138,8 +138,8 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v pvcToDelete.Status.String(), int(time.Since(start).Seconds())) pvcToDelete, err = c.CoreV1(). - PersistentVolumeClaims(pvcToDelete.Namespace). - Get(context.TODO(), pvcToDelete.Name, metav1.GetOptions{}) + PersistentVolumeClaims(pvc.Namespace). + Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err == nil { if pvcToDelete.Status.Phase == "" { // this is unexpected, an empty Phase is not defined @@ -176,7 +176,7 @@ func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v pvToDelete.Status.String(), int(time.Since(start).Seconds())) - pvToDelete, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvToDelete.Name, metav1.GetOptions{}) + pvToDelete, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if err == nil { return false, nil } From 5c40f1ef332300b3af9db2ab1e7b9f52caa37c66 Mon Sep 17 00:00:00 2001 From: Yati Padia Date: Thu, 2 Jun 2022 16:31:43 +0530 Subject: [PATCH 04/10] rbd: remove the clone in case of failure This commit removes the clone in case unsetAllMetadata or copyEncryptionConfig or expand fails for createVolumeFromSnapshot and CreateSnapshot. It also removes the clone in case of any failure in createCloneFromImage. 
issue: #3103 Signed-off-by: Yati Padia --- internal/rbd/clone.go | 10 ++++++++++ internal/rbd/controllerserver.go | 21 +++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/internal/rbd/clone.go b/internal/rbd/clone.go index 41efa2584..a0faa468b 100644 --- a/internal/rbd/clone.go +++ b/internal/rbd/clone.go @@ -138,6 +138,16 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol return err } + defer func() { + if err != nil { + log.DebugLog(ctx, "Removing clone image %q", rv) + errDefer := rv.deleteImage(ctx) + if errDefer != nil { + log.ErrorLog(ctx, "failed to delete clone image %q: %v", rv, errDefer) + } + } + }() + err = rv.getImageID() if err != nil { log.ErrorLog(ctx, "failed to get volume id %s: %v", rv, err) diff --git a/internal/rbd/controllerserver.go b/internal/rbd/controllerserver.go index b6a8f7e6a..c94790553 100644 --- a/internal/rbd/controllerserver.go +++ b/internal/rbd/controllerserver.go @@ -644,6 +644,17 @@ func (cs *ControllerServer) createVolumeFromSnapshot( return err } + + defer func() { + if err != nil { + log.DebugLog(ctx, "Removing clone image %q", rbdVol) + errDefer := rbdVol.deleteImage(ctx) + if errDefer != nil { + log.ErrorLog(ctx, "failed to delete clone image %q: %v", rbdVol, errDefer) + } + } + }() + err = rbdVol.unsetAllMetadata(k8s.GetSnapshotMetadataKeys()) if err != nil { log.ErrorLog(ctx, "failed to unset snapshot metadata on rbd image %q: %v", rbdVol, err) @@ -1142,6 +1153,16 @@ func (cs *ControllerServer) CreateSnapshot( rbdVol.RbdImageName = rbdSnap.RbdSnapName rbdVol.ClusterName = cs.ClusterName + defer func() { + if err != nil { + log.DebugLog(ctx, "Removing clone image %q", rbdVol) + errDefer := rbdVol.deleteImage(ctx) + if errDefer != nil { + log.ErrorLog(ctx, "failed to delete clone image %q: %v", rbdVol, errDefer) + } + } + }() + err = rbdVol.unsetAllMetadata(k8s.GetVolumeMetadataKeys()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) From 
a1ed6207f6b079b33ebed23dee818d6e351c179d Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Tue, 21 Jun 2022 13:49:13 +0200 Subject: [PATCH 05/10] cephfs: report detailed error message on clone failure go-ceph provides a new GetFailure() method to retrieve detailed errors when cloning failed. This is now included in the `cephFSCloneState` struct, which was a simple string before. While modifying the `cephFSCloneState` struct, the constants have been removed, as go-ceph provides them as well. Fixes: #3140 Signed-off-by: Niels de Vos --- internal/cephfs/core/clone.go | 70 ++++++++++++++++++------------ internal/cephfs/core/clone_test.go | 12 ++--- internal/cephfs/store/fsjournal.go | 18 ++++---- 3 files changed, 59 insertions(+), 41 deletions(-) diff --git a/internal/cephfs/core/clone.go b/internal/cephfs/core/clone.go index beb7d81e6..ca88a4914 100644 --- a/internal/cephfs/core/clone.go +++ b/internal/cephfs/core/clone.go @@ -19,43 +19,42 @@ package core import ( "context" "errors" + "fmt" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" "github.com/ceph/ceph-csi/internal/util/log" + + "github.com/ceph/go-ceph/cephfs/admin" ) // cephFSCloneState describes the status of the clone. -type cephFSCloneState string +type cephFSCloneState struct { + state admin.CloneState + errno string + errorMsg string +} const ( - // CephFSCloneError indicates that fetching the clone state returned an error. - CephFSCloneError = cephFSCloneState("") - // CephFSCloneFailed indicates that clone is in failed state. - CephFSCloneFailed = cephFSCloneState("failed") - // CephFSClonePending indicates that clone is in pending state. - CephFSClonePending = cephFSCloneState("pending") - // CephFSCloneInprogress indicates that clone is in in-progress state. - CephFSCloneInprogress = cephFSCloneState("in-progress") - // CephFSCloneComplete indicates that clone is in complete state. 
- CephFSCloneComplete = cephFSCloneState("complete") - // SnapshotIsProtected string indicates that the snapshot is currently protected. SnapshotIsProtected = "yes" ) -// toError checks the state of the clone if it's not cephFSCloneComplete. -func (cs cephFSCloneState) toError() error { - switch cs { - case CephFSCloneComplete: +// CephFSCloneError indicates that fetching the clone state returned an error. +var CephFSCloneError = cephFSCloneState{} + +// ToError checks the state of the clone if it's not cephFSCloneComplete. +func (cs cephFSCloneState) ToError() error { + switch cs.state { + case admin.CloneComplete: return nil - case CephFSCloneError: - return cerrors.ErrInvalidClone - case CephFSCloneInprogress: + case CephFSCloneError.state: + return fmt.Errorf("%w: %s (%s)", cerrors.ErrInvalidClone, cs.errorMsg, cs.errno) + case admin.CloneInProgress: return cerrors.ErrCloneInProgress - case CephFSClonePending: + case admin.ClonePending: return cerrors.ErrClonePending - case CephFSCloneFailed: - return cerrors.ErrCloneFailed + case admin.CloneFailed: + return fmt.Errorf("%w: %s (%s)", cerrors.ErrCloneFailed, cs.errorMsg, cs.errno) } return nil @@ -125,10 +124,11 @@ func (s *subVolumeClient) CreateCloneFromSubvolume( return cloneErr } - if cloneState != CephFSCloneComplete { - log.ErrorLog(ctx, "clone %s did not complete: %v", s.VolID, cloneState.toError()) + err = cloneState.ToError() + if err != nil { + log.ErrorLog(ctx, "clone %s did not complete: %v", s.VolID, err) - return cloneState.toError() + return err } err = s.ExpandVolume(ctx, s.Size) @@ -220,8 +220,9 @@ func (s *subVolumeClient) CreateCloneFromSnapshot( return err } - if cloneState != CephFSCloneComplete { - return cloneState.toError() + err = cloneState.ToError() + if err != nil { + return err } err = s.ExpandVolume(ctx, s.Size) @@ -255,5 +256,18 @@ func (s *subVolumeClient) GetCloneState(ctx context.Context) (cephFSCloneState, return CephFSCloneError, err } - return cephFSCloneState(cs.State), nil 
+ errno := "" + errStr := "" + if failure := cs.GetFailure(); failure != nil { + errno = failure.Errno + errStr = failure.ErrStr + } + + state := cephFSCloneState{ + state: cs.State, + errno: errno, + errorMsg: errStr, + } + + return state, nil } diff --git a/internal/cephfs/core/clone_test.go b/internal/cephfs/core/clone_test.go index 72b852b2b..4a74a4c3b 100644 --- a/internal/cephfs/core/clone_test.go +++ b/internal/cephfs/core/clone_test.go @@ -17,23 +17,25 @@ limitations under the License. package core import ( + "errors" "testing" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" + fsa "github.com/ceph/go-ceph/cephfs/admin" "github.com/stretchr/testify/assert" ) func TestCloneStateToError(t *testing.T) { t.Parallel() errorState := make(map[cephFSCloneState]error) - errorState[CephFSCloneComplete] = nil + errorState[cephFSCloneState{fsa.CloneComplete, "", ""}] = nil errorState[CephFSCloneError] = cerrors.ErrInvalidClone - errorState[CephFSCloneInprogress] = cerrors.ErrCloneInProgress - errorState[CephFSClonePending] = cerrors.ErrClonePending - errorState[CephFSCloneFailed] = cerrors.ErrCloneFailed + errorState[cephFSCloneState{fsa.CloneInProgress, "", ""}] = cerrors.ErrCloneInProgress + errorState[cephFSCloneState{fsa.ClonePending, "", ""}] = cerrors.ErrClonePending + errorState[cephFSCloneState{fsa.CloneFailed, "", ""}] = cerrors.ErrCloneFailed for state, err := range errorState { - assert.Equal(t, state.toError(), err) + assert.True(t, errors.Is(state.ToError(), err)) } } diff --git a/internal/cephfs/store/fsjournal.go b/internal/cephfs/store/fsjournal.go index f787d6e3b..9d84256fd 100644 --- a/internal/cephfs/store/fsjournal.go +++ b/internal/cephfs/store/fsjournal.go @@ -119,15 +119,17 @@ func CheckVolExists(ctx context.Context, return nil, err } - if cloneState == core.CephFSCloneInprogress { - return nil, cerrors.ErrCloneInProgress + err = cloneState.ToError() + if errors.Is(err, cerrors.ErrCloneInProgress) { + return nil, err } - if cloneState == 
core.CephFSClonePending { - return nil, cerrors.ErrClonePending + if errors.Is(err, cerrors.ErrClonePending) { + return nil, err } - if cloneState == core.CephFSCloneFailed { + if errors.Is(err, cerrors.ErrCloneFailed) { log.ErrorLog(ctx, - "clone failed, deleting subvolume clone. vol=%s, subvol=%s subvolgroup=%s", + "clone failed (%v), deleting subvolume clone. vol=%s, subvol=%s subvolgroup=%s", + err, volOptions.FsName, vid.FsSubvolName, volOptions.SubvolumeGroup) @@ -149,8 +151,8 @@ func CheckVolExists(ctx context.Context, return nil, err } - if cloneState != core.CephFSCloneComplete { - return nil, fmt.Errorf("clone is not in complete state for %s", vid.FsSubvolName) + if err != nil { + return nil, fmt.Errorf("clone is not in complete state for %s: %w", vid.FsSubvolName, err) } } From 14ba1498bfded74b112904c9b5aecc1f36401f9e Mon Sep 17 00:00:00 2001 From: Niels de Vos Date: Wed, 29 Jun 2022 15:53:55 +0200 Subject: [PATCH 06/10] util: reduce systemd related errors while mounting There are regular reports that identify a non-error as the cause of failures. The Kubernetes mount-utils package has detection for systemd based environments, and if systemd is unavailable, the following error is logged: Cannot run systemd-run, assuming non-systemd OS systemd-run output: System has not been booted with systemd as init system (PID 1). Can't operate. Failed to create bus connection: Host is down, failed with: exit status 1 Because of the `failed` and `exit status 1` error message, users might assume that the mounting failed. This does not need to be the case. The container-images that the Ceph-CSI projects provides, do not use systemd, so the error will get logged with each mount attempt. By using the newer MountSensitiveWithoutSystemd() function from the mount-utils package where we can, the number of confusing logs get reduced. 
Signed-off-by: Niels de Vos --- internal/rbd/nodeserver.go | 2 +- internal/util/util.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go index 057cf9e44..0e13cb787 100644 --- a/internal/rbd/nodeserver.go +++ b/internal/rbd/nodeserver.go @@ -773,7 +773,7 @@ func (ns *NodeServer) mountVolumeToStagePath( if isBlock { opt = append(opt, "bind") - err = diskMounter.Mount(devicePath, stagingPath, fsType, opt) + err = diskMounter.MountSensitiveWithoutSystemd(devicePath, stagingPath, fsType, opt, nil) } else { err = diskMounter.FormatAndMount(devicePath, stagingPath, fsType, opt) } diff --git a/internal/util/util.go b/internal/util/util.go index b9d7ea173..857b7b19d 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -330,7 +330,7 @@ func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) { func Mount(source, target, fstype string, options []string) error { dummyMount := mount.New("") - return dummyMount.MountSensitiveWithoutSystemd(source, target, fstype, options, nil) } // MountOptionsAdd adds the `add` mount options to the `options` and returns a From b262f06c33d06c2384856240916b6d13636e1568 Mon Sep 17 00:00:00 2001 From: Carsten Buchberger Date: Mon, 13 Jun 2022 15:31:38 +0200 Subject: [PATCH 07/10] helm: enable host networking for provisioner Adds the possibility in the helm-chart to enable hostNetworking for provisioner pods. 
Signed-off-by: Carsten Buchberger --- charts/ceph-csi-cephfs/README.md | 1 + charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml | 1 + charts/ceph-csi-cephfs/values.yaml | 4 ++++ charts/ceph-csi-rbd/README.md | 1 + charts/ceph-csi-rbd/templates/provisioner-deployment.yaml | 1 + charts/ceph-csi-rbd/values.yaml | 4 ++++ 6 files changed, 12 insertions(+) diff --git a/charts/ceph-csi-cephfs/README.md b/charts/ceph-csi-cephfs/README.md index 604e8be53..18e57fea5 100644 --- a/charts/ceph-csi-cephfs/README.md +++ b/charts/ceph-csi-cephfs/README.md @@ -104,6 +104,7 @@ charts and their default values. | `provisioner.replicaCount` | Specifies the replicaCount | `3` | | `provisioner.timeout` | GRPC timeout for waiting for creation or deletion of a volume | `60s` | | `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` | +| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. | `false` | | `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` | | `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` | | `provisioner.provisioner.image.tag` | Specifies image tag | `v3.1.0` | diff --git a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml index 3c20609fe..5da7f1e73 100644 --- a/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml +++ b/charts/ceph-csi-cephfs/templates/provisioner-deployment.yaml @@ -48,6 +48,7 @@ spec: topologyKey: "kubernetes.io/hostname" {{- end }} serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . 
}} + hostNetwork: {{ .Values.provisioner.enableHostNetwork }} {{- if .Values.provisioner.priorityClassName }} priorityClassName: {{ .Values.provisioner.priorityClassName }} {{- end }} diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml index b21871a09..351e2d183 100644 --- a/charts/ceph-csi-cephfs/values.yaml +++ b/charts/ceph-csi-cephfs/values.yaml @@ -125,6 +125,10 @@ provisioner: # system-cluster-critical which is less priority than system-node-critical priorityClassName: system-cluster-critical + # enable hostnetwork for provisioner pod. default is false + # useful for deployments where the podNetwork has no access to ceph + enableHostNetwork: false + httpMetrics: # Metrics only available for cephcsi/cephcsi => 1.2.0 # Specifies whether http metrics should be exposed diff --git a/charts/ceph-csi-rbd/README.md b/charts/ceph-csi-rbd/README.md index 9e5b413e6..234c570ca 100644 --- a/charts/ceph-csi-rbd/README.md +++ b/charts/ceph-csi-rbd/README.md @@ -114,6 +114,7 @@ charts and their default values. | `provisioner.clustername` | Cluster name to set on the RBD image | "" | | `provisioner.setmetadata` | Set metadata on volume | `true` | | `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` | +| `provisioner.enableHostNetwork` | Specifies whether hostNetwork is enabled for provisioner pod. 
| `false` | | `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` | | `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` | | `provisioner.provisioner.image.tag` | Specifies image tag | `canary` | diff --git a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml index 540b8add8..3738c3a06 100644 --- a/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml +++ b/charts/ceph-csi-rbd/templates/provisioner-deployment.yaml @@ -48,6 +48,7 @@ spec: topologyKey: "kubernetes.io/hostname" {{- end }} serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }} + hostNetwork: {{ .Values.provisioner.enableHostNetwork }} {{- if .Values.provisioner.priorityClassName }} priorityClassName: {{ .Values.provisioner.priorityClassName }} {{- end }} diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml index 8c67572e6..8b3970570 100644 --- a/charts/ceph-csi-rbd/values.yaml +++ b/charts/ceph-csi-rbd/values.yaml @@ -164,6 +164,10 @@ provisioner: # system-cluster-critical which is less priority than system-node-critical priorityClassName: system-cluster-critical + # enable hostnetwork for provisioner pod. default is false + # useful for deployments where the podNetwork has no access to ceph + enableHostNetwork: false + httpMetrics: # Metrics only available for cephcsi/cephcsi => 1.2.0 # Specifies whether http metrics should be exposed From 08b42e5d67469232b9351157b9e2c97c5211b9dc Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Wed, 22 Jun 2022 10:50:17 +0530 Subject: [PATCH 08/10] nfs: make use of latest sidecars in the deployment The sidecars in the NFS deployment has latest versions which is also updated for RBD and CephFS drivers. This commit update the versions in the NFS deployment too. 
Signed-off-by: Humble Chirammal --- deploy/nfs/kubernetes/csi-nfsplugin-provisioner.yaml | 4 ++-- deploy/nfs/kubernetes/csi-nfsplugin.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/nfs/kubernetes/csi-nfsplugin-provisioner.yaml b/deploy/nfs/kubernetes/csi-nfsplugin-provisioner.yaml index a7aa8205d..ecfedd17b 100644 --- a/deploy/nfs/kubernetes/csi-nfsplugin-provisioner.yaml +++ b/deploy/nfs/kubernetes/csi-nfsplugin-provisioner.yaml @@ -55,7 +55,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.5.0 args: - "--csi-address=$(ADDRESS)" - "--v=5" @@ -71,7 +71,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1 + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.0.1 args: - "--csi-address=$(ADDRESS)" - "--v=5" diff --git a/deploy/nfs/kubernetes/csi-nfsplugin.yaml b/deploy/nfs/kubernetes/csi-nfsplugin.yaml index 765896e16..ab466af8f 100644 --- a/deploy/nfs/kubernetes/csi-nfsplugin.yaml +++ b/deploy/nfs/kubernetes/csi-nfsplugin.yaml @@ -18,7 +18,7 @@ spec: - --probe-timeout=3s - --health-port=29653 - --v=2 - image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0 imagePullPolicy: IfNotPresent name: liveness-probe resources: From aed7d8d4e41badb82a91cf51f60f207f3acfcb83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jun 2022 20:15:26 +0000 Subject: [PATCH 09/10] rebase: bump k8s.io/kubernetes from 1.24.1 to 1.24.2 Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.24.1 to 1.24.2. 
- [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.24.1...v1.24.2) --- updated-dependencies: - dependency-name: k8s.io/kubernetes dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 66 ++++++++-------- go.sum | 78 +++++++++---------- vendor/k8s.io/apiserver/pkg/audit/union.go | 1 + .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 23 ++++++ .../kubernetes/test/utils/image/manifest.go | 2 +- vendor/modules.txt | 76 +++++++++--------- 6 files changed, 135 insertions(+), 111 deletions(-) diff --git a/go.mod b/go.mod index d8407024f..977bb63ec 100644 --- a/go.mod +++ b/go.mod @@ -28,16 +28,16 @@ require ( golang.org/x/sys v0.0.0-20220209214540-3681064d5158 google.golang.org/grpc v1.47.0 google.golang.org/protobuf v1.28.0 - k8s.io/api v0.24.1 - k8s.io/apimachinery v0.24.1 + k8s.io/api v0.24.2 + k8s.io/apimachinery v0.24.2 k8s.io/client-go v12.0.0+incompatible - k8s.io/cloud-provider v0.24.1 + k8s.io/cloud-provider v0.24.2 k8s.io/klog/v2 v2.60.1 // // when updating k8s.io/kubernetes, make sure to update the replace section too // - k8s.io/kubernetes v1.24.1 - k8s.io/mount-utils v0.24.1 + k8s.io/kubernetes v1.24.2 + k8s.io/mount-utils v0.24.2 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 ) @@ -153,9 +153,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.23.0 // indirect - k8s.io/apiserver v0.24.1 // indirect - k8s.io/component-base v0.24.1 // indirect - k8s.io/component-helpers v0.24.1 // indirect + k8s.io/apiserver v0.24.2 // indirect + k8s.io/component-base v0.24.2 // indirect + k8s.io/component-helpers v0.24.2 // indirect k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.0.0 // indirect @@ -175,31 +175,31 
@@ replace ( // // k8s.io/kubernetes depends on these k8s.io packages, but unversioned // - k8s.io/api => k8s.io/api v0.24.1 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.1 - k8s.io/apimachinery => k8s.io/apimachinery v0.24.1 - k8s.io/apiserver => k8s.io/apiserver v0.24.1 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.1 - k8s.io/client-go => k8s.io/client-go v0.24.1 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.1 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.1 - k8s.io/code-generator => k8s.io/code-generator v0.24.1 - k8s.io/component-base => k8s.io/component-base v0.24.1 - k8s.io/component-helpers => k8s.io/component-helpers v0.24.1 - k8s.io/controller-manager => k8s.io/controller-manager v0.24.1 - k8s.io/cri-api => k8s.io/cri-api v0.24.1 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.1 - k8s.io/kubectl => k8s.io/kubectl v0.24.1 - k8s.io/kubelet => k8s.io/kubelet v0.24.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.1 - k8s.io/metrics => k8s.io/metrics v0.24.1 - k8s.io/mount-utils => k8s.io/mount-utils v0.24.1 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.1 + k8s.io/api => k8s.io/api v0.24.2 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.2 + k8s.io/apimachinery => k8s.io/apimachinery v0.24.2 + k8s.io/apiserver => k8s.io/apiserver v0.24.2 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.2 + k8s.io/client-go => k8s.io/client-go v0.24.2 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.2 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.2 + k8s.io/code-generator => k8s.io/code-generator v0.24.2 + 
k8s.io/component-base => k8s.io/component-base v0.24.2 + k8s.io/component-helpers => k8s.io/component-helpers v0.24.2 + k8s.io/controller-manager => k8s.io/controller-manager v0.24.2 + k8s.io/cri-api => k8s.io/cri-api v0.24.2 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.2 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.2 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.2 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.2 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.2 + k8s.io/kubectl => k8s.io/kubectl v0.24.2 + k8s.io/kubelet => k8s.io/kubelet v0.24.2 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.2 + k8s.io/metrics => k8s.io/metrics v0.24.2 + k8s.io/mount-utils => k8s.io/mount-utils v0.24.2 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.2 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.2 // layeh.com seems to be misbehaving layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917 ) diff --git a/go.sum b/go.sum index 03b802029..7e79198f7 100644 --- a/go.sum +++ b/go.sum @@ -1742,28 +1742,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= -k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= -k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0= -k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q= -k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= -k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= 
-k8s.io/apiserver v0.24.1 h1:LAA5UpPOeaREEtFAQRUQOI3eE5So/j5J3zeQJjeLdz4= -k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0= -k8s.io/cli-runtime v0.24.1/go.mod h1:14aVvCTqkA7dNXY51N/6hRY3GUjchyWDOwW84qmR3bs= -k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= -k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= -k8s.io/cloud-provider v0.24.1 h1:SaQNq2Ax+epdY9wFngwN9GWpOVnM72hUqr2qy20cOvg= -k8s.io/cloud-provider v0.24.1/go.mod h1:h5m/KIiwiQ76hpUBsgrwm/rxteIfJG9kJQ/+/w1as2M= -k8s.io/cluster-bootstrap v0.24.1/go.mod h1:uq2PiYfKh8ZLb6DBU/3/2Z1DkMqXkTOHLemalC4tOgE= -k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts= -k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38= -k8s.io/component-helpers v0.24.1 h1:pk68RSRhkGX75nhtAkilguKbq/0MyXbQqmrZoQu4nbs= -k8s.io/component-helpers v0.24.1/go.mod h1:q5Z1pWV/QfX9ThuNeywxasiwkLw9KsR4Q9TAOdb/Y3s= -k8s.io/controller-manager v0.24.1/go.mod h1:g105ENexD6A2holEq7Bl6ae+69LJHiLnoEEm7wkE6sc= -k8s.io/cri-api v0.24.1/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig= -k8s.io/csi-translation-lib v0.24.1/go.mod h1:16nY6xx3XR4+TASMfTtake2ouK1IPz0t/baNmngzR4I= +k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= +k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= +k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= +k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= +k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= +k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.24.2 h1:orxipm5elPJSkkFNlwH9ClqaKEDJJA3yR2cAAlCnyj4= +k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= +k8s.io/cli-runtime 
v0.24.2/go.mod h1:1LIhKL2RblkhfG4v5lZEt7FtgFG5mVb8wqv5lE9m5qY= +k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= +k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= +k8s.io/cloud-provider v0.24.2 h1:DYNf90zS/GAQbEHsTfJsH4Oas7vim4U+WU9GftMQlfs= +k8s.io/cloud-provider v0.24.2/go.mod h1:a7jyWjizk+IKbcIf8+mX2cj3NvpRv9ZyGdXDyb8UEkI= +k8s.io/cluster-bootstrap v0.24.2/go.mod h1:eIHV338K03vBm3u/ROZiNXxWJ4AJRoTR9PEUhcTvYkg= +k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= +k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= +k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= +k8s.io/component-helpers v0.24.2 h1:gtXmI/TjVINtkAdZn7m5p8+Vd0Mk4d1q8kwJMMLBdwY= +k8s.io/component-helpers v0.24.2/go.mod h1:TRQPBQKfmqkmV6c0HAmUs8cXVNYYYLsXy4zu8eODi9g= +k8s.io/controller-manager v0.24.2/go.mod h1:hpwCof4KxP4vrw/M5QiVxU6Zmmggmr1keGXtjGHF+vc= +k8s.io/cri-api v0.24.2/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig= +k8s.io/csi-translation-lib v0.24.2/go.mod h1:pdHc2CYLViQYYsOqOp79hjKYi8J4NZ7vpiVzn1SqBrg= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1777,28 +1777,28 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-aggregator v0.24.1/go.mod h1:vZvRALCO32hrIuREhkYwLq5Crc0zh6SxzJDAKrQM1+k= -k8s.io/kube-controller-manager v0.24.1/go.mod h1:IlXY8FozezzIBNcfA6TV1//fjz9gNy3LGbigDnX7Q3A= +k8s.io/kube-aggregator 
v0.24.2/go.mod h1:Ju2jNDixn+vqeeKEBfjfpc204bO1pbdXX0N9knCxeMQ= +k8s.io/kube-controller-manager v0.24.2/go.mod h1:KDE0yqiEvxYiO0WRpPA4rVx8AcK1vsWydUF37AJ9lTI= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-proxy v0.24.1/go.mod h1:Q19uL+muS7Q0rxIXlddcanbGcogbDcX5I86GROhrwOM= -k8s.io/kube-scheduler v0.24.1/go.mod h1:mxSsC5sg710qdrN9oY+OSkHRSgYOv6qA2vEEt1t6Ax4= -k8s.io/kubectl v0.24.1 h1:gxcjHrnwntV1c+G/BHWVv4Mtk8CQJ0WTraElLBG+ddk= -k8s.io/kubectl v0.24.1/go.mod h1:NzFqQ50B004fHYWOfhHTrAm4TY6oGF5FAAL13LEaeUI= -k8s.io/kubelet v0.24.1 h1:CLgXZ9kKDQoNQFSwKk6vUE5gXNaX1/s8VM8Oq/P5S+o= -k8s.io/kubelet v0.24.1/go.mod h1:LShXfjNO1or7ktsorODSOu8+Kd5dHzWF3DtVLXeP3JE= -k8s.io/kubernetes v1.24.1 h1:cfRZCNrJN9hR49SBSGLHhn+IdAcfx6OVXadGvWuvYaM= -k8s.io/kubernetes v1.24.1/go.mod h1:8e8maMiZzBR2/8Po5Uulx+MXZUYJuN3vtKwD4Ct1Xi0= -k8s.io/legacy-cloud-providers v0.24.1/go.mod h1:OeDg+OJ5uzmJQyh6vpCkwGY8tVegaiokWErGr7YlSaI= -k8s.io/metrics v0.24.1/go.mod h1:vMs5xpcOyY9D+/XVwlaw8oUHYCo6JTGBCZfyXOOkAhE= -k8s.io/mount-utils v0.24.1 h1:juKCvkiP4sWklb72OIk/qW7UhDns41ldcR/EHu/T1uA= -k8s.io/mount-utils v0.24.1/go.mod h1:XrSqB3a2e8sq+aU+rlbcBtQ3EgcuDk5RP9ZsGxjoDrI= -k8s.io/pod-security-admission v0.24.1 h1:CNcUKc06PgejhdvK1rqBgo5xcpirsl3O574cfKt4hxk= -k8s.io/pod-security-admission v0.24.1/go.mod h1:ZH6e17BuFFdiYHFxn9X6d7iaPj3JyuqBOw/MRytVWp8= -k8s.io/sample-apiserver v0.24.1/go.mod h1:5L12FaHPjpJzr0s/ClAx61Ig5uBjDCvthtmTIORu7F8= +k8s.io/kube-proxy v0.24.2/go.mod 
h1:bozS2ufl/Ns6s40Ue34eV7rqyLVygi5usSmCgW7rFU8= +k8s.io/kube-scheduler v0.24.2/go.mod h1:DRa+aeXKSYUUOHHIc/9EcaO9+FW5FydaOfPSvaSW5Ko= +k8s.io/kubectl v0.24.2 h1:+RfQVhth8akUmIc2Ge8krMl/pt66V7210ka3RE/p0J4= +k8s.io/kubectl v0.24.2/go.mod h1:+HIFJc0bA6Tzu5O/YcuUt45APAxnNL8LeMuXwoiGsPg= +k8s.io/kubelet v0.24.2 h1:VAvULig8RiylCtyxudgHV7nhKsLnNIrdVBCRD4bXQ3Y= +k8s.io/kubelet v0.24.2/go.mod h1:Xm9DkWQjwOs+uGOUIIGIPMvvmenvj0lDVOErvIKOOt0= +k8s.io/kubernetes v1.24.2 h1:AyjtHzSysliKR04Km91njmk2yaKmOa3ZISQZCIGUnVI= +k8s.io/kubernetes v1.24.2/go.mod h1:8e8maMiZzBR2/8Po5Uulx+MXZUYJuN3vtKwD4Ct1Xi0= +k8s.io/legacy-cloud-providers v0.24.2/go.mod h1:sgkasgIP2ZOew8fzoOq0mQLVXJ4AmB57IUbFUjzPWEo= +k8s.io/metrics v0.24.2/go.mod h1:5NWURxZ6Lz5gj8TFU83+vdWIVASx7W8lwPpHYCqopMo= +k8s.io/mount-utils v0.24.2 h1:UTHxyMz2LGBgapLcyl2y2hxWwJHotdmKLqV7CEf0fys= +k8s.io/mount-utils v0.24.2/go.mod h1:XrSqB3a2e8sq+aU+rlbcBtQ3EgcuDk5RP9ZsGxjoDrI= +k8s.io/pod-security-admission v0.24.2 h1:Wl92TCvxsqba+kDK59Dnf/qIsSoP1ekRlj5qT1XEmNk= +k8s.io/pod-security-admission v0.24.2/go.mod h1:znnuDHWWWvh/tpbYYPwTsd4y//qHi3cOX+wGxET/mMI= +k8s.io/sample-apiserver v0.24.2/go.mod h1:mf8qgDdu450wqpCJOkSAmoTgU4PIMAcfa5uTBwmJekE= k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/vendor/k8s.io/apiserver/pkg/audit/union.go b/vendor/k8s.io/apiserver/pkg/audit/union.go index 39dd74f74..0766a9207 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/union.go +++ b/vendor/k8s.io/apiserver/pkg/audit/union.go @@ -48,6 +48,7 @@ func (u union) ProcessEvents(events ...*auditinternal.Event) bool { func (u union) Run(stopCh <-chan struct{}) error { var funcs []func() error for _, backend := range u.backends { + backend := backend funcs = append(funcs, func() error { return 
backend.Run(stopCh) }) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go index 9560121bb..8bfc21a67 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -301,12 +301,28 @@ func IsPodReady(pod *v1.Pod) bool { return IsPodReadyConditionTrue(pod.Status) } +// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress. +func IsPodTerminal(pod *v1.Pod) bool { + return IsPodPhaseTerminal(pod.Status.Phase) +} + +// IsPhaseTerminal returns true if the pod's phase is terminal. +func IsPodPhaseTerminal(phase v1.PodPhase) bool { + return phase == v1.PodFailed || phase == v1.PodSucceeded +} + // IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. func IsPodReadyConditionTrue(status v1.PodStatus) bool { condition := GetPodReadyCondition(status) return condition != nil && condition.Status == v1.ConditionTrue } +// IsContainersReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsContainersReadyConditionTrue(status v1.PodStatus) bool { + condition := GetContainersReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + // GetPodReadyCondition extracts the pod ready condition from the given status and returns that. // Returns nil if the condition is not present. func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { @@ -314,6 +330,13 @@ func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { return condition } +// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.ContainersReady) + return condition +} + // GetPodCondition extracts the provided condition from the given status and returns that. 
// Returns nil and -1 if the condition is not present, and the index of the located condition. func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 820c1597d..143544369 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -231,7 +231,7 @@ const ( func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) { configs := map[int]Config{} - configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.36"} + configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.39"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} diff --git a/vendor/modules.txt b/vendor/modules.txt index 292b1baea..a38879663 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -772,7 +772,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.24.1 => k8s.io/api v0.24.1 +# k8s.io/api v0.24.2 => k8s.io/api v0.24.2 ## explicit; go 1.16 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -821,11 +821,11 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.23.0 => k8s.io/apiextensions-apiserver v0.24.1 +# k8s.io/apiextensions-apiserver v0.23.0 => k8s.io/apiextensions-apiserver v0.24.2 ## explicit; go 1.16 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.24.1 => k8s.io/apimachinery v0.24.1 +# k8s.io/apimachinery v0.24.2 => k8s.io/apimachinery v0.24.2 ## explicit; go 1.16 
k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -881,7 +881,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.24.1 => k8s.io/apiserver v0.24.1 +# k8s.io/apiserver v0.24.2 => k8s.io/apiserver v0.24.2 ## explicit; go 1.16 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -920,7 +920,7 @@ k8s.io/apiserver/pkg/util/feature k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.24.1 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.24.2 ## explicit; go 1.16 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1164,12 +1164,12 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.24.1 => k8s.io/cloud-provider v0.24.1 +# k8s.io/cloud-provider v0.24.2 => k8s.io/cloud-provider v0.24.2 ## explicit; go 1.16 k8s.io/cloud-provider k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.24.1 => k8s.io/component-base v0.24.1 +# k8s.io/component-base v0.24.2 => k8s.io/component-base v0.24.2 ## explicit; go 1.16 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1180,7 +1180,7 @@ k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/testutil k8s.io/component-base/traces k8s.io/component-base/version -# k8s.io/component-helpers v0.24.1 => k8s.io/component-helpers v0.24.1 +# k8s.io/component-helpers v0.24.2 => k8s.io/component-helpers v0.24.2 ## explicit; go 1.16 k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 @@ -1204,14 +1204,14 @@ 
k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.24.1 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.24.2 ## explicit; go 1.16 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.1 +# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2 ## explicit; go 1.16 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.24.1 +# k8s.io/kubernetes v1.24.2 ## explicit; go 1.16 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1272,10 +1272,10 @@ k8s.io/kubernetes/test/e2e/storage/utils k8s.io/kubernetes/test/e2e/testing-manifests k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/image -# k8s.io/mount-utils v0.24.1 => k8s.io/mount-utils v0.24.1 +# k8s.io/mount-utils v0.24.2 => k8s.io/mount-utils v0.24.2 ## explicit; go 1.16 k8s.io/mount-utils -# k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.24.1 +# k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.24.2 ## explicit; go 1.16 k8s.io/pod-security-admission/api # k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 @@ -1355,29 +1355,29 @@ sigs.k8s.io/yaml # github.com/golang/protobuf => github.com/golang/protobuf v1.4.3 # github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 # gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0 -# k8s.io/api => k8s.io/api v0.24.1 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.1 -# k8s.io/apimachinery => k8s.io/apimachinery v0.24.1 -# k8s.io/apiserver => k8s.io/apiserver v0.24.1 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.1 -# k8s.io/client-go => k8s.io/client-go v0.24.1 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.1 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.1 -# k8s.io/code-generator => k8s.io/code-generator 
v0.24.1 -# k8s.io/component-base => k8s.io/component-base v0.24.1 -# k8s.io/component-helpers => k8s.io/component-helpers v0.24.1 -# k8s.io/controller-manager => k8s.io/controller-manager v0.24.1 -# k8s.io/cri-api => k8s.io/cri-api v0.24.1 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.1 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.1 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.1 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.1 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.1 -# k8s.io/kubectl => k8s.io/kubectl v0.24.1 -# k8s.io/kubelet => k8s.io/kubelet v0.24.1 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.1 -# k8s.io/metrics => k8s.io/metrics v0.24.1 -# k8s.io/mount-utils => k8s.io/mount-utils v0.24.1 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.1 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.1 +# k8s.io/api => k8s.io/api v0.24.2 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.2 +# k8s.io/apimachinery => k8s.io/apimachinery v0.24.2 +# k8s.io/apiserver => k8s.io/apiserver v0.24.2 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.2 +# k8s.io/client-go => k8s.io/client-go v0.24.2 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.2 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.2 +# k8s.io/code-generator => k8s.io/code-generator v0.24.2 +# k8s.io/component-base => k8s.io/component-base v0.24.2 +# k8s.io/component-helpers => k8s.io/component-helpers v0.24.2 +# k8s.io/controller-manager => k8s.io/controller-manager v0.24.2 +# k8s.io/cri-api => k8s.io/cri-api v0.24.2 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.2 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.2 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.2 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.2 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.2 +# 
k8s.io/kubectl => k8s.io/kubectl v0.24.2 +# k8s.io/kubelet => k8s.io/kubelet v0.24.2 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.2 +# k8s.io/metrics => k8s.io/metrics v0.24.2 +# k8s.io/mount-utils => k8s.io/mount-utils v0.24.2 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.2 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.2 # layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917 From 507844c9b108992f1d6e8b5e8a6d9113e3e5fb9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Knecht?= Date: Wed, 6 Jul 2022 11:03:12 +0200 Subject: [PATCH 10/10] rbd: Use rados namespace when getting clone depth MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the Ceph user is restricted to a specific namespace in the pool, it is crucial that evey interaction with the cluster is done within that namespace. This wasn't the case in `getCloneDepth()`. This issue was causing snapshot creation to fail with > Failed to check and update snapshot content: failed to take snapshot of the > volume X: "rpc error: code = Internal desc = rbd: ret=-1, Operation not > permitted" Signed-off-by: BenoƮt Knecht --- internal/rbd/rbd_util.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index e5ece5658..3d36cf6f7 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -681,6 +681,7 @@ func (ri *rbdImage) getCloneDepth(ctx context.Context) (uint, error) { vol.Pool = ri.Pool vol.Monitors = ri.Monitors vol.RbdImageName = ri.RbdImageName + vol.RadosNamespace = ri.RadosNamespace vol.conn = ri.conn.Copy() for {