From 5524b2d5387e3b5026def9b09c33ab98baff320d Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Fri, 19 Nov 2021 10:07:57 +0530 Subject: [PATCH 01/23] ci: use 1.8.5 vault for e2e The current latest vault release is 1.9.0, but with the latest image our E2E is broken. Revert the vault version back to 1.8.5 until we root-cause the issue. Note: this is to unblock PR merging. updates: #2657 Signed-off-by: Madhu Rajanna --- examples/kms/vault/vault.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/kms/vault/vault.yaml b/examples/kms/vault/vault.yaml index ad12afbd4..379b8897a 100644 --- a/examples/kms/vault/vault.yaml +++ b/examples/kms/vault/vault.yaml @@ -39,7 +39,7 @@ spec: spec: containers: - name: vault - image: docker.io/library/vault:latest + image: docker.io/library/vault:1.8.5 imagePullPolicy: "IfNotPresent" securityContext: runAsUser: 100 @@ -64,7 +64,7 @@ spec: - name: home mountPath: /home - name: monitor - image: docker.io/library/vault:latest + image: docker.io/library/vault:1.8.5 imagePullPolicy: "IfNotPresent" securityContext: runAsUser: 100 @@ -151,7 +151,7 @@ spec: name: init-scripts containers: - name: vault-init-job - image: docker.io/library/vault:latest + image: docker.io/library/vault:1.8.5 securityContext: runAsUser: 100 volumeMounts: From ecd5d2d46c1150274124e3025d245a2e27248ba7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Nov 2021 20:30:19 +0000 Subject: [PATCH 02/23] rebase: bump sigs.k8s.io/controller-runtime from 0.10.2 to 0.10.3 Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.10.2 to 0.10.3. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.10.2...v0.10.3) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 +-- vendor/modules.txt | 2 +- .../pkg/internal/controller/controller.go | 2 +- .../pkg/manager/internal.go | 31 ++++++++++++------- .../pkg/metrics/listener.go | 2 +- .../controller-runtime/pkg/source/source.go | 6 ++-- .../controller-runtime/pkg/webhook/server.go | 6 ++-- 8 files changed, 32 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 6a32d7102..72f179503 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( k8s.io/kubernetes v1.22.3 k8s.io/mount-utils v0.22.2 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a - sigs.k8s.io/controller-runtime v0.10.2 + sigs.k8s.io/controller-runtime v0.10.3 ) replace ( diff --git a/go.sum b/go.sum index a2d9e9523..9c7d27b7d 100644 --- a/go.sum +++ b/go.sum @@ -1644,8 +1644,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= -sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc= -sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= +sigs.k8s.io/controller-runtime v0.10.3 h1:s5Ttmw/B4AuIbwrXD3sfBkXwnPMMWrqpVj4WRt1dano= +sigs.k8s.io/controller-runtime v0.10.3/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go= diff --git a/vendor/modules.txt b/vendor/modules.txt index 7aa78e9f4..1a4ac37c6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1076,7 +1076,7 @@ k8s.io/utils/trace # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/controller-runtime v0.10.2 +# sigs.k8s.io/controller-runtime v0.10.3 ## explicit sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 87431a438..1f4712d8b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -175,7 +175,7 @@ func (c *Controller) Start(ctx context.Context) error { // caches to sync so that they have a chance to register their intendeded // caches. 
for _, watch := range c.startWatches { - c.Log.Info("Starting EventSource", "source", watch.src) + c.Log.Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src)) if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil { return err diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 7c25bd3c6..59794d962 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -380,7 +380,7 @@ func (cm *controllerManager) serveMetrics() { } // Run the server cm.startRunnable(RunnableFunc(func(_ context.Context) error { - cm.logger.Info("starting metrics server", "path", defaultMetricsEndpoint) + cm.logger.Info("Starting metrics server", "path", defaultMetricsEndpoint) if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed { return err } @@ -425,11 +425,13 @@ func (cm *controllerManager) serveHealthProbes() { cm.healthzStarted = true }() - // Shutdown the server when stop is closed - <-cm.internalProceduresStop - if err := server.Shutdown(cm.shutdownCtx); err != nil { - cm.errChan <- err - } + go func() { + // Shutdown the server when stop is closed + <-cm.internalProceduresStop + if err := server.Shutdown(cm.shutdownCtx); err != nil { + cm.errChan <- err + } + }() } func (cm *controllerManager) Start(ctx context.Context) (err error) { @@ -473,9 +475,14 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // Serve health probes if cm.healthProbeListener != nil { - go cm.serveHealthProbes() + cm.serveHealthProbes() } + // Webhooks MUST start before any cache is populated, otherwise there is a race condition + // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks + // to never start because no cache can be populated. + cm.startWebhookRunnables() + go cm.startNonLeaderElectionRunnables() go func() { @@ -573,13 +580,10 @@ func (cm *controllerManager) waitForRunnableToEnd(shutdownCancel context.CancelF return nil } -func (cm *controllerManager) startNonLeaderElectionRunnables() { +func (cm *controllerManager) startWebhookRunnables() { cm.mu.Lock() defer cm.mu.Unlock() - // First start any webhook servers, which includes conversion, validation, and defaulting - // webhooks that are registered. - // // WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks // to never start because no cache can be populated. @@ -588,6 +592,11 @@ func (cm *controllerManager) startNonLeaderElectionRunnables() { cm.startRunnable(c) } } +} + +func (cm *controllerManager) startNonLeaderElectionRunnables() { + cm.mu.Lock() + defer cm.mu.Unlock() // Start and wait for caches. 
cm.waitForCache(cm.internalCtx) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go index d32ae5818..123d8c15f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go @@ -41,7 +41,7 @@ func NewListener(addr string) (net.Listener, error) { return nil, nil } - log.Info("metrics server is starting to listen", "addr", addr) + log.Info("Metrics server is starting to listen", "addr", addr) ln, err := net.Listen("tcp", addr) if err != nil { er := fmt.Errorf("error listening on %s: %w", addr, err) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 708c5a5bf..8f649eaac 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -161,10 +161,10 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w } func (ks *Kind) String() string { - if ks.Type != nil && ks.Type.GetObjectKind() != nil { - return fmt.Sprintf("kind source: %v", ks.Type.GetObjectKind().GroupVersionKind().String()) + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) } - return "kind source: unknown GVK" + return "kind source: unknown type" } // WaitForSync implements SyncingSource to allow controllers to wait with starting diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index d2338d0b7..1db38113f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -142,7 +142,7 @@ func (s *Server) Register(path string, hook http.Handler) { s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) regLog := log.WithValues("path", path) - regLog.Info("registering webhook") + regLog.Info("Registering webhook") // we've already been "started", inject dependencies here. // Otherwise, InjectFunc will do this for us later. @@ -210,7 +210,7 @@ func (s *Server) Start(ctx context.Context) error { s.defaultingOnce.Do(s.setDefaults) baseHookLog := log.WithName("webhooks") - baseHookLog.Info("starting webhook server") + baseHookLog.Info("Starting webhook server") certPath := filepath.Join(s.CertDir, s.CertName) keyPath := filepath.Join(s.CertDir, s.KeyName) @@ -259,7 +259,7 @@ func (s *Server) Start(ctx context.Context) error { return err } - log.Info("serving webhook server", "host", s.Host, "port", s.Port) + log.Info("Serving webhook server", "host", s.Host, "port", s.Port) srv := &http.Server{ Handler: s.WebhookMux, From c339d43272be1bfdc046a1be6b0a6164a6ddb2f8 Mon Sep 17 00:00:00 2001 From: Yug Gupta Date: Mon, 15 Nov 2021 12:11:50 +0530 Subject: [PATCH 03/23] deploy: deploy erasure coded pool Deploy an erasure-coded pool during rook deployment to allow usage and testing with erasure-coded pools. 
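The pool-ec.yaml manifest that the rook.sh change below downloads defines a Rook CephBlockPool backed by erasure coding. As a minimal Go sketch (not part of this patch), assuming the 2+1 chunk layout and osd failure domain that Rook's examples commonly use — the real manifest may differ per Rook release:

package e2e

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// ecBlockPool sketches the CephBlockPool object that pool-ec.yaml describes;
// the chunk counts and failure domain below are assumptions, not values taken
// from this patch.
func ecBlockPool(name, namespace string) *unstructured.Unstructured {
	return &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "ceph.rook.io/v1",
		"kind":       "CephBlockPool",
		"metadata": map[string]interface{}{
			"name":      name, // rook.sh renames "ec-pool" to ROOK_BLOCK_EC_POOL_NAME via sed
			"namespace": namespace,
		},
		"spec": map[string]interface{}{
			// An EC pool stores k data chunks plus m coding chunks per object;
			// a 2+1 layout tolerates the loss of a single OSD.
			"failureDomain": "osd",
			"erasureCoded": map[string]interface{}{
				"dataChunks":   int64(2),
				"codingChunks": int64(1),
			},
		},
	}}
}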
Signed-off-by: Yug Gupta --- scripts/rook.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/scripts/rook.sh b/scripts/rook.sh index aae815dc2..3a460d6ab 100755 --- a/scripts/rook.sh +++ b/scripts/rook.sh @@ -4,6 +4,7 @@ ROOK_VERSION=${ROOK_VERSION:-"v1.6.2"} ROOK_DEPLOY_TIMEOUT=${ROOK_DEPLOY_TIMEOUT:-300} ROOK_URL="https://raw.githubusercontent.com/rook/rook/${ROOK_VERSION}/cluster/examples/kubernetes/ceph" ROOK_BLOCK_POOL_NAME=${ROOK_BLOCK_POOL_NAME:-"newrbdpool"} +ROOK_BLOCK_EC_POOL_NAME=${ROOK_BLOCK_EC_POOL_NAME:-"ec-pool"} SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" # shellcheck disable=SC1091 @@ -116,6 +117,22 @@ function delete_block_pool() { rm -f "./newpool.yaml" } +function create_block_ec_pool() { + curl -o block-pool-ec.yaml "${ROOK_URL}/pool-ec.yaml" + sed -i "s/ec-pool/${ROOK_BLOCK_EC_POOL_NAME}/g" block-pool-ec.yaml + kubectl_retry create -f "./block-pool-ec.yaml" + rm -f "./block-pool-ec.yaml" + + check_rbd_stat "${ROOK_BLOCK_EC_POOL_NAME}" +} + +function delete_block_ec_pool() { + curl -o block-pool-ec.yaml "${ROOK_URL}/pool-ec.yaml" + sed -i "s/ec-pool/${ROOK_BLOCK_EC_POOL_NAME}/g" block-pool-ec.yaml + kubectl delete -f "./block-pool-ec.yaml" + rm -f "./block-pool-ec.yaml" +} + function check_ceph_cluster_health() { for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do echo "Wait for rook deploy... ${retry}s" && sleep 5 @@ -204,6 +221,12 @@ create-block-pool) delete-block-pool) delete_block_pool ;; +create-block-ec-pool) + create_block_ec_pool + ;; +delete-block-ec-pool) + delete_block_ec_pool + ;; *) echo " $0 [command] Available Commands: From 04e99ae2e065b92299a7b71aa23ceb999cb6b5b1 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Mon, 8 Nov 2021 15:04:40 +0530 Subject: [PATCH 04/23] ci: update minikube to v1.24.0 Update the minikube version to the latest available version, i.e., 1.24.0. Signed-off-by: Madhu Rajanna --- build.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.env b/build.env index 77471ec90..d2af354e8 100644 --- a/build.env +++ b/build.env @@ -38,7 +38,7 @@ SNAPSHOT_VERSION=v4.0.0 HELM_VERSION=v3.1.2 # minikube settings -MINIKUBE_VERSION=v1.23.2 +MINIKUBE_VERSION=v1.24.0 VM_DRIVER=none CHANGE_MINIKUBE_NONE_USER=true From df0901ddd8e7f27f4f9b35e4110624022f56332b Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Fri, 29 Oct 2021 14:37:04 +0530 Subject: [PATCH 05/23] rbd: add generic ephemeral volume validation This commit adds validation of the CSI RBD driver's support for generic ephemeral volumes. With ephemeral volume support, a user can specify ephemeral volumes in the pod spec and tie the lifecycle of the PVC to the pod. An example pod spec is also included in this commit. 
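As a hedged illustration, the pod spec this patch adds (examples/rbd/pod-ephemeral.yaml, shown in the diff below) could equivalently be built with client-go types. The names csi-rbd-sc and mypvc and the 1Gi request mirror the manifest; this sketch is illustrative, not code from the patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ephemeralPod builds the same pod that examples/rbd/pod-ephemeral.yaml
// describes: the PVC is created from the inline template, owned by the pod,
// and deleted together with it.
func ephemeralPod() *corev1.Pod {
	sc := "csi-rbd-sc"
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-rbd-demo-ephemeral-pod"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:         "web-server",
				Image:        "docker.io/library/nginx:latest",
				VolumeMounts: []corev1.VolumeMount{{Name: "mypvc", MountPath: "/myspace"}},
			}},
			Volumes: []corev1.Volume{{
				Name: "mypvc",
				VolumeSource: corev1.VolumeSource{
					Ephemeral: &corev1.EphemeralVolumeSource{
						VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
							Spec: corev1.PersistentVolumeClaimSpec{
								AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
								StorageClassName: &sc,
								Resources: corev1.ResourceRequirements{
									Requests: corev1.ResourceList{
										corev1.ResourceStorage: resource.MustParse("1Gi"),
									},
								},
							},
						},
					},
				},
			}},
		},
	}
}

func main() {
	fmt.Println(ephemeralPod().Name)
}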
Signed-off-by: Humble Chirammal --- e2e/rbd.go | 31 +++++++++++++++++++++++++++++++ examples/rbd/pod-ephemeral.yaml | 23 +++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 examples/rbd/pod-ephemeral.yaml diff --git a/e2e/rbd.go b/e2e/rbd.go index e16c5c00c..7d72e7fa2 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -57,6 +57,7 @@ var ( appClonePath = rbdExamplePath + "pod-restore.yaml" appSmartClonePath = rbdExamplePath + "pod-clone.yaml" appBlockSmartClonePath = rbdExamplePath + "block-pod-clone.yaml" + appEphemeralPath = rbdExamplePath + "pod-ephemeral.yaml" snapshotPath = rbdExamplePath + "snapshot.yaml" defaultCloneCount = 10 @@ -367,6 +368,36 @@ var _ = Describe("RBD", func() { } }) } + By("verify generic ephemeral volume support", func() { + // generic ephemeral volume support is supported from 1.21 + if !k8sVersionGreaterEquals(f.ClientSet, 1, 21) { + Skip("generic ephemeral volume only supported from v1.21+") + } + // create application + app, err := loadApp(appEphemeralPath) + if err != nil { + e2elog.Failf("failed to load application: %v", err) + } + app.Namespace = f.UniqueName + err = createApp(f.ClientSet, app, deployTimeout) + if err != nil { + e2elog.Failf("failed to create application: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 1, defaultRBDPool) + err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete application: %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0, defaultRBDPool) + // validate images in trash + err = waitToRemoveImagesFromTrash(f, defaultRBDPool, deployTimeout) + if err != nil { + e2elog.Failf("failed to validate rbd images in pool %s trash: %v", defaultRBDPool, err) + } + }) + // todo: may be remove the below deletion test later once the migration nodestage tests are adjusted // also to have deletion validation through the same. By("validate RBD migration+static Block PVC Deletion", func() { diff --git a/examples/rbd/pod-ephemeral.yaml b/examples/rbd/pod-ephemeral.yaml new file mode 100644 index 000000000..697e61ab7 --- /dev/null +++ b/examples/rbd/pod-ephemeral.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: csi-rbd-demo-ephemeral-pod +spec: + containers: + - name: web-server + image: docker.io/library/nginx:latest + volumeMounts: + - mountPath: /myspace + name: mypvc + volumes: + - name: mypvc + ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + storageClassName: csi-rbd-sc + resources: + requests: + storage: 1Gi From e6949945bbfd01d608a113a39595cee165959fa1 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Fri, 29 Oct 2021 14:38:33 +0530 Subject: [PATCH 06/23] cephfs: add validation for generic ephemeral volumes This commit adds validation of the CSI CephFS driver's support for generic ephemeral volumes. With ephemeral volume support, a user can specify ephemeral volumes in the pod spec and tie the lifecycle of the PVC to the pod. An example pod spec is also included in this commit. 
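Both ephemeral-volume tests gate on k8sVersionGreaterEquals, a helper whose implementation is not part of these diffs. A minimal sketch of such a version gate, assuming the standard discovery client and allowing for distributions that report the minor version with a trailing "+"; the real e2e helper may differ:

package e2e

import (
	"strconv"
	"strings"

	"k8s.io/client-go/kubernetes"
)

// serverAtLeast reports whether the cluster is at least major.minor. It is an
// illustrative stand-in for k8sVersionGreaterEquals, not its real code.
func serverAtLeast(c kubernetes.Interface, major, minor int) (bool, error) {
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		return false, err
	}
	maj, err := strconv.Atoi(v.Major)
	if err != nil {
		return false, err
	}
	// Some distributions report e.g. "21+"; strip the suffix before parsing.
	mnr, err := strconv.Atoi(strings.TrimSuffix(v.Minor, "+"))
	if err != nil {
		return false, err
	}
	return maj > major || (maj == major && mnr >= minor), nil
}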
Signed-off-by: Humble Chirammal --- e2e/cephfs.go | 32 ++++++++++++++++++++++++++++++ examples/cephfs/pod-ephemeral.yaml | 23 +++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 examples/cephfs/pod-ephemeral.yaml diff --git a/e2e/cephfs.go b/e2e/cephfs.go index eb1d553b6..388d6b1a6 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -279,6 +279,7 @@ var _ = Describe("cephfs", func() { appClonePath := cephFSExamplePath + "pod-restore.yaml" appSmartClonePath := cephFSExamplePath + "pod-clone.yaml" snapshotPath := cephFSExamplePath + "snapshot.yaml" + appEphemeralPath := cephFSExamplePath + "pod-ephemeral.yaml" By("checking provisioner deployment is running", func() { err := waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) @@ -312,6 +313,37 @@ var _ = Describe("cephfs", func() { } }) } + By("verify generic ephemeral volume support", func() { + // generic ephemeral volume support is beta since v1.21. + if !k8sVersionGreaterEquals(f.ClientSet, 1, 21) { + Skip("generic ephemeral volume only supported from v1.21+") + } + err := createCephfsStorageClass(f.ClientSet, f, true, nil) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + // create application + app, err := loadApp(appEphemeralPath) + if err != nil { + e2elog.Failf("failed to load application: %v", err) + } + app.Namespace = f.UniqueName + err = createApp(f.ClientSet, app, deployTimeout) + if err != nil { + e2elog.Failf("failed to create application: %v", err) + } + validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup) + // delete pod + err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete application: %v", err) + } + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass: %v", err) + } + }) By("check static PVC", func() { scPath := cephFSExamplePath + "secret.yaml" diff --git a/examples/cephfs/pod-ephemeral.yaml b/examples/cephfs/pod-ephemeral.yaml new file mode 100644 index 000000000..7a81ca94c --- /dev/null +++ b/examples/cephfs/pod-ephemeral.yaml @@ -0,0 +1,23 @@ +--- +kind: Pod +apiVersion: v1 +metadata: + name: csi-cephfs-demo-ephemeral-pod +spec: + containers: + - name: web-server + image: docker.io/library/nginx:latest + volumeMounts: + - mountPath: /myspace + name: mypvc + volumes: + - name: mypvc + ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + storageClassName: csi-cephfs-sc + resources: + requests: + storage: 1Gi From aa600754c1e52fe170bbc8d3314fec01054714ff Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Tue, 2 Nov 2021 14:09:00 +0530 Subject: [PATCH 07/23] deploy: remove expandCSIVolumes feature gate from deployment The `ExpandCSIVolumes` feature gate has been beta since Kubernetes 1.16, so we no longer want to explicitly enable it in the deployment. 
Signed-off-by: Humble Chirammal --- scripts/minikube.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/minikube.sh b/scripts/minikube.sh index 1cb38f5e8..4a7ca00c6 100755 --- a/scripts/minikube.sh +++ b/scripts/minikube.sh @@ -160,7 +160,7 @@ CSI_RESIZER_VERSION=${CSI_RESIZER_VERSION:-"v1.2.0"} CSI_NODE_DRIVER_REGISTRAR_VERSION=${CSI_NODE_DRIVER_REGISTRAR_VERSION:-"v2.2.0"} #feature-gates for kube -K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"ExpandCSIVolumes=true"} +K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-""} #extra-config for kube https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/ EXTRA_CONFIG_PSP="--extra-config=apiserver.enable-admission-plugins=PodSecurityPolicy --addons=pod-security-policy" From 8488d6bec29a143eb948a055c4d5a97adb1c2ae0 Mon Sep 17 00:00:00 2001 From: Rakshith R Date: Mon, 22 Nov 2021 11:36:15 +0530 Subject: [PATCH 08/23] ci: fix helm chart push for release branches Currently, BRANCH_NAME is not set for release branches, causing the source in the helm chart to be set as sources: - https://github.com/ceph/ceph-csi/tree//charts/ceph-csi-cephfs This change fixes it. Signed-off-by: Rakshith R --- .github/workflows/publish-artifacts.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/publish-artifacts.yaml b/.github/workflows/publish-artifacts.yaml index c6791ef57..9a062516c 100644 --- a/.github/workflows/publish-artifacts.yaml +++ b/.github/workflows/publish-artifacts.yaml @@ -24,12 +24,9 @@ jobs: username: ${{ secrets.QUAY_IO_USERNAME }} password: ${{ secrets.QUAY_IO_PASSWORD }} - - name: Set build environment based on Git branch name - if: github.ref == 'refs/heads/devel' - run: echo "BRANCH_NAME=devel" >> $GITHUB_ENV - - name: Set build environment variables run: | + echo "BRANCH_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV echo "GITHUB_USER=${{ secrets.CEPH_CSI_BOT_NAME }}" >> $GITHUB_ENV echo "GITHUB_EMAIL=${{ secrets.CEPH_CSI_BOT_EMAIL }}" >> $GITHUB_ENV echo "GITHUB_TOKEN=${{ secrets.CEPH_CSI_BOT_TOKEN }}" >> $GITHUB_ENV From 4785c55bb8bdd2befea3a9edb61dd6f143ef7b38 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 11:33:50 +0530 Subject: [PATCH 09/23] e2e: reformat error message with proper error formatting To make error reporting consistent across e2e tests, we have decided to drop the `with error` phrasing from the logs; this commit does that for the rbd tests. 
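The style the rbd tests converge on below is the idiomatic Go error format: a lowercase description of the failed action, a colon, then the error value. A small illustrative sketch (not code from the patch) contrasting the two styles, plus the %w form used when an error is returned rather than logged:

package main

import (
	"errors"
	"fmt"
)

var errBoom = errors.New("boom")

// createPVC is a hypothetical stand-in for any e2e helper that can fail.
func createPVC() error { return errBoom }

func main() {
	if err := createPVC(); err != nil {
		// before this patch: "failed to create PVC with error boom"
		// after this patch:  "failed to create PVC: boom"
		fmt.Printf("failed to create PVC: %v\n", err)
	}
	// For errors that are returned instead of logged, %w keeps the chain
	// inspectable with errors.Is / errors.As.
	wrapped := fmt.Errorf("failed to create PVC: %w", errBoom)
	fmt.Println(errors.Is(wrapped, errBoom)) // true
}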
Signed-off-by: Humble Chirammal --- e2e/rbd.go | 626 ++++++++++++++++++++++++++--------------------------- 1 file changed, 313 insertions(+), 313 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index 7d72e7fa2..c381325ab 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -69,20 +69,20 @@ func deployRBDPlugin() { // delete objects deployed by rook data, err := replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdProvisionerRBAC, err) } err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true") if err != nil { - e2elog.Failf("failed to delete provisioner rbac %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) + e2elog.Failf("failed to delete provisioner rbac %s: %v", rbdDirPath+rbdProvisionerRBAC, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdNodePluginRBAC, err) } err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true") if err != nil { - e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) + e2elog.Failf("failed to delete nodeplugin rbac %s: %v", rbdDirPath+rbdNodePluginRBAC, err) } createORDeleteRbdResources(kubectlCreate) @@ -98,12 +98,12 @@ func createORDeleteRbdResources(action kubectlAction) { // createORDeleteRbdResources is used for upgrade testing as csidriverObject is // newly added, discarding file not found error. if !os.IsNotExist(err) { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+csiDriverObject, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+csiDriverObject, err) } } else { err = retryKubectlInput(cephCSINamespace, action, string(csiDriver), deployTimeout) if err != nil { - e2elog.Failf("failed to %s CSIDriver object with error %v", action, err) + e2elog.Failf("failed to %s CSIDriver object: %v", action, err) } } cephConf, err := ioutil.ReadFile(examplePath + cephConfconfigMap) @@ -111,78 +111,78 @@ func createORDeleteRbdResources(action kubectlAction) { // createORDeleteRbdResources is used for upgrade testing as cephConf Configmap is // newly added, discarding file not found error. 
if !os.IsNotExist(err) { - e2elog.Failf("failed to read content from %s with error %v", examplePath+cephConfconfigMap, err) + e2elog.Failf("failed to read content from %s: %v", examplePath+cephConfconfigMap, err) } } else { err = retryKubectlInput(cephCSINamespace, action, string(cephConf), deployTimeout) if err != nil { - e2elog.Failf("failed to %s ceph-conf configmap object with error %v", action, err) + e2elog.Failf("failed to %s ceph-conf configmap object: %v", action, err) } } data, err := replaceNamespaceInTemplate(rbdDirPath + rbdProvisioner) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisioner, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdProvisioner, err) } data = oneReplicaDeployYaml(data) data = enableTopologyInTemplate(data) err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s rbd provisioner with error %v", action, err) + e2elog.Failf("failed to %s rbd provisioner: %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdProvisionerRBAC, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s provisioner rbac with error %v", action, err) + e2elog.Failf("failed to %s provisioner rbac: %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerPSP) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerPSP, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdProvisionerPSP, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s provisioner psp with error %v", action, err) + e2elog.Failf("failed to %s provisioner psp: %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePlugin) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePlugin, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdNodePlugin, err) } domainLabel := nodeRegionLabel + "," + nodeZoneLabel data = addTopologyDomainsToDSYaml(data, domainLabel) err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s nodeplugin with error %v", action, err) + e2elog.Failf("failed to %s nodeplugin: %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdNodePluginRBAC, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s nodeplugin rbac with error %v", action, err) + e2elog.Failf("failed to %s nodeplugin rbac: %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginPSP) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginPSP, err) + e2elog.Failf("failed to read content from %s: %v", rbdDirPath+rbdNodePluginPSP, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s nodeplugin psp 
with error %v", action, err) + e2elog.Failf("failed to %s nodeplugin psp: %v", action, err) } } func validateRBDImageCount(f *framework.Framework, count int, pool string) { imageList, err := listRBDImages(f, pool) if err != nil { - e2elog.Failf("failed to list rbd images with error %v", err) + e2elog.Failf("failed to list rbd images: %v", err) } if len(imageList) != count { e2elog.Failf( @@ -207,67 +207,67 @@ var _ = Describe("RBD", func() { if deployRBD { err := createNodeLabel(f, nodeRegionLabel, regionValue) if err != nil { - e2elog.Failf("failed to create node label with error %v", err) + e2elog.Failf("failed to create node label: %v", err) } err = createNodeLabel(f, nodeZoneLabel, zoneValue) if err != nil { - e2elog.Failf("failed to create node label with error %v", err) + e2elog.Failf("failed to create node label: %v", err) } if cephCSINamespace != defaultNs { err = createNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to create namespace with error %v", err) + e2elog.Failf("failed to create namespace: %v", err) } } deployRBDPlugin() } err := createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } // Since helm deploys storageclass, skip storageclass creation if // ceph-csi is deployed via helm. if !helmTest { err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } } // create rbd provisioner secret key, err := createCephUser(f, keyringRBDProvisionerUsername, rbdProvisionerCaps("", "")) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringRBDProvisionerUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringRBDProvisionerUsername, err) } err = createRBDSecret(f, rbdProvisionerSecretName, keyringRBDProvisionerUsername, key) if err != nil { - e2elog.Failf("failed to create provisioner secret with error %v", err) + e2elog.Failf("failed to create provisioner secret: %v", err) } // create rbd plugin secret key, err = createCephUser(f, keyringRBDNodePluginUsername, rbdNodePluginCaps("", "")) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringRBDNodePluginUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringRBDNodePluginUsername, err) } err = createRBDSecret(f, rbdNodePluginSecretName, keyringRBDNodePluginUsername, key) if err != nil { - e2elog.Failf("failed to create node secret with error %v", err) + e2elog.Failf("failed to create node secret: %v", err) } deployVault(f.ClientSet, deployTimeout) // wait for provisioner deployment err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err) + e2elog.Failf("timeout waiting for deployment %s: %v", rbdDeploymentName, err) } // wait for nodeplugin deamonset pods err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err) + e2elog.Failf("timeout waiting for daemonset %s: %v", rbdDaemonsetName, err) } kernelRelease, err = getKernelVersionFromDaemonset(f, cephCSINamespace, rbdDaemonsetName, "csi-rbdplugin") if err != nil { - e2elog.Failf("failed to get the kernel version with error %v", 
err) + e2elog.Failf("failed to get the kernel version: %v", err) } // default io-timeout=0, needs kernel >= 5.4 if !util.CheckKernelSupport(kernelRelease, nbdZeroIOtimeoutSupport) { @@ -293,23 +293,23 @@ var _ = Describe("RBD", func() { err := deleteConfigMap(rbdDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), rbdProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete provisioner secret with error %v", err) + e2elog.Failf("failed to delete provisioner secret: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), rbdNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete node secret with error %v", err) + e2elog.Failf("failed to delete node secret: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } // deleteResource(rbdExamplePath + "snapshotclass.yaml") deleteVault() @@ -318,26 +318,26 @@ var _ = Describe("RBD", func() { if cephCSINamespace != defaultNs { err = deleteNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to delete namespace with error %v", err) + e2elog.Failf("failed to delete namespace: %v", err) } } } err = deleteNodeLabel(c, nodeRegionLabel) if err != nil { - e2elog.Failf("failed to delete node label with error %v", err) + e2elog.Failf("failed to delete node label: %v", err) } err = deleteNodeLabel(c, nodeZoneLabel) if err != nil { - e2elog.Failf("failed to delete node label with error %v", err) + e2elog.Failf("failed to delete node label: %v", err) } // Remove the CSI labels that get added err = deleteNodeLabel(c, nodeCSIRegionLabel) if err != nil { - e2elog.Failf("failed to delete node label with error %v", err) + e2elog.Failf("failed to delete node label: %v", err) } err = deleteNodeLabel(c, nodeCSIZoneLabel) if err != nil { - e2elog.Failf("failed to delete node label with error %v", err) + e2elog.Failf("failed to delete node label: %v", err) } }) @@ -348,23 +348,23 @@ var _ = Describe("RBD", func() { By("verify PVC and app binding on helm installation", func() { err := validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) // Deleting the storageclass and secret created by helm err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = deleteResource(rbdExamplePath + "secret.yaml") if err != nil { - e2elog.Failf("failed to delete secret with error %v", err) + e2elog.Failf("failed to delete secret: %v", err) } // Re-create the RBD storageclass err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) } @@ -403,43 +403,43 @@ var _ = Describe("RBD", func() { By("validate RBD migration+static Block PVC Deletion", func() { err := 
generateClusterIDConfigMapForMigration(f, c) if err != nil { - e2elog.Failf("failed to generate clusterID configmap with error %v", err) + e2elog.Failf("failed to generate clusterID configmap: %v", err) } // create a sc with different migration secret err = createMigrationUserSecretAndSC(f, "migrationsc") if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validateRBDStaticMigrationPVDeletion(f, rawAppPath, "migrationsc", true) if err != nil { - e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err) + e2elog.Failf("failed to validate rbd migrated static block pv: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteConfigMap(rbdDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } err = deleteProvNodeMigrationSecret(f, true, true) if err != nil { - e2elog.Failf("failed to delete migration users and Secrets associated with error %v", err) + e2elog.Failf("failed to delete migration users and Secrets associated: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and validate owner", func() { err := validateImageOwner(pvcPath, f) if err != nil { - e2elog.Failf("failed to validate owner of pvc with error %v", err) + e2elog.Failf("failed to validate owner of pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -448,7 +448,7 @@ var _ = Describe("RBD", func() { By("create a PVC and bind it to an app", func() { err := validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -457,7 +457,7 @@ var _ = Describe("RBD", func() { By("create a PVC and bind it to an app with normal user", func() { err := validateNormalUserPVCAccess(pvcPath, f) if err != nil { - e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err) + e2elog.Failf("failed to validate normal user pvc and application binding: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -466,7 +466,7 @@ var _ = Describe("RBD", func() { By("create a PVC and bind it to an app with ext4 as the FS ", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -476,28 +476,28 @@ var _ = Describe("RBD", func() { map[string]string{"csi.storage.k8s.io/fstype": "ext4"}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application 
binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and bind it to an app using rbd-nbd mounter", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -511,21 +511,21 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -533,7 +533,7 @@ var _ = Describe("RBD", func() { if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } // Storage class with rbd-nbd mounter err = createRBDStorageClass( @@ -548,12 +548,12 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } // Block PVC resize err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Failf("failed to resize block PVC with error %v", err) + e2elog.Failf("failed to resize block PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -561,17 +561,17 @@ var _ = Describe("RBD", func() { // FileSystem PVC resize err = resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to resize filesystem PVC with error %v", err) + e2elog.Failf("failed to resize filesystem PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create 
storageclass: %v", err) } } }) @@ -579,7 +579,7 @@ var _ = Describe("RBD", func() { By("perform IO on rbd-nbd volume after nodeplugin restart", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } // Storage class with rbd-nbd mounter err = createRBDStorageClass( @@ -594,17 +594,17 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName @@ -615,7 +615,7 @@ var _ = Describe("RBD", func() { app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } appOpt := metav1.ListOptions{ @@ -638,18 +638,18 @@ var _ = Describe("RBD", func() { selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName) if err != nil { - e2elog.Failf("failed to get the labels with error %v", err) + e2elog.Failf("failed to get the labels: %v", err) } // delete rbd nodeplugin pods err = deletePodWithLabel(selector, cephCSINamespace, false) if err != nil { - e2elog.Failf("fail to delete pod with error %v", err) + e2elog.Failf("fail to delete pod: %v", err) } // wait for nodeplugin pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset pods with error %v", err) + e2elog.Failf("timeout waiting for daemonset pods: %v", err) } opt := metav1.ListOptions{ @@ -724,24 +724,24 @@ var _ = Describe("RBD", func() { err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } // Storage class with rbd-nbd mounter err = createRBDStorageClass( @@ -757,28 +757,28 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) if 
err != nil { - e2elog.Failf("failed to validate encrypted pvc with error %v", err) + e2elog.Failf("failed to validate encrypted pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -788,28 +788,28 @@ var _ = Describe("RBD", func() { map[string]string{"encrypted": "true"}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) if err != nil { - e2elog.Failf("failed to validate encrypted pvc with error %v", err) + e2elog.Failf("failed to validate encrypted pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("Resize Encrypted Block PVC and check Device size", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -819,13 +819,13 @@ var _ = Describe("RBD", func() { map[string]string{"encrypted": "true"}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } // FileSystem PVC resize err = resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to resize filesystem PVC with error %v", err) + e2elog.Failf("failed to resize filesystem PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -833,25 +833,25 @@ var _ = Describe("RBD", func() { // Block PVC resize err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Failf("failed to resize block PVC with error %v", err) + e2elog.Failf("failed to resize block PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + 
e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } scOpts := map[string]string{ "encrypted": "true", @@ -859,28 +859,28 @@ var _ = Describe("RBD", func() { } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultKMS, f) if err != nil { - e2elog.Failf("failed to validate encrypted pvc with error %v", err) + e2elog.Failf("failed to validate encrypted pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } scOpts := map[string]string{ "encrypted": "true", @@ -888,7 +888,7 @@ var _ = Describe("RBD", func() { } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } // name(space) of the Tenant @@ -906,7 +906,7 @@ var _ = Describe("RBD", func() { err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, vaultTokensKMS, f) if err != nil { - e2elog.Failf("failed to validate encrypted pvc with error %v", err) + e2elog.Failf("failed to validate encrypted pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -919,11 +919,11 @@ var _ = Describe("RBD", func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -966,7 +966,7 @@ var _ = Describe("RBD", func() { By("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } scOpts := map[string]string{ "encrypted": "true", @@ -974,21 +974,21 @@ var _ = Describe("RBD", func() { } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) 
+ e2elog.Failf("failed to create storageclass: %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, noKMS, f) if err != nil { - e2elog.Failf("failed to validate encrypted pvc with error %v", err) + e2elog.Failf("failed to validate encrypted pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -1100,7 +1100,7 @@ var _ = Describe("RBD", func() { func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -1115,19 +1115,19 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }, ) @@ -1160,31 +1160,31 @@ var _ = Describe("RBD", func() { By("create a thick-provisioned PVC-PVC clone and bind it to an app", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{ "thickProvision": "true", }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } validatePVCClone(1, pvcPath, appPath, pvcSmartClonePath, appSmartClonePath, noKMS, isThickPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create an encrypted PVC snapshot and restore it for an app with VaultKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } scOpts := map[string]string{ "encrypted": "true", @@ -1192,7 +1192,7 @@ var _ = Describe("RBD", func() { } err = createRBDStorageClass(f.ClientSet, f, 
defaultSCName, nil, scOpts, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } validatePVCSnapshot(1, @@ -1203,11 +1203,11 @@ var _ = Describe("RBD", func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -1337,7 +1337,7 @@ var _ = Describe("RBD", func() { By("create an encrypted PVC-PVC clone and bind it to an app", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } scOpts := map[string]string{ "encrypted": "true", @@ -1345,25 +1345,25 @@ var _ = Describe("RBD", func() { } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } validatePVCClone(1, pvcPath, appPath, pvcSmartClonePath, appSmartClonePath, secretsMetadataKMS, isEncryptedPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create an encrypted PVC-PVC clone and bind it to an app with VaultKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } scOpts := map[string]string{ "encrypted": "true", @@ -1371,31 +1371,31 @@ var _ = Describe("RBD", func() { } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } validatePVCClone(1, pvcPath, appPath, pvcSmartClonePath, appSmartClonePath, vaultKMS, isEncryptedPVC, f) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a block type PVC and bind it to an app", func() { err := validatePVCAndAppBinding(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } }) By("create a Block mode PVC-PVC clone and bind it to an app", func() { _, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Failf("failed to get server version with error 
%v", err) + e2elog.Failf("failed to get server version: %v", err) } validatePVCClone( defaultCloneCount, @@ -1411,13 +1411,13 @@ var _ = Describe("RBD", func() { totalCount := 2 pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName // create PVC and app @@ -1425,7 +1425,7 @@ var _ = Describe("RBD", func() { name := fmt.Sprintf("%s%d", f.UniqueName, i) err := createPVCAndApp(name, f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } } @@ -1436,7 +1436,7 @@ var _ = Describe("RBD", func() { name := fmt.Sprintf("%s%d", f.UniqueName, i) err := deletePVCAndApp(name, f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } } @@ -1448,7 +1448,7 @@ var _ = Describe("RBD", func() { By("check data persist after recreating pod", func() { err := checkDataPersist(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to check data persist with error %v", err) + e2elog.Failf("failed to check data persist: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1462,7 +1462,7 @@ var _ = Describe("RBD", func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -1472,11 +1472,11 @@ var _ = Describe("RBD", func() { map[string]string{"csi.storage.k8s.io/fstype": "xfs"}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to resize filesystem PVC with error %v", err) + e2elog.Failf("failed to resize filesystem PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1485,7 +1485,7 @@ var _ = Describe("RBD", func() { By("Resize Block PVC and check Device size", func() { err := resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Failf("failed to resize block PVC with error %v", err) + e2elog.Failf("failed to resize block PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1494,18 +1494,18 @@ var _ = Describe("RBD", func() { By("Test unmount after nodeplugin restart", func() { pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } // validate created backend rbd images @@ -1513,17 +1513,17 @@ 
var _ = Describe("RBD", func() { // delete rbd nodeplugin pods err = deletePodWithLabel("app=csi-rbdplugin", cephCSINamespace, false) if err != nil { - e2elog.Failf("fail to delete pod with error %v", err) + e2elog.Failf("fail to delete pod: %v", err) } // wait for nodeplugin pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset pods with error %v", err) + e2elog.Failf("timeout waiting for daemonset pods: %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1533,7 +1533,7 @@ var _ = Describe("RBD", func() { volumeNamePrefix := "foo-bar-" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -1543,17 +1543,17 @@ var _ = Describe("RBD", func() { map[string]string{"volumeNamePrefix": volumeNamePrefix}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } // set up PVC pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // validate created backend rbd images @@ -1562,7 +1562,7 @@ var _ = Describe("RBD", func() { foundIt := false images, err := listRBDImages(f, defaultRBDPool) if err != nil { - e2elog.Failf("failed to list rbd images with error %v", err) + e2elog.Failf("failed to list rbd images: %v", err) } for _, imgName := range images { fmt.Printf("Checking prefix on %s\n", imgName) @@ -1576,18 +1576,18 @@ var _ = Describe("RBD", func() { // clean up after ourselves err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } if !foundIt { e2elog.Failf("could not find image with prefix %s", volumeNamePrefix) @@ -1597,7 +1597,7 @@ var _ = Describe("RBD", func() { By("validate RBD static FileSystem PVC", func() { err := validateRBDStaticPV(f, appPath, false, false) if err != nil { - e2elog.Failf("failed to validate rbd static pv with error %v", err) + e2elog.Failf("failed to validate rbd static pv: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1606,7 +1606,7 @@ var _ = Describe("RBD", func() { By("validate RBD static Block PVC", func() { err := validateRBDStaticPV(f, 
rawAppPath, true, false) if err != nil { - e2elog.Failf("failed to validate rbd block pv with error %v", err) + e2elog.Failf("failed to validate rbd block pv: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1615,66 +1615,66 @@ var _ = Describe("RBD", func() { By("validate RBD migration+static FileSystem PVC", func() { err := generateClusterIDConfigMapForMigration(f, c) if err != nil { - e2elog.Failf("failed to generate clusterID configmap with error %v", err) + e2elog.Failf("failed to generate clusterID configmap: %v", err) } // create node user and migration secret. err = createProvNodeCephUserAndSecret(f, false, true) if err != nil { - e2elog.Failf("failed to create users and secret with error %v", err) + e2elog.Failf("failed to create users and secret: %v", err) } err = validateRBDStaticMigrationPV(f, appPath, rbdMigrationNodePluginSecretName, false) if err != nil { - e2elog.Failf("failed to validate rbd migrated static pv with error %v", err) + e2elog.Failf("failed to validate rbd migrated static pv: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteProvNodeMigrationSecret(f, false, true) if err != nil { - e2elog.Failf("failed to delete users and secret with error %v", err) + e2elog.Failf("failed to delete users and secret: %v", err) } err = deleteConfigMap(rbdDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } }) By("validate RBD migration+static Block PVC", func() { err := generateClusterIDConfigMapForMigration(f, c) if err != nil { - e2elog.Failf("failed to generate clusterID configmap with error %v", err) + e2elog.Failf("failed to generate clusterID configmap: %v", err) } // create node user and migration secret. 
err = createProvNodeCephUserAndSecret(f, false, true) if err != nil { - e2elog.Failf("failed to create users and secret with error %v", err) + e2elog.Failf("failed to create users and secret: %v", err) } err = validateRBDStaticMigrationPV(f, rawAppPath, rbdMigrationNodePluginSecretName, true) if err != nil { - e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err) + e2elog.Failf("failed to validate rbd migrated static block pv: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteProvNodeMigrationSecret(f, false, true) if err != nil { - e2elog.Failf("failed to delete users and secret with error %v", err) + e2elog.Failf("failed to delete users and secret: %v", err) } err = deleteConfigMap(rbdDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } }) @@ -1691,7 +1691,7 @@ var _ = Describe("RBD", func() { mountFlags := []string{"discard"} err := checkMountOptions(pvcPath, appPath, f, mountFlags) if err != nil { - e2elog.Failf("failed to check mount options with error %v", err) + e2elog.Failf("failed to check mount options: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -1701,18 +1701,18 @@ var _ = Describe("RBD", func() { By("checking node has required CSI topology labels set", func() { err := checkNodeHasLabel(f.ClientSet, nodeCSIRegionLabel, regionValue) if err != nil { - e2elog.Failf("failed to check node label with error %v", err) + e2elog.Failf("failed to check node label: %v", err) } err = checkNodeHasLabel(f.ClientSet, nodeCSIZoneLabel, zoneValue) if err != nil { - e2elog.Failf("failed to check node label with error %v", err) + e2elog.Failf("failed to check node label: %v", err) } }) By("creating a StorageClass with delayed binding mode and CSI topology parameter") err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"domainSegments\":" + "[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," + @@ -1721,46 +1721,46 @@ var _ = Describe("RBD", func() { map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}, map[string]string{"topologyConstrainedPools": topologyConstraint}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } By("creating an app using a PV from the delayed binding mode StorageClass") pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, 0) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } By("ensuring created PV has required node selector values populated") err = checkPVSelectorValuesForPVC(f, pvc) if err != nil { - e2elog.Failf("failed to check pv selector values with error %v", err) + e2elog.Failf("failed to check pv selector values: %v", err) } By("ensuring created PV has its image in the topology specific pool") err = checkPVCImageInPool(f, pvc, rbdTopologyPool) if err != nil { - e2elog.Failf("failed to check image in pool 
with error %v", err) + e2elog.Failf("failed to check image in pool: %v", err) } By("ensuring created PV has its image journal in the topology specific pool") err = checkPVCImageJournalInPool(f, pvc, rbdTopologyPool) if err != nil { - e2elog.Failf("failed to check image journal with error %v", err) + e2elog.Failf("failed to check image journal: %v", err) } By("ensuring created PV has its CSI journal in the CSI journal specific pool") err = checkPVCCSIJournalInPool(f, pvc, "replicapool") if err != nil { - e2elog.Failf("failed to check csi journal in pool with error %v", err) + e2elog.Failf("failed to check csi journal in pool: %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } By("checking if data pool parameter is honored", func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"dataPool\":\"" + rbdTopologyDataPool + "\",\"domainSegments\":" + @@ -1770,41 +1770,41 @@ var _ = Describe("RBD", func() { map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}, map[string]string{"topologyConstrainedPools": topologyConstraint}, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } By("creating an app using a PV from the delayed binding mode StorageClass with a data pool") pvc, app, err = createPVCAndAppBinding(pvcPath, appPath, f, 0) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } By("ensuring created PV has its image in the topology specific pool") err = checkPVCImageInPool(f, pvc, rbdTopologyPool) if err != nil { - e2elog.Failf("failed to check pvc image in pool with error %v", err) + e2elog.Failf("failed to check pvc image in pool: %v", err) } By("ensuring created image has the right data pool parameter set") err = checkPVCDataPoolForImageInPool(f, pvc, rbdTopologyPool, rbdTopologyDataPool) if err != nil { - e2elog.Failf("failed to check data pool for image with error %v", err) + e2elog.Failf("failed to check data pool for image: %v", err) } // cleanup and undo changes made by the test err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } }) // cleanup and undo changes made by the test err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -1813,7 +1813,7 @@ var _ = Describe("RBD", func() { By("Mount pvc to pod with invalid mount option", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( f.ClientSet, @@ -1823,22 
+1823,22 @@ var _ = Describe("RBD", func() { nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -1850,18 +1850,18 @@ var _ = Describe("RBD", func() { } err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -1870,11 +1870,11 @@ var _ = Describe("RBD", func() { // create pool for clones err := createPool(f, clonePool) if err != nil { - e2elog.Failf("failed to create pool %s with error %v", clonePool, err) + e2elog.Failf("failed to create pool %s: %v", clonePool, err) } err = createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create snapshotclass with error %v", err) + e2elog.Failf("failed to create snapshotclass: %v", err) } cloneSC := "clone-storageclass" param := map[string]string{ @@ -1883,11 +1883,11 @@ var _ = Describe("RBD", func() { // create new storageclass with new pool err = createRBDStorageClass(f.ClientSet, f, cloneSC, nil, param, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validateCloneInDifferentPool(f, defaultRBDPool, cloneSC, clonePool) if err != nil { - e2elog.Failf("failed to validate clones in different pool with error %v", err) + e2elog.Failf("failed to validate clones in different pool: %v", err) } err = retryKubectlArgs( @@ -1898,33 +1898,33 @@ var _ = Describe("RBD", func() { cloneSC, "--ignore-not-found=true") if err != nil { - e2elog.Failf("failed to delete storageclass %s with error %v", cloneSC, err) + e2elog.Failf("failed to delete storageclass %s: %v", cloneSC, err) } err = deleteResource(rbdExamplePath + "snapshotclass.yaml") if err != nil { - e2elog.Failf("failed to delete snapshotclass with error %v", err) + e2elog.Failf("failed to delete snapshotclass: %v", err) } // validate images in trash err = waitToRemoveImagesFromTrash(f, clonePool, deployTimeout) if err != nil { - e2elog.Failf("failed to validate rbd images in pool %s trash with error %v", clonePool, err) + e2elog.Failf("failed to validate rbd images in pool %s trash: %v", clonePool, err) } err = waitToRemoveImagesFromTrash(f, defaultRBDPool, deployTimeout) if err != nil { - 
e2elog.Failf("failed to validate rbd images in pool %s trash with error %v", defaultRBDPool, err) + e2elog.Failf("failed to validate rbd images in pool %s trash: %v", defaultRBDPool, err) } err = deletePool(clonePool, false, f) if err != nil { - e2elog.Failf("failed to delete pool %s with error %v", clonePool, err) + e2elog.Failf("failed to delete pool %s: %v", clonePool, err) } }) By("create ROX PVC clone and mount it to multiple pods", func() { err := createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } defer func() { err = deleteRBDSnapshotClass() @@ -1936,25 +1936,25 @@ var _ = Describe("RBD", func() { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) // delete pod as we should not create snapshot for in-use pvc err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } snap := getSnapshot(snapshotPath) @@ -1963,7 +1963,7 @@ var _ = Describe("RBD", func() { err = createSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to create snapshot with error %v", err) + e2elog.Failf("failed to create snapshot: %v", err) } // validate created backend rbd images // parent PVC + snapshot @@ -1971,7 +1971,7 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, totalImages, defaultRBDPool) pvcClone, err := loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } // create clone PVC as ROX @@ -1979,7 +1979,7 @@ var _ = Describe("RBD", func() { pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // validate created backend rbd images // parent pvc+ snapshot + clone @@ -1988,7 +1988,7 @@ var _ = Describe("RBD", func() { appClone, err := loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } totalCount := 2 @@ -2005,7 +2005,7 @@ var _ = Describe("RBD", func() { appClone.Name = name err = createApp(f.ClientSet, appClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create application with error %v", err) + e2elog.Failf("failed to create application: %v", err) } } @@ -2033,23 +2033,23 @@ var _ = Describe("RBD", func() { appClone.Name = name err = deletePod(appClone.Name, appClone.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } } // delete PVC 
clone err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // delete snapshot err = deleteSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to delete snapshot with error %v", err) + e2elog.Failf("failed to delete snapshot: %v", err) } // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -2058,7 +2058,7 @@ var _ = Describe("RBD", func() { By("validate PVC mounting if snapshot and parent PVC are deleted", func() { err := createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create snapshotclass: %v", err) } defer func() { err = deleteRBDSnapshotClass() @@ -2070,18 +2070,18 @@ var _ = Describe("RBD", func() { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2092,7 +2092,7 @@ var _ = Describe("RBD", func() { err = createSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to create snapshot with error %v", err) + e2elog.Failf("failed to create snapshot: %v", err) } // validate created backend rbd images // parent PVC + snapshot @@ -2100,13 +2100,13 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, totalImages, defaultRBDPool) pvcClone, err := loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } // delete parent PVC err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2115,7 +2115,7 @@ var _ = Describe("RBD", func() { pvcClone.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // validate created backend rbd images = snapshot + clone totalImages = 2 @@ -2124,7 +2124,7 @@ var _ = Describe("RBD", func() { // delete snapshot err = deleteSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to delete snapshot with error %v", err) + e2elog.Failf("failed to delete snapshot: %v", err) } // validate created backend rbd images = clone @@ -2133,7 +2133,7 @@ var _ = Describe("RBD", func() { appClone, err := loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) }
appClone.Namespace = f.UniqueName appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name @@ -2141,17 +2141,17 @@ var _ = Describe("RBD", func() { // create application err = createApp(f.ClientSet, appClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create application with error %v", err) + e2elog.Failf("failed to create application: %v", err) } err = deletePod(appClone.Name, appClone.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } // delete PVC clone err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -2164,7 +2164,7 @@ var _ = Describe("RBD", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( @@ -2178,12 +2178,12 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create snapshotclass: %v", err) } defer func() { @@ -2193,29 +2193,29 @@ var _ = Describe("RBD", func() { } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }() // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2228,7 +2228,7 @@ var _ = Describe("RBD", func() { err = createSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to create snapshot with error %v", err) + e2elog.Failf("failed to create snapshot: %v", err) } // validate created backend rbd images // parent PVC + snapshot @@ -2236,13 +2236,13 @@ var _ = Describe("RBD", func() { validateRBDImageCount(f, totalImages, defaultRBDPool) pvcClone, err = loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } // delete parent PVC err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) }
// validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2253,7 +2253,7 @@ var _ = Describe("RBD", func() { pvcClone.Spec.DataSource.Name = snap.Name err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // validate created backend rbd images = snapshot + clone totalImages = 2 @@ -2262,7 +2262,7 @@ var _ = Describe("RBD", func() { // delete snapshot err = deleteSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to delete snapshot with error %v", err) + e2elog.Failf("failed to delete snapshot: %v", err) } // validate created backend rbd images = clone @@ -2273,7 +2273,7 @@ var _ = Describe("RBD", func() { // create application err = createApp(f.ClientSet, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create application with error %v", err) + e2elog.Failf("failed to create application: %v", err) } pvc = pvcClone @@ -2281,12 +2281,12 @@ var _ = Describe("RBD", func() { err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } // delete PVC clone err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -2297,7 +2297,7 @@ var _ = Describe("RBD", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass( @@ -2311,34 +2311,34 @@ var _ = Describe("RBD", func() { }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } defer func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }() // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2347,7 +2347,7 @@ var _ = Describe("RBD", func() { var pvcClone *v1.PersistentVolumeClaim pvcClone, err = loadPVC(pvcSmartClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } // create clone PVC @@ -2356,20 +2356,20 @@ var _ = Describe("RBD", 
func() { pvcClone.Spec.DataSource.Name = pvc.Name err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // delete parent PVC err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name // create application err = createApp(f.ClientSet, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create application with error %v", err) + e2elog.Failf("failed to create application: %v", err) } pvc = pvcClone @@ -2377,12 +2377,12 @@ var _ = Describe("RBD", func() { err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } // delete PVC clone err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -2393,30 +2393,30 @@ var _ = Describe("RBD", func() { radosNamespace = radosNS err := deleteConfigMap(rbdDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with Error: %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } err = createRadosNamespace(f) if err != nil { - e2elog.Failf("failed to create rados namespace with error %v", err) + e2elog.Failf("failed to create rados namespace: %v", err) } // delete csi pods err = deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)", cephCSINamespace, false) if err != nil { - e2elog.Failf("failed to delete pods with labels with error %v", err) + e2elog.Failf("failed to delete pods with labels: %v", err) } // wait for csi pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset pods with error %v", err) + e2elog.Failf("timeout waiting for daemonset pods: %v", err) } err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for deployment to be in running state with error %v", err) + e2elog.Failf("timeout waiting for deployment to be in running state: %v", err) } } @@ -2428,11 +2428,11 @@ var _ = Describe("RBD", func() { rbdProvisionerCaps(defaultRBDPool, radosNamespace), ) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringRBDNamespaceProvisionerUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringRBDNamespaceProvisionerUsername, err) } err = createRBDSecret(f, rbdNamespaceProvisionerSecretName, keyringRBDNamespaceProvisionerUsername, key) if err != nil { - e2elog.Failf("failed to create provisioner secret with error %v", err) + e2elog.Failf("failed to create provisioner secret: %v", err) } // create rbd plugin secret key, err = createCephUser( @@ -2440,16 +2440,16 @@ var _ = Describe("RBD", func() { keyringRBDNamespaceNodePluginUsername, rbdNodePluginCaps(defaultRBDPool,
radosNamespace)) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringRBDNamespaceNodePluginUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringRBDNamespaceNodePluginUsername, err) } err = createRBDSecret(f, rbdNamespaceNodePluginSecretName, keyringRBDNamespaceNodePluginUsername, key) if err != nil { - e2elog.Failf("failed to create node secret with error %v", err) + e2elog.Failf("failed to create node secret: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } param := make(map[string]string) // override existing secrets @@ -2462,12 +2462,12 @@ var _ = Describe("RBD", func() { err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, param, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validateImageOwner(pvcPath, f) if err != nil { - e2elog.Failf("failed to validate owner of pvc with error %v", err) + e2elog.Failf("failed to validate owner of pvc: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -2475,13 +2475,13 @@ var _ = Describe("RBD", func() { // Create a PVC and bind it to an app within the namesapce err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } // Resize Block PVC and check Device size within the namespace err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Failf("failed to resize block PVC with error %v", err) + e2elog.Failf("failed to resize block PVC: %v", err) } // Resize Filesystem PVC and check application directory size @@ -2493,7 +2493,7 @@ var _ = Describe("RBD", func() { // Create a PVC clone and bind it to an app within the namespace err = createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create snapshotclass: %v", err) } defer func() { err = deleteRBDSnapshotClass() @@ -2504,13 +2504,13 @@ var _ = Describe("RBD", func() { pvc, pvcErr := loadPVC(pvcPath) if pvcErr != nil { - e2elog.Failf("failed to load PVC with error %v", pvcErr) + e2elog.Failf("failed to load PVC: %v", pvcErr) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2520,61 +2520,61 @@ var _ = Describe("RBD", func() { snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name err = createSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to create snapshot with error %v", err) + e2elog.Failf("failed to create snapshot: %v", err) } validateRBDImageCount(f, 2, defaultRBDPool) err = validatePVCAndAppBinding(pvcClonePath, appClonePath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } err = deleteSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to delete snapshot with error %v", err) + e2elog.Failf("failed to delete snapshot: %v", err) } //
as snapshot is deleted the image count should be one validateRBDImageCount(f, 1, defaultRBDPool) err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } validateRBDImageCount(f, 0, defaultRBDPool) err = waitToRemoveImagesFromTrash(f, defaultRBDPool, deployTimeout) if err != nil { - e2elog.Failf("failed to validate rbd images in pool %s trash with error %v", rbdOptions(defaultRBDPool), err) + e2elog.Failf("failed to validate rbd images in pool %s trash: %v", rbdOptions(defaultRBDPool), err) } // delete RBD provisioner secret err = deleteCephUser(f, keyringRBDNamespaceProvisionerUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringRBDNamespaceProvisionerUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringRBDNamespaceProvisionerUsername, err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), rbdNamespaceProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete provisioner secret with error %v", err) + e2elog.Failf("failed to delete provisioner secret: %v", err) } // delete RBD plugin secret err = deleteCephUser(f, keyringRBDNamespaceNodePluginUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringRBDNamespaceNodePluginUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringRBDNamespaceNodePluginUsername, err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), rbdNamespaceNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete node secret with error %v", err) + e2elog.Failf("failed to delete node secret: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } updateConfigMap("") }) @@ -2583,14 +2583,14 @@ var _ = Describe("RBD", func() { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName @@ -2602,7 +2602,7 @@ var _ = Describe("RBD", func() { app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC and application with error %v", err) + e2elog.Failf("failed to create PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) @@ -2625,7 +2625,7 @@ var _ = Describe("RBD", func() { // delete PVC and app err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC and application with error %v", err) + e2elog.Failf("failed to delete PVC and application: %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0, defaultRBDPool) @@ -2634,18 +2634,18 @@ var _ = Describe("RBD", func() { By("create a thick-provisioned PVC", 
func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{ "thickProvision": "true", }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } pvc, err := loadPVC(rawPvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error: %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvcSizes := []string{ @@ -2664,37 +2664,37 @@ var _ = Describe("RBD", func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("create a PVC and Bind it to an app for mapped rbd image with options", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{ "mapOptions": "lock_on_read,queue_depth=1024", "unmapOptions": "force", }, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application binding with error %v", err) + e2elog.Failf("failed to validate pvc and application binding: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) @@ -2874,7 +2874,7 @@ var _ = Describe("RBD", func() { By("validate stale images in trash", func() { err := waitToRemoveImagesFromTrash(f, defaultRBDPool, deployTimeout) if err != nil { - e2elog.Failf("failed to validate rbd images in pool %s trash with error %v", defaultRBDPool, err) + e2elog.Failf("failed to validate rbd images in pool %s trash: %v", defaultRBDPool, err) } }) @@ -2883,18 +2883,18 @@ var _ = Describe("RBD", func() { By("Create a PVC and delete PVC when backend pool deleted", func() { err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) if err != nil { - e2elog.Failf("failed to delete PVC when pool not found with error %v", err) + e2elog.Failf("failed to delete PVC when pool not found: %v", err) } }) // delete RBD provisioner secret err := deleteCephUser(f, keyringRBDProvisionerUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringRBDProvisionerUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringRBDProvisionerUsername, err) } // delete RBD plugin secret err = deleteCephUser(f, keyringRBDNodePluginUsername) if err != nil { - e2elog.Failf("failed to delete user %s with
error %v", keyringRBDNodePluginUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err) } }) }) From 5647d4da245fb6f658cd63229d1faf4285dc1a6f Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 11:35:49 +0530 Subject: [PATCH 10/23] e2e: reformat error message with proper error formatting To make the error return consistent across e2e tests we have decided to remove `with error` presence from the logs and this commit does that for cephfs tests. Signed-off-by: Humble Chirammal --- e2e/cephfs.go | 252 +++++++++++++++++++++++++------------------------- 1 file changed, 126 insertions(+), 126 deletions(-) diff --git a/e2e/cephfs.go b/e2e/cephfs.go index 388d6b1a6..dbbc1d2e4 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -38,21 +38,21 @@ func deployCephfsPlugin() { data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSProvisionerRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-") if err != nil { - e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err) + e2elog.Failf("failed to delete provisioner rbac %s: %v", cephFSDirPath+cephFSProvisionerRBAC, err) } data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-") if err != nil { - e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err) + e2elog.Failf("failed to delete nodeplugin rbac %s: %v", cephFSDirPath+cephFSNodePluginRBAC, err) } createORDeleteCephfsResources(kubectlCreate) @@ -68,12 +68,12 @@ func createORDeleteCephfsResources(action kubectlAction) { // createORDeleteRbdResources is used for upgrade testing as csidriverObject is // newly added, discarding file not found error. if !os.IsNotExist(err) { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+csiDriverObject, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+csiDriverObject, err) } } else { err = retryKubectlInput(cephCSINamespace, action, string(csiDriver), deployTimeout) if err != nil { - e2elog.Failf("failed to %s CSIDriver object with error %v", action, err) + e2elog.Failf("failed to %s CSIDriver object: %v", action, err) } } cephConf, err := ioutil.ReadFile(examplePath + cephConfconfigMap) @@ -81,74 +81,74 @@ func createORDeleteCephfsResources(action kubectlAction) { // createORDeleteCephfsResources is used for upgrade testing as cephConfConfigmap is // newly added, discarding file not found error. 
if !os.IsNotExist(err) { - e2elog.Failf("failed to read content from %s with error %v", examplePath+cephConfconfigMap, err) + e2elog.Failf("failed to read content from %s: %v", examplePath+cephConfconfigMap, err) } } else { err = retryKubectlInput(cephCSINamespace, action, string(cephConf), deployTimeout) if err != nil { - e2elog.Failf("failed to %s ceph-conf configmap object with error %v", action, err) + e2elog.Failf("failed to %s ceph-conf configmap object: %v", action, err) } } data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisioner) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisioner, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSProvisioner, err) } data = oneReplicaDeployYaml(data) err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err) + e2elog.Failf("failed to %s CephFS provisioner: %v", action, err) } data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSProvisionerRBAC, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err) + e2elog.Failf("failed to %s CephFS provisioner rbac: %v", action, err) } data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerPSP) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerPSP, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSProvisionerPSP, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err) + e2elog.Failf("failed to %s CephFS provisioner psp: %v", action, err) } data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePlugin) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePlugin, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSNodePlugin, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err) + e2elog.Failf("failed to %s CephFS nodeplugin: %v", action, err) } data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSNodePluginRBAC, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err) + e2elog.Failf("failed to %s CephFS nodeplugin rbac: %v", action, err) } data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginPSP) if err != nil { - e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginPSP, err) + e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSNodePluginPSP, err) } err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout) if err != nil { - 
e2elog.Failf("failed to %s CephFS nodeplugin psp with error %v", action, err) + e2elog.Failf("failed to %s CephFS nodeplugin psp: %v", action, err) } } func validateSubvolumeCount(f *framework.Framework, count int, fileSystemName, subvolumegroup string) { subVol, err := listCephFSSubVolumes(f, fileSystemName, subvolumegroup) if err != nil { - e2elog.Failf("failed to list CephFS subvolumes with error %v", err) + e2elog.Failf("failed to list CephFS subvolumes: %v", err) } if len(subVol) != count { e2elog.Failf("subvolumes [%v]. subvolume count %d not matching expected count %v", subVol, len(subVol), count) @@ -158,7 +158,7 @@ func validateSubvolumeCount(f *framework.Framework, count int, fileSystemName, s func validateSubvolumePath(f *framework.Framework, pvcName, pvcNamespace, fileSystemName, subvolumegroup string) error { _, pv, err := getPVCAndPV(f.ClientSet, pvcName, pvcNamespace) if err != nil { - return fmt.Errorf("failed to get PVC %s in namespace %s with error %w", pvcName, pvcNamespace, err) + return fmt.Errorf("failed to get PVC %s in namespace %s: %w", pvcName, pvcNamespace, err) } subVolumePathInPV := pv.Spec.CSI.VolumeAttributes["subvolumePath"] subVolume := pv.Spec.CSI.VolumeAttributes["subvolumeName"] @@ -195,32 +195,32 @@ var _ = Describe("cephfs", func() { if cephCSINamespace != defaultNs { err := createNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to create namespace %s with error %v", cephCSINamespace, err) + e2elog.Failf("failed to create namespace %s: %v", cephCSINamespace, err) } } deployCephfsPlugin() } err := createConfigMap(cephFSDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } // create cephFS provisioner secret key, err := createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps()) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringCephFSProvisionerUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err) } err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key) if err != nil { - e2elog.Failf("failed to create provisioner secret with error %v", err) + e2elog.Failf("failed to create provisioner secret: %v", err) } // create cephFS plugin secret key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps()) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringCephFSNodePluginUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err) } err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key) if err != nil { - e2elog.Failf("failed to create node secret with error %v", err) + e2elog.Failf("failed to create node secret: %v", err) } }) @@ -241,30 +241,30 @@ var _ = Describe("cephfs", func() { } err := deleteConfigMap(cephFSDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete provisioner secret with error %v", err) + e2elog.Failf("failed to delete provisioner secret: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). 
Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete node secret with error %v", err) + e2elog.Failf("failed to delete node secret: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } if deployCephFS { deleteCephfsPlugin() if cephCSINamespace != defaultNs { err := deleteNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to delete namespace %s with error %v", cephCSINamespace, err) + e2elog.Failf("failed to delete namespace %s: %v", cephCSINamespace, err) } } } @@ -284,14 +284,14 @@ var _ = Describe("cephfs", func() { By("checking provisioner deployment is running", func() { err := waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err) + e2elog.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err) } }) By("checking nodeplugin deamonset pods are running", func() { err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset %s with error %v", cephFSDeamonSetName, err) + e2elog.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err) } }) @@ -300,16 +300,16 @@ var _ = Describe("cephfs", func() { By("verify PVC and app binding on helm installation", func() { err := validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) } // Deleting the storageclass and secret created by helm err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + e2elog.Failf("failed to delete CephFS storageclass: %v", err) } err = deleteResource(cephFSExamplePath + "secret.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + e2elog.Failf("failed to delete CephFS storageclass: %v", err) } }) } @@ -349,22 +349,22 @@ var _ = Describe("cephfs", func() { scPath := cephFSExamplePath + "secret.yaml" err := validateCephFsStaticPV(f, appPath, scPath) if err != nil { - e2elog.Failf("failed to validate CephFS static pv with error %v", err) + e2elog.Failf("failed to validate CephFS static pv: %v", err) } }) By("create a storageclass with pool and a PVC then bind it to an app", func() { err := createCephfsStorageClass(f.ClientSet, f, true, nil) if err != nil { - e2elog.Failf("failed to create CephFS storageclass with error %v", err) + e2elog.Failf("failed to create CephFS storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + e2elog.Failf("failed to delete CephFS storageclass: %v", err) } }) @@ -376,17 +376,17 @@ var _ = Describe("cephfs", func() { false, map[string]string{"volumeNamePrefix": volumeNamePrefix}) if err != nil { - 
e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } // set up PVC pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup) @@ -394,7 +394,7 @@ var _ = Describe("cephfs", func() { foundIt := false subvolumes, err := listCephFSSubVolumes(f, fileSystemName, subvolumegroup) if err != nil { - e2elog.Failf("failed to list subvolumes with error %v", err) + e2elog.Failf("failed to list subvolumes: %v", err) } for _, subVol := range subvolumes { fmt.Printf("Checking prefix on %s\n", subVol) @@ -407,12 +407,12 @@ var _ = Describe("cephfs", func() { // clean up after ourselves err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } if !foundIt { e2elog.Failf("could not find subvolume with prefix %s", volumeNamePrefix) @@ -425,15 +425,15 @@ var _ = Describe("cephfs", func() { } err := createCephfsStorageClass(f.ClientSet, f, true, params) if err != nil { - e2elog.Failf("failed to create CephFS storageclass with error %v", err) + e2elog.Failf("failed to create CephFS storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + e2elog.Failf("failed to delete CephFS storageclass: %v", err) } }) @@ -444,33 +444,33 @@ var _ = Describe("cephfs", func() { } err := createCephfsStorageClass(f.ClientSet, f, true, params) if err != nil { - e2elog.Failf("failed to create CephFS storageclass with error %v", err) + e2elog.Failf("failed to create CephFS storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + e2elog.Failf("failed to delete CephFS storageclass: %v", err) } }) By("create a PVC and bind it to an app", func() { err := createCephfsStorageClass(f.ClientSet, f, false, nil) if err != nil { - e2elog.Failf("failed to create CephFS storageclass with error %v", err) + e2elog.Failf("failed to create CephFS storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) } }) 
By("create a PVC and bind it to an app with normal user", func() { err := validateNormalUserPVCAccess(pvcPath, f) if err != nil { - e2elog.Failf("failed to validate normal user CephFS pvc and application binding with error %v", err) + e2elog.Failf("failed to validate normal user CephFS pvc and application binding: %v", err) } }) @@ -478,13 +478,13 @@ var _ = Describe("cephfs", func() { totalCount := 2 pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName // create PVC and app @@ -492,11 +492,11 @@ var _ = Describe("cephfs", func() { name := fmt.Sprintf("%s%d", f.UniqueName, i) err = createPVCAndApp(name, f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC or application with error %v", err) + e2elog.Failf("failed to create PVC or application: %v", err) } err = validateSubvolumePath(f, pvc.Name, pvc.Namespace, fileSystemName, subvolumegroup) if err != nil { - e2elog.Failf("failed to validate subvolumePath with error %v", err) + e2elog.Failf("failed to validate subvolumePath: %v", err) } } @@ -506,7 +506,7 @@ var _ = Describe("cephfs", func() { name := fmt.Sprintf("%s%d", f.UniqueName, i) err = deletePVCAndApp(name, f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC or application with error %v", err) + e2elog.Failf("failed to delete PVC or application: %v", err) } } @@ -516,54 +516,54 @@ var _ = Describe("cephfs", func() { By("check data persist after recreating pod", func() { err := checkDataPersist(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to check data persist in pvc with error %v", err) + e2elog.Failf("failed to check data persist in pvc: %v", err) } }) By("Create PVC, bind it to an app, unmount volume and check app deletion", func() { pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC or application with error %v", err) + e2elog.Failf("failed to create PVC or application: %v", err) } err = unmountCephFSVolume(f, app.Name, pvc.Name) if err != nil { - e2elog.Failf("failed to unmount volume with error %v", err) + e2elog.Failf("failed to unmount volume: %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC or application with error %v", err) + e2elog.Failf("failed to delete PVC or application: %v", err) } }) By("create PVC, delete backing subvolume and check pv deletion", func() { pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } err = deleteBackingCephFSVolume(f, pvc) if err != nil { - e2elog.Failf("failed to delete CephFS subvolume with error %v", err) + e2elog.Failf("failed to delete CephFS subvolume: %v", err) } err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } }) By("validate multiple subvolumegroup creation", func() { err := 
deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } // re-define configmap with information of multiple clusters. @@ -577,27 +577,27 @@ var _ = Describe("cephfs", func() { err = createCustomConfigMap(f.ClientSet, cephFSDirPath, clusterInfo) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } params := map[string]string{ "clusterID": "clusterID-1", } err = createCephfsStorageClass(f.ClientSet, f, false, params) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application with error %v", err) + e2elog.Failf("failed to validate pvc and application: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } // verify subvolume group creation. err = validateSubvolumegroup(f, "subvolgrp1") if err != nil { - e2elog.Failf("failed to validate subvolume group with error %v", err) + e2elog.Failf("failed to validate subvolume group: %v", err) } // create resources and verify subvolume group creation @@ -605,38 +605,38 @@ var _ = Describe("cephfs", func() { params["clusterID"] = "clusterID-2" err = createCephfsStorageClass(f.ClientSet, f, false, params) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to validate pvc and application with error %v", err) + e2elog.Failf("failed to validate pvc and application: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = validateSubvolumegroup(f, "subvolgrp2") if err != nil { - e2elog.Failf("failed to validate subvolume group with error %v", err) + e2elog.Failf("failed to validate subvolume group: %v", err) } err = deleteConfigMap(cephFSDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = createConfigMap(cephFSDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } err = createCephfsStorageClass(f.ClientSet, f, false, nil) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) By("Resize PVC and check application directory size", func() { err := resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { - e2elog.Failf("failed to resize PVC with error %v", err) + e2elog.Failf("failed to resize PVC: %v", err) } }) @@ -644,13 +644,13 @@ var _ = Describe("cephfs", func() { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed 
to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName @@ -662,7 +662,7 @@ var _ = Describe("cephfs", func() { app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC or application with error %v", err) + e2elog.Failf("failed to create PVC or application: %v", err) } opt := metav1.ListOptions{ @@ -683,24 +683,24 @@ var _ = Describe("cephfs", func() { // delete PVC and app err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC or application with error %v", err) + e2elog.Failf("failed to delete PVC or application: %v", err) } }) By("Delete snapshot after deleting subvolume and snapshot from backend", func() { err := createCephFSSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create CephFS snapshotclass with error %v", err) + e2elog.Failf("failed to create CephFS snapshotclass: %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } snap := getSnapshot(snapshotPath) @@ -715,7 +715,7 @@ var _ = Describe("cephfs", func() { err = deleteBackingCephFSSubvolumeSnapshot(f, pvc, &snap) if err != nil { - e2elog.Failf("failed to delete backing snapshot for snapname with error=%s", err) + e2elog.Failf("failed to delete backing snapshot for snapname: %s", err) } err = deleteBackingCephFSVolume(f, pvc) @@ -725,19 +725,19 @@ var _ = Describe("cephfs", func() { err = deleteSnapshot(&snap, deployTimeout) if err != nil { - e2elog.Failf("failed to delete snapshot with error=%s", err) + e2elog.Failf("failed to delete snapshot: %s", err) } else { e2elog.Logf("successfully deleted snapshot") } err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } err = deleteResource(cephFSExamplePath + "snapshotclass.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err) + e2elog.Failf("failed to delete CephFS snapshotclass: %v", err) } }) @@ -749,17 +749,17 @@ var _ = Describe("cephfs", func() { err := createCephFSSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create CephFS snapshotclass with error %v", err) + e2elog.Failf("failed to create CephFS snapshotclass: %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } snap := getSnapshot(snapshotPath) @@ -776,17 +776,17 @@ var _ = Describe("cephfs", func() { // another one from snapshot.
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } pvcClone, err := loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } appClone, err := loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } pvcClone.Namespace = f.UniqueName @@ -814,7 +814,7 @@ var _ = Describe("cephfs", func() { err = deleteResource(cephFSExamplePath + "snapshotclass.yaml") if err != nil { - e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err) + e2elog.Failf("failed to delete CephFS snapshotclass: %v", err) } }) @@ -828,22 +828,22 @@ var _ = Describe("cephfs", func() { wg.Add(totalCount) err := createCephFSSnapshotClass(f) if err != nil { - e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + e2elog.Failf("failed to delete CephFS storageclass: %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName @@ -856,7 +856,7 @@ var _ = Describe("cephfs", func() { } wErr := writeDataInPod(app, &opt, f) if wErr != nil { - e2elog.Failf("failed to write data with error %v", wErr) + e2elog.Failf("failed to write data: %v", wErr) } snap := getSnapshot(snapshotPath) @@ -886,11 +886,11 @@ var _ = Describe("cephfs", func() { pvcClone, err := loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } appClone, err := loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } pvcClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName @@ -1032,7 +1032,7 @@ var _ = Describe("cephfs", func() { // delete parent pvc err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC or application with error %v", err) + e2elog.Failf("failed to delete PVC or application: %v", err) } validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) @@ -1047,17 +1047,17 @@ var _ = Describe("cephfs", func() { totalSubvolumes := totalCount + 1 pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name @@ -1074,13 +1074,13 @@ var _ = Describe("cephfs", func() { pvcClone, err := loadPVC(pvcSmartClonePath) if err != nil { -
e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Namespace = f.UniqueName appClone, err := loadApp(appSmartClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } appClone.Namespace = f.UniqueName wg.Add(totalCount) @@ -1111,7 +1111,7 @@ var _ = Describe("cephfs", func() { // delete parent pvc err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete PVC or application with error %v", err) + e2elog.Failf("failed to delete PVC or application: %v", err) } wg.Add(totalCount) @@ -1144,25 +1144,25 @@ var _ = Describe("cephfs", func() { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } pvcClone, err := loadPVC(pvcSmartClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvcClone.Namespace = f.UniqueName pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } app.Namespace = f.UniqueName @@ -1173,7 +1173,7 @@ var _ = Describe("cephfs", func() { app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name err = createPVCAndApp("", f, pvcClone, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC or application with error %v", err) + e2elog.Failf("failed to create PVC or application: %v", err) } opt := metav1.ListOptions{ @@ -1194,13 +1194,13 @@ var _ = Describe("cephfs", func() { // delete cloned ROX pvc and app err = deletePVCAndApp("", f, pvcClone, app) if err != nil { - e2elog.Failf("failed to delete PVC or application with error %v", err) + e2elog.Failf("failed to delete PVC or application: %v", err) } // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } }) // Make sure this should be last testcase in this file, because @@ -1208,18 +1208,18 @@ var _ = Describe("cephfs", func() { By("Create a PVC and delete PVC when backend pool deleted", func() { err := pvcDeleteWhenPoolNotFound(pvcPath, true, f) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } }) // delete cephFS provisioner secret err := deleteCephUser(f, keyringCephFSProvisionerUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringCephFSProvisionerUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err) } // delete cephFS plugin secret err = deleteCephUser(f, keyringCephFSNodePluginUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringCephFSNodePluginUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err) } }) }) From 14389c7b4049e2d0999c434be00da87f1a9ec5ae Mon Sep 17 
00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:18:18 +0530 Subject: [PATCH 11/23] e2e: reformat error message with consistent formatting To make error messages consistent across the e2e tests, we have decided to drop the "with error" wording from the logs; this commit does that for e2e/utils.go. Signed-off-by: Humble Chirammal --- e2e/utils.go | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/e2e/utils.go b/e2e/utils.go index 6d0ea5e23..58a13853e 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -526,21 +526,21 @@ func writeDataAndCalChecksum(app *v1.Pod, opt *metav1.ListOptions, f *framework. // write data in PVC err := writeDataInPod(app, opt, f) if err != nil { - e2elog.Logf("failed to write data in the pod with error %v", err) + e2elog.Logf("failed to write data in the pod: %v", err) return "", err } checkSum, err := calculateSHA512sum(f, app, filePath, opt) if err != nil { - e2elog.Logf("failed to calculate checksum with error %v", err) + e2elog.Logf("failed to calculate checksum: %v", err) return checkSum, err } err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete pod with error %v", err) + e2elog.Failf("failed to delete pod: %v", err) } return checkSum, nil @@ -558,18 +558,18 @@ func validatePVCClone( chErrs := make([]error, totalCount) pvc, err := loadPVC(sourcePvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } label := make(map[string]string) pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } app, err := loadApp(sourceAppPath) if err != nil { - e2elog.Failf("failed to load app with error %v", err) + e2elog.Failf("failed to load app: %v", err) } label[appKey] = appLabel app.Namespace = f.UniqueName @@ -589,20 +589,20 @@ func validatePVCClone( if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem { checkSum, err = writeDataAndCalChecksum(app, &opt, f) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } } // validate created backend rbd images validateRBDImageCount(f, 1, defaultRBDPool) pvcClone, err := loadPVC(clonePvcPath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Namespace = f.UniqueName appClone, err := loadApp(clonePvcAppPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } appClone.Namespace = f.UniqueName wg.Add(totalCount) @@ -690,7 +690,7 @@ func validatePVCClone( // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } totalCloneCount = totalCount + totalCount @@ -765,7 +765,7 @@ func validatePVCSnapshot( chErrs := make([]error, totalCount) err := createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } defer func() { err = deleteRBDSnapshotClass() @@ -776,17 +776,17 @@ func validatePVCSnapshot( pvc, err := loadPVC(pvcPath) if err != nil { -
e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } label := make(map[string]string) pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to create PVC with error %v", err) + e2elog.Failf("failed to create PVC: %v", err) } app, err := loadApp(appPath) if err != nil { - e2elog.Failf("failed to load app with error %v", err) + e2elog.Failf("failed to load app: %v", err) } // write data in PVC label[appKey] = appLabel @@ -798,7 +798,7 @@ func validatePVCSnapshot( app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name checkSum, err := writeDataAndCalChecksum(app, &opt, f) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } validateRBDImageCount(f, 1, defaultRBDPool) snap := getSnapshot(snapshotPath) @@ -816,7 +816,7 @@ func validatePVCSnapshot( content, sErr := getVolumeSnapshotContent(s.Namespace, s.Name) if sErr != nil { wgErrs[n] = fmt.Errorf( - "failed to get snapshotcontent for %s in namespace %s with error: %w", + "failed to get snapshotcontent for %s in namespace %s: %w", s.Name, s.Namespace, sErr) @@ -850,11 +850,11 @@ func validatePVCSnapshot( validateRBDImageCount(f, totalCount+1, defaultRBDPool) pvcClone, err := loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load PVC with error %v", err) + e2elog.Failf("failed to load PVC: %v", err) } appClone, err := loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } pvcClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName @@ -902,7 +902,7 @@ func validatePVCSnapshot( checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt) e2elog.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name) if chErrs[n] != nil { - e2elog.Logf("failed to calculte checksum for clone with error %s", chErrs[n]) + e2elog.Logf("failed to calculate checksum for clone: %s", chErrs[n]) } if checkSumClone != checkSum { e2elog.Logf( @@ -998,7 +998,7 @@ func validatePVCSnapshot( // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - e2elog.Failf("failed to delete PVC with error %v", err) + e2elog.Failf("failed to delete PVC: %v", err) } // total images in cluster is total snaps+ total clones @@ -1016,7 +1016,7 @@ func validatePVCSnapshot( content, err = getVolumeSnapshotContent(s.Namespace, s.Name) if err != nil { wgErrs[n] = fmt.Errorf( - "failed to get snapshotcontent for %s in namespace %s with error: %w", + "failed to get snapshotcontent for %s in namespace %s: %w", s.Name, s.Namespace, err) @@ -1221,7 +1221,7 @@ func validateController( func k8sVersionGreaterEquals(c kubernetes.Interface, major, minor int) bool { v, err := c.Discovery().ServerVersion() if err != nil { - e2elog.Failf("failed to get server version with error %v", err) + e2elog.Failf("failed to get server version: %v", err) // Failf() marks the case as failure, and returns from the // Go-routine that runs the case. This function will not have a // return value.
From 4ca19ad2ff6e7d9f18cb50e6e9c38bafc4986f14 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:19:34 +0530 Subject: [PATCH 12/23] e2e: reformat error message with consistent formatting To make error messages consistent across the e2e tests, we have decided to drop the "with error" wording from the logs; this commit does that for e2e/ceph_user.go. Signed-off-by: Humble Chirammal --- e2e/ceph_user.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/ceph_user.go b/e2e/ceph_user.go index 00b273d6b..f3f5ba5cd 100644 --- a/e2e/ceph_user.go +++ b/e2e/ceph_user.go @@ -87,7 +87,7 @@ func createCephUser(f *framework.Framework, user string, caps []string) (string, return "", err } if stdErr != "" { - return "", fmt.Errorf("failed to create user %s with error %v", cmd, stdErr) + return "", fmt.Errorf("failed to create user %s: %v", cmd, stdErr) } return strings.TrimSpace(stdOut), nil From 5ee723e634f74b6b0e985f0860910e12401a15b6 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:21:04 +0530 Subject: [PATCH 13/23] e2e: reformat error message with consistent formatting To make error messages consistent across the e2e tests, we have decided to drop the "with error" wording from the logs; this commit does that for e2e/rbd_helper.go. Signed-off-by: Humble Chirammal --- e2e/rbd_helper.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index 1684aeafb..08661c792 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -315,13 +315,13 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc, wgErrs := make([]error, totalCount) pvc, err := loadPVC(pvcPath) if err != nil { - return fmt.Errorf("failed to load PVC with error %w", err) + return fmt.Errorf("failed to load PVC: %w", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - return fmt.Errorf("failed to create PVC with error %w", err) + return fmt.Errorf("failed to create PVC: %w", err) } validateRBDImageCount(f, 1, defaultRBDPool) snap := getSnapshot(snapshotPath) @@ -345,7 +345,7 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc, // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - return fmt.Errorf("failed to delete PVC with error %w", err) + return fmt.Errorf("failed to delete PVC: %w", err) } // validate the rbd images created for snapshots @@ -353,11 +353,11 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc, pvcClone, err := loadPVC(pvcClonePath) if err != nil { - return fmt.Errorf("failed to load PVC with error %w", err) + return fmt.Errorf("failed to load PVC: %w", err) } appClone, err := loadApp(appClonePath) if err != nil { - return fmt.Errorf("failed to load application with error %w", err) + return fmt.Errorf("failed to load application: %w", err) } pvcClone.Namespace = f.UniqueName // if request is to create clone with different storage class @@ -839,7 +839,7 @@ func deletePVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolum } if stdErr != "" { return fmt.Errorf( - "failed to remove omap %s csi.volume.%s with error %v", + "failed to remove omap %s csi.volume.%s: %v", rbdOptions(pool), imageData.imageID, stdErr) @@ -866,7 +866,7 @@ func deletePVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeC } if stdErr != "" { return fmt.Errorf( - "failed to remove %s
csi.volumes.default csi.volume.%s with error %v", + "failed to remove %s csi.volumes.default csi.volume.%s: %v", rbdOptions(pool), imageData.imageID, stdErr) @@ -881,7 +881,7 @@ func validateThickPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, siz err := createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - return fmt.Errorf("failed to create PVC with error %w", err) + return fmt.Errorf("failed to create PVC: %w", err) } validateRBDImageCount(f, 1, defaultRBDPool) @@ -929,7 +929,7 @@ func validateThickPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim, siz err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - return fmt.Errorf("failed to delete PVC with error: %w", err) + return fmt.Errorf("failed to delete PVC: %w", err) } validateRBDImageCount(f, 0, defaultRBDPool) @@ -990,16 +990,16 @@ func recreateCSIRBDPods(f *framework.Framework) error { err := deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)", cephCSINamespace, false) if err != nil { - return fmt.Errorf("failed to delete pods with labels with error %w", err) + return fmt.Errorf("failed to delete pods with labels: %w", err) } // wait for csi pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - return fmt.Errorf("timeout waiting for daemonset pods with error %w", err) + return fmt.Errorf("timeout waiting for daemonset pods: %w", err) } err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - return fmt.Errorf("timeout waiting for deployment to be in running state with error %w", err) + return fmt.Errorf("timeout waiting for deployment to be in running state: %w", err) } return nil From fc7d3fadf5d12b4ee480dcfcba56cce28428154b Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:23:14 +0530 Subject: [PATCH 14/23] e2e: reformat error message with consistent formatting To make error messages consistent across the e2e tests, we have decided to drop the "with error" wording from the logs; this commit does that for e2e/upgrade-cephfs.go. Signed-off-by: Humble Chirammal --- e2e/upgrade-cephfs.go | 86 +++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/e2e/upgrade-cephfs.go b/e2e/upgrade-cephfs.go index 68f512062..e01926c6b 100644 --- a/e2e/upgrade-cephfs.go +++ b/e2e/upgrade-cephfs.go @@ -46,7 +46,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { if cephCSINamespace != defaultNs { err = createNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to create namespace with error %v", err) + e2elog.Failf("failed to create namespace: %v", err) } } @@ -54,43 +54,43 @@ var _ = Describe("CephFS Upgrade Testing", func() { // when we are done upgrading.
cwd, err = os.Getwd() if err != nil { - e2elog.Failf("failed to getwd with error %v", err) + e2elog.Failf("failed to getwd: %v", err) } err = upgradeAndDeployCSI(upgradeVersion, "cephfs") if err != nil { - e2elog.Failf("failed to upgrade csi with error %v", err) + e2elog.Failf("failed to upgrade csi: %v", err) } err = createConfigMap(cephFSDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } var key string // create cephFS provisioner secret key, err = createCephUser(f, keyringCephFSProvisionerUsername, cephFSProvisionerCaps()) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringCephFSProvisionerUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringCephFSProvisionerUsername, err) } err = createCephfsSecret(f, cephFSProvisionerSecretName, keyringCephFSProvisionerUsername, key) if err != nil { - e2elog.Failf("failed to create provisioner secret with error %v", err) + e2elog.Failf("failed to create provisioner secret: %v", err) } // create cephFS plugin secret key, err = createCephUser(f, keyringCephFSNodePluginUsername, cephFSNodePluginCaps()) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringCephFSNodePluginUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringCephFSNodePluginUsername, err) } err = createCephfsSecret(f, cephFSNodePluginSecretName, keyringCephFSNodePluginUsername, key) if err != nil { - e2elog.Failf("failed to create node secret with error %v", err) + e2elog.Failf("failed to create node secret: %v", err) } err = createCephFSSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create snapshotclass with error %v", err) + e2elog.Failf("failed to create snapshotclass: %v", err) } err = createCephfsStorageClass(f.ClientSet, f, true, nil) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } }) AfterEach(func() { @@ -110,27 +110,27 @@ var _ = Describe("CephFS Upgrade Testing", func() { } err = deleteConfigMap(cephFSDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), cephFSProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete provisioner secret with error %v", err) + e2elog.Failf("failed to delete provisioner secret: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). 
Delete(context.TODO(), cephFSNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete node secret with error %v", err) + e2elog.Failf("failed to delete node secret: %v", err) } err = deleteResource(cephFSExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = deleteResource(cephFSExamplePath + "snapshotclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } if deployCephFS { deleteCephfsPlugin() @@ -138,7 +138,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { err = deleteNamespace(c, cephCSINamespace) if err != nil { if err != nil { - e2elog.Failf("failed to delete namespace with error %v", err) + e2elog.Failf("failed to delete namespace: %v", err) } } } @@ -150,13 +150,13 @@ var _ = Describe("CephFS Upgrade Testing", func() { By("checking provisioner deployment is running", func() { err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err) + e2elog.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err) } }) By("checking nodeplugin deamonset pods are running", func() { err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset %s with error%v", cephFSDeamonSetName, err) + e2elog.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err) } }) @@ -169,13 +169,13 @@ var _ = Describe("CephFS Upgrade Testing", func() { pvc, err = loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load pvc with error %v", err) + e2elog.Failf("failed to load pvc: %v", err) } pvc.Namespace = f.UniqueName app, err = loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } label[appKey] = appLabel app.Namespace = f.UniqueName @@ -184,7 +184,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create pvc and application with error %v", err) + e2elog.Failf("failed to create pvc and application: %v", err) } opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), @@ -212,7 +212,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { e2elog.Logf("Calculating checksum of %s", filePath) checkSum, err = calculateSHA512sum(f, app, filePath, &opt) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } // pvc clone is only supported from v1.16+ @@ -230,25 +230,25 @@ var _ = Describe("CephFS Upgrade Testing", func() { } err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } deleteCephfsPlugin() // switch back to current changes. 
err = os.Chdir(cwd) if err != nil { - e2elog.Failf("failed to d chdir with error %v", err) + e2elog.Failf("failed to chdir: %v", err) } deployCephfsPlugin() err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephFSDeploymentName, err) + e2elog.Failf("timeout waiting for upgraded deployment %s: %v", cephFSDeploymentName, err) } err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephFSDeamonSetName, err) + e2elog.Failf("timeout waiting for upgraded daemonset %s: %v", cephFSDeamonSetName, err) } app.Labels = label @@ -256,7 +256,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { // an earlier release. err = createApp(f.ClientSet, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create application with error %v", err) + e2elog.Failf("failed to create application: %v", err) } }) @@ -269,13 +269,13 @@ var _ = Describe("CephFS Upgrade Testing", func() { if k8sVersionGreaterEquals(f.ClientSet, 1, 17) { pvcClone, err = loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load pvc with error %v", err) + e2elog.Failf("failed to load pvc: %v", err) } pvcClone.Namespace = f.UniqueName pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) appClone, err = loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } label[appKey] = "validate-snap-cephfs" appClone.Namespace = f.UniqueName @@ -283,7 +283,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { appClone.Labels = label err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create pvc and application with error %v", err) + e2elog.Failf("failed to create pvc and application: %v", err) } opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), @@ -292,7 +292,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { testFilePath := filepath.Join(mountPath, "testClone") newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } if strings.Compare(newCheckSum, checkSum) != 0 { @@ -332,14 +332,14 @@ var _ = Describe("CephFS Upgrade Testing", func() { if k8sVersionGreaterEquals(f.ClientSet, 1, 16) { pvcClone, err = loadPVC(pvcSmartClonePath) if err != nil { - e2elog.Failf("failed to load pvc with error %v", err) + e2elog.Failf("failed to load pvc: %v", err) } pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Namespace = f.UniqueName pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) appClone, err = loadApp(appSmartClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } label[appKey] = "validate-snap-cephfs" appClone.Namespace = f.UniqueName @@ -347,7 +347,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { appClone.Labels = label err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create pvc and application with error %v", err) + e2elog.Failf("failed to create pvc and application: %v", err) } opt := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), @@ -356,7 +356,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { testFilePath := filepath.Join(mountPath, "testClone") newCheckSum, err = calculateSHA512sum(f, appClone, testFilePath, &opt) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } if strings.Compare(newCheckSum, checkSum) != 0 { @@ -370,7 +370,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { // delete cloned pvc and pod err = deletePVCAndApp("", f, pvcClone, appClone) if err != nil { - e2elog.Failf("failed to delete pvc and application with error %v", err) + e2elog.Failf("failed to delete pvc and application: %v", err) } } @@ -390,23 +390,23 @@ var _ = Describe("CephFS Upgrade Testing", func() { PersistentVolumeClaims(pvc.Namespace). Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { - e2elog.Failf("failed to get pvc with error %v", err) + e2elog.Failf("failed to get pvc: %v", err) } // resize PVC err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout) if err != nil { - e2elog.Failf("failed to expand pvc with error %v", err) + e2elog.Failf("failed to expand pvc: %v", err) } // wait for application pod to come up after resize err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError) if err != nil { - e2elog.Failf("timeout waiting for pod to be in running state with error %v", err) + e2elog.Failf("timeout waiting for pod to be in running state: %v", err) } // validate if resize is successful. err = checkDirSize(app, f, &opt, pvcExpandSize) if err != nil { - e2elog.Failf("failed to check directory size with error %v", err) + e2elog.Failf("failed to check directory size: %v", err) } } }) @@ -414,17 +414,17 @@ var _ = Describe("CephFS Upgrade Testing", func() { By("delete pvc and app") err = deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete pvc and application with error %v", err) + e2elog.Failf("failed to delete pvc and application: %v", err) } // delete cephFS provisioner secret err = deleteCephUser(f, keyringCephFSProvisionerUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringCephFSProvisionerUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringCephFSProvisionerUsername, err) } // delete cephFS plugin secret err = deleteCephUser(f, keyringCephFSNodePluginUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringCephFSNodePluginUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringCephFSNodePluginUsername, err) } }) }) From 9bab088ddcc4a9588f016338b8497a617ded1a88 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:24:05 +0530 Subject: [PATCH 15/23] e2e: reformat error message with consistent formatting To make error messages consistent across the e2e tests, we have decided to drop the "with error" wording from the logs; this commit does that for e2e/upgrade-rbd.go.
Signed-off-by: Humble Chirammal --- e2e/upgrade-rbd.go | 96 +++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/e2e/upgrade-rbd.go b/e2e/upgrade-rbd.go index 9ddc705d3..67c45ab10 100644 --- a/e2e/upgrade-rbd.go +++ b/e2e/upgrade-rbd.go @@ -42,7 +42,7 @@ var _ = Describe("RBD Upgrade Testing", func() { if cephCSINamespace != defaultNs { err := createNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to create namespace with error %v", err) + e2elog.Failf("failed to create namespace: %v", err) } } @@ -51,52 +51,52 @@ var _ = Describe("RBD Upgrade Testing", func() { var err error cwd, err = os.Getwd() if err != nil { - e2elog.Failf("failed to do getwd with error %v", err) + e2elog.Failf("failed to do getwd: %v", err) } deployVault(f.ClientSet, deployTimeout) err = upgradeAndDeployCSI(upgradeVersion, "rbd") if err != nil { - e2elog.Failf("failed to upgrade and deploy CSI with error %v", err) + e2elog.Failf("failed to upgrade and deploy CSI: %v", err) } err = createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { - e2elog.Failf("failed to create configmap with error %v", err) + e2elog.Failf("failed to create configmap: %v", err) } err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy) if err != nil { - e2elog.Failf("failed to create storageclass with error %v", err) + e2elog.Failf("failed to create storageclass: %v", err) } // create rbd provisioner secret key, err := createCephUser(f, keyringRBDProvisionerUsername, rbdProvisionerCaps("", "")) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringRBDProvisionerUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringRBDProvisionerUsername, err) } err = createRBDSecret(f, rbdProvisionerSecretName, keyringRBDProvisionerUsername, key) if err != nil { - e2elog.Failf("failed to create provisioner secret with error %v", err) + e2elog.Failf("failed to create provisioner secret: %v", err) } // create rbd plugin secret key, err = createCephUser(f, keyringRBDNodePluginUsername, rbdNodePluginCaps("", "")) if err != nil { - e2elog.Failf("failed to create user %s with error %v", keyringRBDNodePluginUsername, err) + e2elog.Failf("failed to create user %s: %v", keyringRBDNodePluginUsername, err) } err = createRBDSecret(f, rbdNodePluginSecretName, keyringRBDNodePluginUsername, key) if err != nil { - e2elog.Failf("failed to create node secret with error %v", err) + e2elog.Failf("failed to create node secret: %v", err) } err = createRBDSnapshotClass(f) if err != nil { - e2elog.Failf("failed to create snapshotclass with error %v", err) + e2elog.Failf("failed to create snapshotclass: %v", err) } err = createNodeLabel(f, nodeRegionLabel, regionValue) if err != nil { - e2elog.Failf("failed to create node label with error %v", err) + e2elog.Failf("failed to create node label: %v", err) } err = createNodeLabel(f, nodeZoneLabel, zoneValue) if err != nil { - e2elog.Failf("failed to create node label with error %v", err) + e2elog.Failf("failed to create node label: %v", err) } }) AfterEach(func() { @@ -117,27 +117,27 @@ var _ = Describe("RBD Upgrade Testing", func() { err := deleteConfigMap(rbdDirPath) if err != nil { - e2elog.Failf("failed to delete configmap with error %v", err) + e2elog.Failf("failed to delete configmap: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). 
Delete(context.TODO(), rbdProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete provisioner secret with error %v", err) + e2elog.Failf("failed to delete provisioner secret: %v", err) } err = c.CoreV1(). Secrets(cephCSINamespace). Delete(context.TODO(), rbdNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { - e2elog.Failf("failed to delete node secret with error %v", err) + e2elog.Failf("failed to delete node secret: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { - e2elog.Failf("failed to delete storageclass with error %v", err) + e2elog.Failf("failed to delete storageclass: %v", err) } err = deleteResource(rbdExamplePath + "snapshotclass.yaml") if err != nil { - e2elog.Failf("failed to delete snapshotclass with error %v", err) + e2elog.Failf("failed to delete snapshotclass: %v", err) } deleteVault() if deployRBD { @@ -145,17 +145,17 @@ var _ = Describe("RBD Upgrade Testing", func() { if cephCSINamespace != defaultNs { err = deleteNamespace(c, cephCSINamespace) if err != nil { - e2elog.Failf("failed to delete namespace with error %v", err) + e2elog.Failf("failed to delete namespace: %v", err) } } } err = deleteNodeLabel(c, nodeRegionLabel) if err != nil { - e2elog.Failf("failed to delete node label with error %v", err) + e2elog.Failf("failed to delete node label: %v", err) } err = deleteNodeLabel(c, nodeZoneLabel) if err != nil { - e2elog.Failf("failed to delete node label with error %v", err) + e2elog.Failf("failed to delete node label: %v", err) } }) @@ -167,14 +167,14 @@ var _ = Describe("RBD Upgrade Testing", func() { By("checking provisioner deployment is running", func() { err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err) + e2elog.Failf("timeout waiting for deployment %s: %v", rbdDeploymentName, err) } }) By("checking nodeplugin deamonset pods are running", func() { err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err) + e2elog.Failf("timeout waiting for daemonset %s: %v", rbdDaemonsetName, err) } }) @@ -186,13 +186,13 @@ var _ = Describe("RBD Upgrade Testing", func() { pvc, err = loadPVC(pvcPath) if err != nil { - e2elog.Failf("failed to load pvc with error %v", err) + e2elog.Failf("failed to load pvc: %v", err) } pvc.Namespace = f.UniqueName app, err = loadApp(appPath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } label[appKey] = appLabel app.Namespace = f.UniqueName @@ -200,7 +200,7 @@ var _ = Describe("RBD Upgrade Testing", func() { pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create pvc with error %v", err) + e2elog.Failf("failed to create pvc: %v", err) } opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), @@ -231,7 +231,7 @@ var _ = Describe("RBD Upgrade Testing", func() { e2elog.Logf("Calculating checksum of %s", filePath) checkSum, err = calculateSHA512sum(f, app, filePath, &opt) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } // pvc clone is only supported 
from v1.16+ @@ -249,25 +249,25 @@ var _ = Describe("RBD Upgrade Testing", func() { } err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("failed to delete application with error %v", err) + e2elog.Failf("failed to delete application: %v", err) } deleteRBDPlugin() err = os.Chdir(cwd) if err != nil { - e2elog.Failf("failed to change directory with error %v", err) + e2elog.Failf("failed to change directory: %v", err) } deployRBDPlugin() err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", rbdDeploymentName, err) + e2elog.Failf("timeout waiting for upgraded deployment %s: %v", rbdDeploymentName, err) } err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", rbdDaemonsetName, err) + e2elog.Failf("timeout waiting for upgraded daemonset %s: %v", rbdDaemonsetName, err) } // validate if the app gets bound to a pvc created by @@ -275,7 +275,7 @@ var _ = Describe("RBD Upgrade Testing", func() { app.Labels = label err = createApp(f.ClientSet, app, deployTimeout) if err != nil { - e2elog.Failf("failed to create application with error %v", err) + e2elog.Failf("failed to create application: %v", err) } }) @@ -288,14 +288,14 @@ var _ = Describe("RBD Upgrade Testing", func() { if k8sVersionGreaterEquals(f.ClientSet, 1, 16) { pvcClone, err := loadPVC(pvcClonePath) if err != nil { - e2elog.Failf("failed to load pvc with error %v", err) + e2elog.Failf("failed to load pvc: %v", err) } pvcClone.Namespace = f.UniqueName pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) pvcClone.Spec.DataSource.Name = "rbd-pvc-snapshot" appClone, err := loadApp(appClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } label[appKey] = "validate-snap-clone" appClone.Namespace = f.UniqueName @@ -303,7 +303,7 @@ var _ = Describe("RBD Upgrade Testing", func() { appClone.Labels = label err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create pvc with error %v", err) + e2elog.Failf("failed to create pvc: %v", err) } opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), @@ -312,7 +312,7 @@ var _ = Describe("RBD Upgrade Testing", func() { testFilePath := filepath.Join(mountPath, "testClone") newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } if strings.Compare(newCheckSum, checkSum) != 0 { e2elog.Failf( @@ -325,7 +325,7 @@ var _ = Describe("RBD Upgrade Testing", func() { // delete cloned pvc and pod err = deletePVCAndApp("", f, pvcClone, appClone) if err != nil { - e2elog.Failf("failed to delete pvc and application with error %v", err) + e2elog.Failf("failed to delete pvc and application: %v", err) } } @@ -340,14 +340,14 @@ var _ = Describe("RBD Upgrade Testing", func() { if k8sVersionGreaterEquals(f.ClientSet, 1, 16) { pvcClone, err := loadPVC(pvcSmartClonePath) if err != nil { - e2elog.Failf("failed to load pvc with error %v", err) + e2elog.Failf("failed to load pvc: %v", err) } pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Namespace = f.UniqueName 
pvcClone.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) appClone, err := loadApp(appSmartClonePath) if err != nil { - e2elog.Failf("failed to load application with error %v", err) + e2elog.Failf("failed to load application: %v", err) } label[appKey] = "validate-clone" appClone.Namespace = f.UniqueName @@ -355,7 +355,7 @@ var _ = Describe("RBD Upgrade Testing", func() { appClone.Labels = label err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) if err != nil { - e2elog.Failf("failed to create pvc with error %v", err) + e2elog.Failf("failed to create pvc: %v", err) } opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), @@ -364,7 +364,7 @@ var _ = Describe("RBD Upgrade Testing", func() { testFilePath := filepath.Join(mountPath, "testClone") newCheckSum, err := calculateSHA512sum(f, appClone, testFilePath, &opt) if err != nil { - e2elog.Failf("failed to calculate checksum with error %v", err) + e2elog.Failf("failed to calculate checksum: %v", err) } if strings.Compare(newCheckSum, checkSum) != 0 { e2elog.Failf( @@ -377,7 +377,7 @@ var _ = Describe("RBD Upgrade Testing", func() { // delete cloned pvc and pod err = deletePVCAndApp("", f, pvcClone, appClone) if err != nil { - e2elog.Failf("failed to delete pvc and application with error %v", err) + e2elog.Failf("failed to delete pvc and application: %v", err) } } @@ -398,23 +398,23 @@ var _ = Describe("RBD Upgrade Testing", func() { PersistentVolumeClaims(pvc.Namespace). Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { - e2elog.Failf("failed to get pvc with error %v", err) + e2elog.Failf("failed to get pvc: %v", err) } // resize PVC err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout) if err != nil { - e2elog.Failf("failed to expand pvc with error %v", err) + e2elog.Failf("failed to expand pvc: %v", err) } // wait for application pod to come up after resize err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout, noError) if err != nil { - e2elog.Failf("timeout waiting for pod to be in running state with error %v", err) + e2elog.Failf("timeout waiting for pod to be in running state: %v", err) } // validate if resize is successful. 
err = checkDirSize(app, f, &opt, pvcExpandSize) if err != nil { - e2elog.Failf("failed to check directory size with error %v", err) + e2elog.Failf("failed to check directory size: %v", err) } } }) @@ -422,18 +422,18 @@ var _ = Describe("RBD Upgrade Testing", func() { By("delete pvc and app", func() { err := deletePVCAndApp("", f, pvc, app) if err != nil { - e2elog.Failf("failed to delete pvc and application with error %v", err) + e2elog.Failf("failed to delete pvc and application: %v", err) } }) // delete RBD provisioner secret err := deleteCephUser(f, keyringRBDProvisionerUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringRBDProvisionerUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringRBDProvisionerUsername, err) } // delete RBD plugin secret err = deleteCephUser(f, keyringRBDNodePluginUsername) if err != nil { - e2elog.Failf("failed to delete user %s with error %v", keyringRBDNodePluginUsername, err) + e2elog.Failf("failed to delete user %s: %v", keyringRBDNodePluginUsername, err) } }) }) From f7f5a41774b5b0a8c24b7e9a3a79439b9ed5f4f7 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:26:06 +0530 Subject: [PATCH 16/23] e2e: reformat error message with consistent formatting To keep error returns consistent across the e2e tests, we have decided to drop the redundant "with error" phrase from the log messages; this commit does that for e2e/cephfs_helper.go. Signed-off-by: Humble Chirammal --- e2e/cephfs_helper.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index cf453b9bb..28e8d3eb9 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -27,7 +27,7 @@ func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error { return fmt.Errorf("failed to exec command in toolbox: %w", err) } if stdErr != "" { - return fmt.Errorf("failed to getpath for subvolumegroup %s with error %v", subvolgrp, stdErr) + return fmt.Errorf("failed to getpath for subvolumegroup %s: %v", subvolgrp, stdErr) } expectedGrpPath := "/volumes/" + subvolgrp stdOut = strings.TrimSpace(stdOut) @@ -193,7 +193,7 @@ func getSubvolumePath(f *framework.Framework, filesystem, subvolgrp, subvolume s return "", err } if stdErr != "" { - return "", fmt.Errorf("failed to getpath for subvolume %s with error %s", subvolume, stdErr) + return "", fmt.Errorf("failed to getpath for subvolume %s: %s", subvolume, stdErr) } return strings.TrimSpace(stdOut), nil From 929e17d21b4941ef165a9aebb60090f92340e957 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Mon, 22 Nov 2021 12:26:50 +0530 Subject: [PATCH 17/23] e2e: reformat error message with consistent formatting To keep error returns consistent across the e2e tests, we have decided to drop the redundant "with error" phrase from the log messages; this commit does that for e2e/snapshot.go. The shape of the change is sketched below.
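For illustration only, the convention these commits converge on is a plain colon before the error value, with `%w` (or `%v` in `Failf` calls) carrying the error. A minimal sketch of the pattern; `doSomething` is a hypothetical stand-in, not a helper from this repository:

```go
package main

import (
	"errors"
	"fmt"
)

// doSomething stands in for any e2e helper that can fail
// (createPVCAndApp, deleteCephUser, ...); it is hypothetical.
func doSomething() error {
	return errors.New("permission denied")
}

func main() {
	if err := doSomething(); err != nil {
		// old convention: fmt.Errorf("failed to do something with error %v", err)
		// new convention: a plain colon before the error; %w keeps the wrapped
		// error available to errors.Is / errors.As.
		wrapped := fmt.Errorf("failed to do something: %w", err)
		fmt.Println(wrapped) // prints: failed to do something: permission denied
	}
}
```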
Signed-off-by: Humble Chirammal --- e2e/snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/snapshot.go b/e2e/snapshot.go index 000db24a2..390a1a09b 100644 --- a/e2e/snapshot.go +++ b/e2e/snapshot.go @@ -124,7 +124,7 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error { } if !apierrs.IsNotFound(err) { return false, fmt.Errorf( - "get on deleted snapshot %v failed with error other than \"not found\": %w", + "get on deleted snapshot %v failed (error other than \"not found\"): %w", name, err) } From b2099eb3b1a46ed326ded1aa5b2601a418726d91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Nov 2021 13:24:17 +0000 Subject: [PATCH 18/23] rebase: bump k8s.io/kubernetes from 1.22.3 to 1.22.4 Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.22.3 to 1.22.4. - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.22.3...v1.22.4) --- updated-dependencies: - dependency-name: k8s.io/kubernetes dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 63 +- go.sum | 81 +- .../bits-and-blooms/bitset/.gitignore | 26 + .../bits-and-blooms/bitset/.travis.yml | 37 + .../github.com/bits-and-blooms/bitset/LICENSE | 27 + .../bits-and-blooms/bitset/README.md | 93 ++ .../bitset/azure-pipelines.yml | 39 + .../bits-and-blooms/bitset/bitset.go | 952 +++++++++++++ .../github.com/bits-and-blooms/bitset/go.mod | 3 + .../github.com/bits-and-blooms/bitset/go.sum | 0 .../bits-and-blooms/bitset/popcnt.go | 53 + .../bits-and-blooms/bitset/popcnt_19.go | 45 + .../bits-and-blooms/bitset/popcnt_amd64.go | 68 + .../bits-and-blooms/bitset/popcnt_amd64.s | 104 ++ .../bits-and-blooms/bitset/popcnt_generic.go | 24 + .../bitset/trailing_zeros_18.go | 14 + .../bitset/trailing_zeros_19.go | 9 + .../github.com/opencontainers/selinux/LICENSE | 201 +++ .../opencontainers/selinux/go-selinux/doc.go | 14 + .../selinux/go-selinux/selinux.go | 284 ++++ .../selinux/go-selinux/selinux_linux.go | 1212 +++++++++++++++++ .../selinux/go-selinux/selinux_stub.go | 154 +++ .../selinux/go-selinux/xattrs_linux.go | 38 + .../selinux/pkg/pwalk/README.md | 42 + .../opencontainers/selinux/pkg/pwalk/pwalk.go | 104 ++ .../server/egressselector/egress_selector.go | 10 + .../tools/clientcmd/api/v1/conversion.go | 2 +- vendor/k8s.io/cloud-provider/go.mod | 26 +- vendor/k8s.io/cloud-provider/go.sum | 28 +- .../k8s.io/component-base/metrics/options.go | 4 +- .../k8s.io/kubernetes/pkg/util/selinux/doc.go | 19 + .../kubernetes/pkg/util/selinux/selinux.go | 39 + .../pkg/util/selinux/selinux_linux.go | 57 + .../pkg/util/selinux/selinux_unsupported.go | 38 + .../pkg/volume/util/hostutil/fake_hostutil.go | 2 +- .../volume/util/hostutil/hostutil_linux.go | 13 +- .../pkg/volume/util/subpath/subpath_linux.go | 26 +- .../test/e2e/framework/volume/fixtures.go | 10 + .../kubernetes/test/utils/image/manifest.go | 2 +- .../mount-utils/mount_helper_windows.go | 10 +- vendor/modules.txt | 80 +- 41 files changed, 3901 insertions(+), 152 deletions(-) create mode 100644 vendor/github.com/bits-and-blooms/bitset/.gitignore create mode 100644 vendor/github.com/bits-and-blooms/bitset/.travis.yml create mode 100644 vendor/github.com/bits-and-blooms/bitset/LICENSE create mode 100644 vendor/github.com/bits-and-blooms/bitset/README.md create mode 100644 vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml create mode
100644 vendor/github.com/bits-and-blooms/bitset/bitset.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.mod create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.sum create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_19.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go create mode 100644 vendor/github.com/opencontainers/selinux/LICENSE create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/doc.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/selinux/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/selinux/selinux.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_linux.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_unsupported.go diff --git a/go.mod b/go.mod index 72f179503..5701743a5 100644 --- a/go.mod +++ b/go.mod @@ -25,13 +25,16 @@ require ( golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 google.golang.org/grpc v1.42.0 - k8s.io/api v0.22.2 - k8s.io/apimachinery v0.22.2 + k8s.io/api v0.22.4 + k8s.io/apimachinery v0.22.4 k8s.io/client-go v12.0.0+incompatible - k8s.io/cloud-provider v0.22.2 + k8s.io/cloud-provider v0.22.4 k8s.io/klog/v2 v2.10.0 - k8s.io/kubernetes v1.22.3 - k8s.io/mount-utils v0.22.2 + // + // when updating k8s.io/kubernetes, make sure to update the replace section too + // + k8s.io/kubernetes v1.22.4 + k8s.io/mount-utils v0.22.4 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a sigs.k8s.io/controller-runtime v0.10.3 ) @@ -45,31 +48,31 @@ replace ( // // k8s.io/kubernetes depends on these k8s.io packages, but unversioned // - k8s.io/api => k8s.io/api v0.22.2 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.2 - k8s.io/apimachinery => k8s.io/apimachinery v0.22.2 - k8s.io/apiserver => k8s.io/apiserver v0.22.2 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.2 - k8s.io/client-go => k8s.io/client-go v0.22.2 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.2 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.2 - k8s.io/code-generator => k8s.io/code-generator v0.22.2 - k8s.io/component-base => k8s.io/component-base v0.22.2 - k8s.io/component-helpers => k8s.io/component-helpers v0.22.2 - k8s.io/controller-manager => k8s.io/controller-manager v0.22.2 - k8s.io/cri-api => k8s.io/cri-api v0.22.2 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.2 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.2 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.2 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.2 - k8s.io/kube-scheduler => 
k8s.io/kube-scheduler v0.22.2 - k8s.io/kubectl => k8s.io/kubectl v0.22.2 - k8s.io/kubelet => k8s.io/kubelet v0.22.2 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.2 - k8s.io/metrics => k8s.io/metrics v0.22.2 - k8s.io/mount-utils => k8s.io/mount-utils v0.22.2 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.2 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.2 + k8s.io/api => k8s.io/api v0.22.4 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.4 + k8s.io/apimachinery => k8s.io/apimachinery v0.22.4 + k8s.io/apiserver => k8s.io/apiserver v0.22.4 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.4 + k8s.io/client-go => k8s.io/client-go v0.22.4 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.4 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.4 + k8s.io/code-generator => k8s.io/code-generator v0.22.4 + k8s.io/component-base => k8s.io/component-base v0.22.4 + k8s.io/component-helpers => k8s.io/component-helpers v0.22.4 + k8s.io/controller-manager => k8s.io/controller-manager v0.22.4 + k8s.io/cri-api => k8s.io/cri-api v0.22.4 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.4 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.4 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.4 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.4 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.4 + k8s.io/kubectl => k8s.io/kubectl v0.22.4 + k8s.io/kubelet => k8s.io/kubelet v0.22.4 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.4 + k8s.io/metrics => k8s.io/metrics v0.22.4 + k8s.io/mount-utils => k8s.io/mount-utils v0.22.4 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.4 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.4 // layeh.com seems to be misbehaving layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917 ) diff --git a/go.sum b/go.sum index 9c7d27b7d..3993789d3 100644 --- a/go.sum +++ b/go.sum @@ -143,6 +143,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -841,6 +842,7 @@ github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04s github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod 
h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= github.com/openshift/api v0.0.0-20210927171657-636513e97fda h1:VoJmrqbFDuqzjlByItbjx/HxmReK4LC+X3Jt2Wv2Ogs= @@ -1576,28 +1578,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= -k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= -k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= -k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= -k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= -k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4= -k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= -k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI= -k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= -k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= -k8s.io/cloud-provider v0.22.2 h1:CiSDHMJiOd6qgYIP8ln9ueFHFU5Ld8TDZiYNIiMNbNk= -k8s.io/cloud-provider v0.22.2/go.mod h1:HUvZkUkV6dIKgWJQgGvnFhOeEHT87ZP39ij4K0fgkAs= -k8s.io/cluster-bootstrap v0.22.2/go.mod h1:ZkmQKprEqvrUccMnbRHISsMscA1dsQ8SffM9nHq6CgE= -k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= -k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M= -k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= -k8s.io/component-helpers v0.22.2 h1:guQ9oYclE5LMydWFfAFA+u7SQgQzz2g+YgpJ5QooSyY= -k8s.io/component-helpers v0.22.2/go.mod h1:+N61JAR9aKYSWbnLA88YcFr9K/6ISYvRNybX7QW7Rs8= -k8s.io/controller-manager v0.22.2/go.mod h1:zeDUbCc66IcMZ81U8qC5Z5pm9A8QkqD7839H8t7//yY= -k8s.io/cri-api v0.22.2/go.mod h1:mj5DGUtElRyErU5AZ8EM0ahxbElYsaLAMTPhLPQ40Eg= -k8s.io/csi-translation-lib v0.22.2/go.mod h1:HYNFNKFADblw8nVm3eshFVWdmiccxPHN+SUmTKG3Ctk= +k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw= +k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= +k8s.io/apiextensions-apiserver v0.22.4 h1:2iGpcVyw4MnAyyXVJU2Xg6ZsbIxAOfRHo0LF5A5J0RA= +k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw= +k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck= +k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= +k8s.io/apiserver v0.22.4 h1:L+220cy+94UWmyBl1kiVTklBXrBtKsbjlPV60eL2u6s= +k8s.io/apiserver v0.22.4/go.mod h1:38WmcUZiiy41A7Aty8/VorWRa8vDGqoUzDf2XYlku0E= +k8s.io/cli-runtime v0.22.4/go.mod h1:x35r0ERHXr/MrbR1C6MPJxQ3xKG6+hXi9m2xLzlMPZA= +k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg= +k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= +k8s.io/cloud-provider v0.22.4 h1:dNCY8e7XESvDvldkX1/PHsJQWEzGyKN1xxS51GuVQuc= +k8s.io/cloud-provider v0.22.4/go.mod h1:lTaIKDEqJt7UPbsz9sk1Aa719ADIWuFtbh/mgq72UE8= +k8s.io/cluster-bootstrap v0.22.4/go.mod h1:fTQZ6u9G6fg2LHhB8nEgZLnXIhCDSRYuLUUS5pgW8RY= +k8s.io/code-generator v0.22.4/go.mod 
h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw= +k8s.io/component-base v0.22.4 h1:7qwLJnua2ppGNZrRGDQ0vhsFebI39VGbZ4zdR5ArViI= +k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A= +k8s.io/component-helpers v0.22.4 h1:Pso4iXoY6aYLCYQlNkME2MSJvAXo/7lnJYsWHdC6tvE= +k8s.io/component-helpers v0.22.4/go.mod h1:A50qTyczDFbhZDifIfS2zFrHuPk9UNOWPpvNZ+3RSIs= +k8s.io/controller-manager v0.22.4/go.mod h1:DcJNoo4OvXCh9KfESIrX9C9dNQj1OfQrAZrEkFbNMRw= +k8s.io/cri-api v0.22.4/go.mod h1:mj5DGUtElRyErU5AZ8EM0ahxbElYsaLAMTPhLPQ40Eg= +k8s.io/csi-translation-lib v0.22.4/go.mod h1:8ZHJ0R2rSiL+0OC7WEF9MTMW4+CV4YEzXDng3rogEY4= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= @@ -1608,25 +1610,26 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE= k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-aggregator v0.22.2/go.mod h1:hsd0LEmVQSvMc0UzAwmcm/Gk3HzLp50mq/o6cu1ky2A= -k8s.io/kube-controller-manager v0.22.2/go.mod h1:n8Wh6HHmB+EBy3INhucPEeyZE05qtq8ZWcBgFREYwBk= +k8s.io/kube-aggregator v0.22.4/go.mod h1:nH2L1wiG9pMqYV7P8XIMb9RbIEZPBwxz0iJqPPrtALU= +k8s.io/kube-controller-manager v0.22.4/go.mod h1:BLoqqosh47s25JarHCC5ghmV24AlYp5/tRjatt/YjUY= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-proxy v0.22.2/go.mod h1:pk0QwfYdTsg7aC9ycMF5MFbasIxhBAPFCvfwdmNikZs= -k8s.io/kube-scheduler v0.22.2/go.mod h1:aaElZivB8w1u8Ki7QcwuRSL7AcVWC7xa0LzeiT8zQ7I= -k8s.io/kubectl v0.22.2 h1:KMyYNZoBshaL3XKx04X07DtpoD4vMrdkfiN/G2Qx/PU= -k8s.io/kubectl v0.22.2/go.mod h1:BApg2j0edxLArCOfO0ievI27EeTQqBDMNU9VQH734iQ= -k8s.io/kubelet v0.22.2 h1:7ol5AXXxcW97dUE8W/QiPjkXu1ZuGshG5VmgDmviZsc= -k8s.io/kubelet v0.22.2/go.mod h1:ORIRua2/wTcx5UnEvxWosu650/8fatmzbMRC7m6WjAM= -k8s.io/kubernetes v1.22.3 h1:/eFfR5S2Vxn0t9kcLVAZXQFloKMkklWQIf5e0hFbzlA= -k8s.io/kubernetes v1.22.3/go.mod h1:Snea7fgIObGgHmLbUJ3OgjGEr5bjj16iEdp5oHS6eS8= -k8s.io/legacy-cloud-providers v0.22.2/go.mod h1:oC6zhm9nhJ5M4VTDHzsO/4MpddZR5JqEt55zZ52JRMc= -k8s.io/metrics v0.22.2/go.mod h1:GUcsBtpsqQD1tKFS/2wCKu4ZBowwRncLOJH1rgWs3uw= -k8s.io/mount-utils v0.22.2 h1:w/CJq+Cofkr81Rp89UkokgEbuu8Js0LwMI/RWWEE+gs= -k8s.io/mount-utils v0.22.2/go.mod h1:dHl6c2P60T5LHUnZxVslyly9EDCMzvhtISO5aY+Z4sk= -k8s.io/pod-security-admission v0.22.2/go.mod h1:5FK/TIw6rySU522cZVueMcS/LPPovNHbsm1I1gLfVfU= -k8s.io/sample-apiserver v0.22.2/go.mod h1:h+/DIV5EmuNq4vfPr5TSXy9mIBVXXlPAKQMPbjPrlFM= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-proxy v0.22.4/go.mod h1:TTzZmcecSHXUL/3d6P4puVrZt4h0UNhT2RmxSdmg7B0= +k8s.io/kube-scheduler v0.22.4/go.mod h1:2q5YGJngwFZ/9witl/n8Dij9qf52T3nR1g6OD6+pvLM= +k8s.io/kubectl v0.22.4 h1:ECUO1QWyZ70DiIKEfgBx+8i9D98uspVOwgc1APs/07w= +k8s.io/kubectl 
v0.22.4/go.mod h1:ok2qRT6y2Gy4+y+mniJVyUMKeBHP4OWS9Rdtf/QTM5I= +k8s.io/kubelet v0.22.4 h1:0eaVDObhAuDCDnQJS9xqgfAP5/IWHMt6un4L/DQs0so= +k8s.io/kubelet v0.22.4/go.mod h1:9dCtyqqDnXJYF9E2mejBmDQb+flkAGFBzGgnlW/goyo= +k8s.io/kubernetes v1.22.4 h1:N5kU4bJEghcB2226/GH9Bca+oNcH6JTplcr9euN5ti8= +k8s.io/kubernetes v1.22.4/go.mod h1:cMy6DFG4E+/jxMgxw1aWMwZqvI1AueV3HCcG9S7QNIk= +k8s.io/legacy-cloud-providers v0.22.4/go.mod h1:Kw5X3DTa1/skHsKVgcrcK9d1JVXrdQpG77kWg/JPV68= +k8s.io/metrics v0.22.4/go.mod h1:6F/iwuYb1w2QDCoHkeMFLf4pwHBcYKLm4mPtVHKYrIw= +k8s.io/mount-utils v0.22.4 h1:COkD8uKbphYVZXDYR4+464sZaeiYIwU5hdTyigjz0Xc= +k8s.io/mount-utils v0.22.4/go.mod h1:dHl6c2P60T5LHUnZxVslyly9EDCMzvhtISO5aY+Z4sk= +k8s.io/pod-security-admission v0.22.4/go.mod h1:R6VgmZm77Ik1qWRBBExuiUIRXR6kGQIRM/Zh5yVpYyA= +k8s.io/sample-apiserver v0.22.4/go.mod h1:QIrXoUymVFpdy0Ei5WQjOa/Ewi3Ni+5XEhG8WD/D4iI= k8s.io/system-validators v1.5.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/vendor/github.com/bits-and-blooms/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore new file mode 100644 index 000000000..5c204d28b --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +target diff --git a/vendor/github.com/bits-and-blooms/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml new file mode 100644 index 000000000..094aa5ce0 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/.travis.yml @@ -0,0 +1,37 @@ +language: go + +sudo: false + +branches: + except: + - release + +branches: + only: + - master + - travis + +go: + - "1.11.x" + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; + - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; + - go get github.com/mattn/goveralls + +before_script: + - make deps + +script: + - make qa + +after_failure: + - cat ./target/test/report.xml + +after_success: + - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/bits-and-blooms/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE new file mode 100644 index 000000000..59cab8a93 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 Will Fitzgerald. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md new file mode 100644 index 000000000..97e83071e --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/README.md @@ -0,0 +1,93 @@ +# bitset + +*Go language library to map between non-negative integers and boolean values* + +[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) +[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) + + +## Description + +Package bitset implements bitsets, a mapping between non-negative integers and boolean values. +It should be more efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing individual integers. + +But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used. + +Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. + +### Example use: + +```go +package main + +import ( + "fmt" + "math/rand" + + "github.com/bits-and-blooms/bitset" +) + +func main() { + fmt.Printf("Hello from BitSet!\n") + var b bitset.BitSet + // play some Go Fish + for i := 0; i < 100; i++ { + card1 := uint(rand.Intn(52)) + card2 := uint(rand.Intn(52)) + b.Set(card1) + if b.Test(card2) { + fmt.Println("Go Fish!") + } + b.Clear(card1) + } + + // Chaining + b.Set(10).Set(11) + + for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { + fmt.Println("The following bit is set:", i) + } + if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { + fmt.Println("Intersection works.") + } else { + fmt.Println("Intersection doesn't work???") + } +} +``` + +As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. + +Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc + +## Memory Usage + +The memory usage of a bitset using N bits is at least N/8 bytes. 
The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). + +## Implementation Note + +Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. + +It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. + +## Installation + +```bash +go get github.com/bits-and-blooms/bitset +``` + +## Contributing + +If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") + +## Running all tests + +Before committing the code, please check if it passes tests, has adequate coverage, etc. +```bash +go test +go test -cover +``` diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml new file mode 100644 index 000000000..f9b295918 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml @@ -0,0 +1,39 @@ +# Go +# Build your Go project. +# Add steps that test, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/languages/go + +trigger: +- master + +pool: + vmImage: 'Ubuntu-16.04' + +variables: + GOBIN: '$(GOPATH)/bin' # Go binaries path + GOROOT: '/usr/local/go1.11' # Go installation path + GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path + modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code + +steps: +- script: | + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + shopt -s extglob + shopt -s dotglob + mv !(gopath) '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + displayName: 'Set up the Go workspace' + +- script: | + go version + go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + go build -v . + workingDirectory: '$(modulePath)' + displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go new file mode 100644 index 000000000..d688806a5 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go @@ -0,0 +1,952 @@ +/* +Package bitset implements bitsets, a mapping +between non-negative integers and boolean values. It should be more +efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing +individual integers. + +But it also provides set intersection, union, difference, +complement, and symmetric operations, as well as tests to +check whether any, all, or no bits are set, and querying a +bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the +memory allocation is approximately Max bits, where Max is +the largest set bit. BitSets are never shrunk. On creation, +a hint can be given for the number of bits that will be used. + +Many of the methods, including Set,Clear, and Flip, return +a BitSet pointer, which allows for chaining. 
+ +Example use: + + import "bitset" + var b BitSet + b.Set(10).Set(11) + if b.Test(1000) { + b.Clear(1000) + } + if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { + fmt.Println("Intersection works.") + } + +As an alternative to BitSets, one should check out the 'big' package, +which provides a (less set-theoretical) view of bitsets. + +*/ +package bitset + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// the wordSize of a bit set +const wordSize = uint(64) + +// log2WordSize is lg(wordSize) +const log2WordSize = uint(6) + +// allBits has every bit set +const allBits uint64 = 0xffffffffffffffff + +// default binary BigEndian +var binaryOrder binary.ByteOrder = binary.BigEndian + +// default json encoding base64.URLEncoding +var base64Encoding = base64.URLEncoding + +// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) +func Base64StdEncoding() { base64Encoding = base64.StdEncoding } + +// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) +func LittleEndian() { binaryOrder = binary.LittleEndian } + +// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. +type BitSet struct { + length uint + set []uint64 +} + +// Error is used to distinguish errors (panics) generated in this package. +type Error string + +// safeSet will fixup b.set to be non-nil and return the field value +func (b *BitSet) safeSet() []uint64 { + if b.set == nil { + b.set = make([]uint64, wordsNeeded(0)) + } + return b.set +} + +// From is a constructor used to create a BitSet from an array of integers +func From(buf []uint64) *BitSet { + return &BitSet{uint(len(buf)) * 64, buf} +} + +// Bytes returns the bitset as array of integers +func (b *BitSet) Bytes() []uint64 { + return b.set +} + +// wordsNeeded calculates the number of words needed for i bits +func wordsNeeded(i uint) int { + if i > (Cap() - wordSize + 1) { + return int(Cap() >> log2WordSize) + } + return int((i + (wordSize - 1)) >> log2WordSize) +} + +// New creates a new BitSet with a hint that length bits will be required +func New(length uint) (bset *BitSet) { + defer func() { + if r := recover(); r != nil { + bset = &BitSet{ + 0, + make([]uint64, 0), + } + } + }() + + bset = &BitSet{ + length, + make([]uint64, wordsNeeded(length)), + } + + return bset +} + +// Cap returns the total possible capacity, or number of bits +func Cap() uint { + return ^uint(0) +} + +// Len returns the number of bits in the BitSet. +// Note the difference to method Count, see example. +func (b *BitSet) Len() uint { + return b.length +} + +// extendSetMaybe adds additional words to incorporate new bits if needed +func (b *BitSet) extendSetMaybe(i uint) { + if i >= b.length { // if we need more bits, make 'em + if i >= Cap() { + panic("You are exceeding the capacity") + } + nsize := wordsNeeded(i + 1) + if b.set == nil { + b.set = make([]uint64, nsize) + } else if cap(b.set) >= nsize { + b.set = b.set[:nsize] // fast resize + } else if len(b.set) < nsize { + newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x + copy(newset, b.set) + b.set = newset + } + b.length = i + 1 + } +} + +// Test whether bit i is set. +func (b *BitSet) Test(i uint) bool { + if i >= b.length { + return false + } + return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 +} + +// Set bit i to 1, the capacity of the bitset is automatically +// increased accordingly. 
+// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) Set(i uint) *BitSet { + b.extendSetMaybe(i) + b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) + return b +} + +// Clear bit i to 0 +func (b *BitSet) Clear(i uint) *BitSet { + if i >= b.length { + return b + } + b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) + return b +} + +// SetTo sets bit i to value. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) SetTo(i uint, value bool) *BitSet { + if value { + return b.Set(i) + } + return b.Clear(i) +} + +// Flip bit at i. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) Flip(i uint) *BitSet { + if i >= b.length { + return b.Set(i) + } + b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) + return b +} + +// FlipRange bit in [start, end). +// If end>= Cap(), this function will panic. +// Warning: using a very large value for 'end' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) FlipRange(start, end uint) *BitSet { + if start >= end { + return b + } + + b.extendSetMaybe(end - 1) + var startWord uint = start >> log2WordSize + var endWord uint = end >> log2WordSize + b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1))) + for i := startWord; i < endWord; i++ { + b.set[i] = ^b.set[i] + } + b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1)) + return b +} + +// Shrink shrinks BitSet so that the provided value is the last possible +// set value. It clears all bits > the provided index and reduces the size +// and length of the set. +// +// Note that the parameter value is not the new length in bits: it is the +// maximal value that can be stored in the bitset after the function call. +// The new length in bits is the parameter value + 1. Thus it is not possible +// to use this function to set the length to 0, the minimal value of the length +// after this function call is 1. +// +// A new slice is allocated to store the new bits, so you may see an increase in +// memory usage until the GC runs. Normally this should not be a problem, but if you +// have an extremely large BitSet its important to understand that the old BitSet will +// remain in memory until the GC frees it. +func (b *BitSet) Shrink(lastbitindex uint) *BitSet { + length := lastbitindex + 1 + idx := wordsNeeded(length) + if idx > len(b.set) { + return b + } + shrunk := make([]uint64, idx) + copy(shrunk, b.set[:idx]) + b.set = shrunk + b.length = length + b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)))) + return b +} + +// Compact shrinks BitSet to so that we preserve all set bits, while minimizing +// memory usage. Compact calls Shrink. 
+func (b *BitSet) Compact() *BitSet { + idx := len(b.set) - 1 + for ; idx >= 0 && b.set[idx] == 0; idx-- { + } + newlength := uint((idx + 1) << log2WordSize) + if newlength >= b.length { + return b // nothing to do + } + if newlength > 0 { + return b.Shrink(newlength - 1) + } + // We preserve one word + return b.Shrink(63) +} + +// InsertAt takes an index which indicates where a bit should be +// inserted. Then it shifts all the bits in the set to the left by 1, starting +// from the given index position, and sets the index position to 0. +// +// Depending on the size of your BitSet, and where you are inserting the new entry, +// this method could be extremely slow and in some cases might cause the entire BitSet +// to be recopied. +func (b *BitSet) InsertAt(idx uint) *BitSet { + insertAtElement := (idx >> log2WordSize) + + // if length of set is a multiple of wordSize we need to allocate more space first + if b.isLenExactMultiple() { + b.set = append(b.set, uint64(0)) + } + + var i uint + for i = uint(len(b.set) - 1); i > insertAtElement; i-- { + // all elements above the position where we want to insert can simply by shifted + b.set[i] <<= 1 + + // we take the most significant bit of the previous element and set it as + // the least significant bit of the current element + b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 + } + + // generate a mask to extract the data that we need to shift left + // within the element where we insert a bit + dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1) + + // extract that data that we'll shift + data := b.set[insertAtElement] & dataMask + + // set the positions of the data mask to 0 in the slice element where we insert + b.set[insertAtElement] &= ^dataMask + + // shift the extracted data to the left and insert it back into the slice + // element, leaving the bit at the insert position cleared + b.set[insertAtElement] |= data << 1 + + // the set grows by one bit + b.length++ + + return b +} + +// String creates a string representation of the Bitmap +func (b *BitSet) String() string { + // follows code from https://github.com/RoaringBitmap/roaring + var buffer bytes.Buffer + start := []byte("{") + buffer.Write(start) + counter := 0 + i, e := b.NextSet(0) + for e { + counter = counter + 1 + // to avoid exhausting the memory + if counter > 0x40000 { + buffer.WriteString("...") + break + } + buffer.WriteString(strconv.FormatInt(int64(i), 10)) + i, e = b.NextSet(i + 1) + if e { + buffer.WriteString(",") + } + } + buffer.WriteString("}") + return buffer.String() +} + +// DeleteAt deletes the bit at the given index position from +// within the bitset +// All the bits residing on the left of the deleted bit get +// shifted right by 1 +// The running time of this operation may potentially be +// relatively slow, O(length) +func (b *BitSet) DeleteAt(i uint) *BitSet { + // the index of the slice element where we'll delete a bit + deleteAtElement := i >> log2WordSize + + // generate a mask for the data that needs to be shifted right + // within that slice element that gets modified + dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) + + // extract the data that we'll shift right from the slice element + data := b.set[deleteAtElement] & dataMask + + // set the masked area to 0 while leaving the rest as it is + b.set[deleteAtElement] &= ^dataMask + + // shift the previously extracted data to the right and then + // set it in the previously masked area + b.set[deleteAtElement] |= (data >> 1) & dataMask + + // loop over all the consecutive slice elements to copy each + // lowest bit into the highest position of the previous element, + // then shift the entire content to the right by 1 + for i := int(deleteAtElement) + 1; i < len(b.set); i++ { + b.set[i-1] |= (b.set[i] & 1) << 63 + b.set[i] >>= 1 + } + + b.length = b.length - 1 + + return b +} + +// NextSet returns the next bit set from the specified index, +// including possibly the current index +// along with an error code (true = valid, false = no set bit found) +// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} +// +// Users concerned with performance may want to use NextSetMany to +// retrieve several values at once.
+func (b *BitSet) NextSet(i uint) (uint, bool) { + x := int(i >> log2WordSize) + if x >= len(b.set) { + return 0, false + } + w := b.set[x] + w = w >> (i & (wordSize - 1)) + if w != 0 { + return i + trailingZeroes64(w), true + } + x = x + 1 + for x < len(b.set) { + if b.set[x] != 0 { + return uint(x)*wordSize + trailingZeroes64(b.set[x]), true + } + x = x + 1 + + } + return 0, false +} + +// NextSetMany returns many next bit sets from the specified index, +// including possibly the current index and up to cap(buffer). +// If the returned slice has len zero, then no more set bits were found +// +// buffer := make([]uint, 256) // this should be reused +// j := uint(0) +// j, buffer = bitmap.NextSetMany(j, buffer) +// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { +// for k := range buffer { +// do something with buffer[k] +// } +// j += 1 +// } +// +// +// It is possible to retrieve all set bits as follow: +// +// indices := make([]uint, bitmap.Count()) +// bitmap.NextSetMany(0, indices) +// +// However if bitmap.Count() is large, it might be preferable to +// use several calls to NextSetMany, for performance reasons. +func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { + myanswer := buffer + capacity := cap(buffer) + x := int(i >> log2WordSize) + if x >= len(b.set) || capacity == 0 { + return 0, myanswer[:0] + } + skip := i & (wordSize - 1) + word := b.set[x] >> skip + myanswer = myanswer[:capacity] + size := int(0) + for word != 0 { + r := trailingZeroes64(word) + t := word & ((^word) + 1) + myanswer[size] = r + i + size++ + if size == capacity { + goto End + } + word = word ^ t + } + x++ + for idx, word := range b.set[x:] { + for word != 0 { + r := trailingZeroes64(word) + t := word & ((^word) + 1) + myanswer[size] = r + (uint(x+idx) << 6) + size++ + if size == capacity { + goto End + } + word = word ^ t + } + } +End: + if size > 0 { + return myanswer[size-1], myanswer[:size] + } + return 0, myanswer[:0] +} + +// NextClear returns the next clear bit from the specified index, +// including possibly the current index +// along with an error code (true = valid, false = no bit found i.e. all bits are set) +func (b *BitSet) NextClear(i uint) (uint, bool) { + x := int(i >> log2WordSize) + if x >= len(b.set) { + return 0, false + } + w := b.set[x] + w = w >> (i & (wordSize - 1)) + wA := allBits >> (i & (wordSize - 1)) + index := i + trailingZeroes64(^w) + if w != wA && index < b.length { + return index, true + } + x++ + for x < len(b.set) { + index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) + if b.set[x] != allBits && index < b.length { + return index, true + } + x++ + } + return 0, false +} + +// ClearAll clears the entire BitSet +func (b *BitSet) ClearAll() *BitSet { + if b != nil && b.set != nil { + for i := range b.set { + b.set[i] = 0 + } + } + return b +} + +// wordCount returns the number of words used in a bit set +func (b *BitSet) wordCount() int { + return len(b.set) +} + +// Clone this BitSet +func (b *BitSet) Clone() *BitSet { + c := New(b.length) + if b.set != nil { // Clone should not modify current object + copy(c.set, b.set) + } + return c +} + +// Copy into a destination BitSet +// Returning the size of the destination BitSet +// like array copy +func (b *BitSet) Copy(c *BitSet) (count uint) { + if c == nil { + return + } + if b.set != nil { // Copy should not modify current object + copy(c.set, b.set) + } + count = c.length + if b.length < c.length { + count = b.length + } + return +} + +// Count (number of set bits). 
+// Also known as "popcount" or "population count". +func (b *BitSet) Count() uint { + if b != nil && b.set != nil { + return uint(popcntSlice(b.set)) + } + return 0 +} + +// Equal tests the equivalence of two BitSets. +// False if they are of different sizes, otherwise true +// only if all the same bits are set +func (b *BitSet) Equal(c *BitSet) bool { + if c == nil || b == nil { + return c == b + } + if b.length != c.length { + return false + } + if b.length == 0 { // if they have both length == 0, then could have nil set + return true + } + // testing for equality shoud not transform the bitset (no call to safeSet) + + for p, v := range b.set { + if c.set[p] != v { + return false + } + } + return true +} + +func panicIfNull(b *BitSet) { + if b == nil { + panic(Error("BitSet must not be null")) + } +} + +// Difference of base set and other set +// This is the BitSet equivalent of &^ (and not) +func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + result = b.Clone() // clone b (in case b is bigger than compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + result.set[i] = b.set[i] &^ compare.set[i] + } + return +} + +// DifferenceCardinality computes the cardinality of the differnce +func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + cnt := uint64(0) + cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) + cnt += popcntSlice(b.set[l:]) + return uint(cnt) +} + +// InPlaceDifference computes the difference of base set and other set +// This is the BitSet equivalent of &^ (and not) +func (b *BitSet) InPlaceDifference(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + b.set[i] &^= compare.set[i] + } +} + +// Convenience function: return two bitsets ordered by +// increasing length. Note: neither can be nil +func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { + if a.length <= b.length { + ap, bp = a, b + } else { + ap, bp = b, a + } + return +} + +// Intersection of base set and other set +// This is the BitSet equivalent of & (and) +func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + result = New(b.length) + for i, word := range b.set { + result.set[i] = word & compare.set[i] + } + return +} + +// IntersectionCardinality computes the cardinality of the union +func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntAndSlice(b.set, compare.set) + return uint(cnt) +} + +// InPlaceIntersection destructively computes the intersection of +// base set and the compare set. 
+// This is the BitSet equivalent of & (and) +func (b *BitSet) InPlaceIntersection(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + b.set[i] &= compare.set[i] + } + for i := l; i < len(b.set); i++ { + b.set[i] = 0 + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } +} + +// Union of base set and other set +// This is the BitSet equivalent of | (or) +func (b *BitSet) Union(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + result = compare.Clone() + for i, word := range b.set { + result.set[i] = word | compare.set[i] + } + return +} + +// UnionCardinality computes the cardinality of the uniton of the base set +// and the compare set. +func (b *BitSet) UnionCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntOrSlice(b.set, compare.set) + if len(compare.set) > len(b.set) { + cnt += popcntSlice(compare.set[len(b.set):]) + } + return uint(cnt) +} + +// InPlaceUnion creates the destructive union of base set and compare set. +// This is the BitSet equivalent of | (or). +func (b *BitSet) InPlaceUnion(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } + for i := 0; i < l; i++ { + b.set[i] |= compare.set[i] + } + if len(compare.set) > l { + for i := l; i < len(compare.set); i++ { + b.set[i] = compare.set[i] + } + } +} + +// SymmetricDifference of base set and other set +// This is the BitSet equivalent of ^ (xor) +func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + // compare is bigger, so clone it + result = compare.Clone() + for i, word := range b.set { + result.set[i] = word ^ compare.set[i] + } + return +} + +// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference +func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntXorSlice(b.set, compare.set) + if len(compare.set) > len(b.set) { + cnt += popcntSlice(compare.set[len(b.set):]) + } + return uint(cnt) +} + +// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set +// This is the BitSet equivalent of ^ (xor) +func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } + for i := 0; i < l; i++ { + b.set[i] ^= compare.set[i] + } + if len(compare.set) > l { + for i := l; i < len(compare.set); i++ { + b.set[i] = compare.set[i] + } + } +} + +// Is the length an exact multiple of word sizes? 
+func (b *BitSet) isLenExactMultiple() bool { + return b.length%wordSize == 0 +} + +// Clean last word by setting unused bits to 0 +func (b *BitSet) cleanLastWord() { + if !b.isLenExactMultiple() { + b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize) + } +} + +// Complement computes the (local) complement of a biset (up to length bits) +func (b *BitSet) Complement() (result *BitSet) { + panicIfNull(b) + result = New(b.length) + for i, word := range b.set { + result.set[i] = ^word + } + result.cleanLastWord() + return +} + +// All returns true if all bits are set, false otherwise. Returns true for +// empty sets. +func (b *BitSet) All() bool { + panicIfNull(b) + return b.Count() == b.length +} + +// None returns true if no bit is set, false otherwise. Returns true for +// empty sets. +func (b *BitSet) None() bool { + panicIfNull(b) + if b != nil && b.set != nil { + for _, word := range b.set { + if word > 0 { + return false + } + } + return true + } + return true +} + +// Any returns true if any bit is set, false otherwise +func (b *BitSet) Any() bool { + panicIfNull(b) + return !b.None() +} + +// IsSuperSet returns true if this is a superset of the other set +func (b *BitSet) IsSuperSet(other *BitSet) bool { + for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { + if !b.Test(i) { + return false + } + } + return true +} + +// IsStrictSuperSet returns true if this is a strict superset of the other set +func (b *BitSet) IsStrictSuperSet(other *BitSet) bool { + return b.Count() > other.Count() && b.IsSuperSet(other) +} + +// DumpAsBits dumps a bit set as a string of bits +func (b *BitSet) DumpAsBits() string { + if b.set == nil { + return "." + } + buffer := bytes.NewBufferString("") + i := len(b.set) - 1 + for ; i >= 0; i-- { + fmt.Fprintf(buffer, "%064b.", b.set[i]) + } + return buffer.String() +} + +// BinaryStorageSize returns the binary storage requirements +func (b *BitSet) BinaryStorageSize() int { + return binary.Size(uint64(0)) + binary.Size(b.set) +} + +// WriteTo writes a BitSet to a stream +func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { + length := uint64(b.length) + + // Write length + err := binary.Write(stream, binaryOrder, length) + if err != nil { + return 0, err + } + + // Write set + err = binary.Write(stream, binaryOrder, b.set) + return int64(b.BinaryStorageSize()), err +} + +// ReadFrom reads a BitSet from a stream written using WriteTo +func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { + var length uint64 + + // Read length first + err := binary.Read(stream, binaryOrder, &length) + if err != nil { + return 0, err + } + newset := New(uint(length)) + + if uint64(newset.length) != length { + return 0, errors.New("unmarshalling error: type mismatch") + } + + // Read remaining bytes as set + err = binary.Read(stream, binaryOrder, newset.set) + if err != nil { + return 0, err + } + + *b = *newset + return int64(b.BinaryStorageSize()), nil +} + +// MarshalBinary encodes a BitSet into a binary form and returns the result. +func (b *BitSet) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + writer := bufio.NewWriter(&buf) + + _, err := b.WriteTo(writer) + if err != nil { + return []byte{}, err + } + + err = writer.Flush() + + return buf.Bytes(), err +} + +// UnmarshalBinary decodes the binary form generated by MarshalBinary. 
+func (b *BitSet) UnmarshalBinary(data []byte) error {
+	buf := bytes.NewReader(data)
+	reader := bufio.NewReader(buf)
+
+	_, err := b.ReadFrom(reader)
+
+	return err
+}
+
+// MarshalJSON marshals a BitSet as a JSON structure
+func (b *BitSet) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize()))
+	_, err := b.WriteTo(buffer)
+	if err != nil {
+		return nil, err
+	}
+
+	// URLEncode all bytes
+	return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes()))
+}
+
+// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON
+func (b *BitSet) UnmarshalJSON(data []byte) error {
+	// Unmarshal as string
+	var s string
+	err := json.Unmarshal(data, &s)
+	if err != nil {
+		return err
+	}
+
+	// URLDecode string
+	buf, err := base64Encoding.DecodeString(s)
+	if err != nil {
+		return err
+	}
+
+	_, err = b.ReadFrom(bytes.NewReader(buf))
+	return err
+}
diff --git a/vendor/github.com/bits-and-blooms/bitset/go.mod b/vendor/github.com/bits-and-blooms/bitset/go.mod
new file mode 100644
index 000000000..c43e4522b
--- /dev/null
+++ b/vendor/github.com/bits-and-blooms/bitset/go.mod
@@ -0,0 +1,3 @@
+module github.com/bits-and-blooms/bitset
+
+go 1.14
diff --git a/vendor/github.com/bits-and-blooms/bitset/go.sum b/vendor/github.com/bits-and-blooms/bitset/go.sum
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go
new file mode 100644
index 000000000..76577a838
--- /dev/null
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt.go
@@ -0,0 +1,53 @@
+package bitset
+
+// bit population count, taken from
+// https://code.google.com/p/go/issues/detail?id=4988#c11
+// credit: https://code.google.com/u/arnehormann/
+func popcount(x uint64) (n uint64) {
+	x -= (x >> 1) & 0x5555555555555555
+	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
+	x += x >> 4
+	x &= 0x0f0f0f0f0f0f0f0f
+	x *= 0x0101010101010101
+	return x >> 56
+}
+
+func popcntSliceGo(s []uint64) uint64 {
+	cnt := uint64(0)
+	for _, x := range s {
+		cnt += popcount(x)
+	}
+	return cnt
+}
+
+func popcntMaskSliceGo(s, m []uint64) uint64 {
+	cnt := uint64(0)
+	for i := range s {
+		cnt += popcount(s[i] &^ m[i])
+	}
+	return cnt
+}
+
+func popcntAndSliceGo(s, m []uint64) uint64 {
+	cnt := uint64(0)
+	for i := range s {
+		cnt += popcount(s[i] & m[i])
+	}
+	return cnt
+}
+
+func popcntOrSliceGo(s, m []uint64) uint64 {
+	cnt := uint64(0)
+	for i := range s {
+		cnt += popcount(s[i] | m[i])
+	}
+	return cnt
+}
+
+func popcntXorSliceGo(s, m []uint64) uint64 {
+	cnt := uint64(0)
+	for i := range s {
+		cnt += popcount(s[i] ^ m[i])
+	}
+	return cnt
+}
diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
new file mode 100644
index 000000000..fc8ff4f36
--- /dev/null
+++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
@@ -0,0 +1,45 @@
+// +build go1.9
+
+package bitset
+
+import "math/bits"
+
+func popcntSlice(s []uint64) uint64 {
+	var cnt int
+	for _, x := range s {
+		cnt += bits.OnesCount64(x)
+	}
+	return uint64(cnt)
+}
+
+func popcntMaskSlice(s, m []uint64) uint64 {
+	var cnt int
+	for i := range s {
+		cnt += bits.OnesCount64(s[i] &^ m[i])
+	}
+	return uint64(cnt)
+}
+
+func popcntAndSlice(s, m []uint64) uint64 {
+	var cnt int
+	for i := range s {
+		cnt += bits.OnesCount64(s[i] & m[i])
+	}
+	return uint64(cnt)
+}
+
+func popcntOrSlice(s, m []uint64) uint64 {
+	var cnt int
+	for i := range s {
+		cnt += 
bits.OnesCount64(s[i] | m[i]) + } + return uint64(cnt) +} + +func popcntXorSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] ^ m[i]) + } + return uint64(cnt) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go new file mode 100644 index 000000000..4cf64f24a --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go @@ -0,0 +1,68 @@ +// +build !go1.9 +// +build amd64,!appengine + +package bitset + +// *** the following functions are defined in popcnt_amd64.s + +//go:noescape + +func hasAsm() bool + +// useAsm is a flag used to select the GO or ASM implementation of the popcnt function +var useAsm = hasAsm() + +//go:noescape + +func popcntSliceAsm(s []uint64) uint64 + +//go:noescape + +func popcntMaskSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntAndSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntOrSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntXorSliceAsm(s, m []uint64) uint64 + +func popcntSlice(s []uint64) uint64 { + if useAsm { + return popcntSliceAsm(s) + } + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + if useAsm { + return popcntMaskSliceAsm(s, m) + } + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + if useAsm { + return popcntAndSliceAsm(s, m) + } + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + if useAsm { + return popcntOrSliceAsm(s, m) + } + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + if useAsm { + return popcntXorSliceAsm(s, m) + } + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s new file mode 100644 index 000000000..666c0dcc1 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s @@ -0,0 +1,104 @@ +// +build !go1.9 +// +build amd64,!appengine + +TEXT ·hasAsm(SB),4,$0-1 +MOVQ $1, AX +CPUID +SHRQ $23, CX +ANDQ $1, CX +MOVB CX, ret+0(FP) +RET + +#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 + +TEXT ·popcntSliceAsm(SB),4,$0-32 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntSliceEnd +popcntSliceLoop: +BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX +ADDQ DX, AX +ADDQ $8, SI +LOOP popcntSliceLoop +popcntSliceEnd: +MOVQ AX, ret+24(FP) +RET + +TEXT ·popcntMaskSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntMaskSliceEnd +MOVQ m+24(FP), DI +popcntMaskSliceLoop: +MOVQ (DI), DX +NOTQ DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntMaskSliceLoop +popcntMaskSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntAndSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntAndSliceEnd +MOVQ m+24(FP), DI +popcntAndSliceLoop: +MOVQ (DI), DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntAndSliceLoop +popcntAndSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntOrSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntOrSliceEnd +MOVQ m+24(FP), DI +popcntOrSliceLoop: +MOVQ (DI), DX +ORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntOrSliceLoop +popcntOrSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntXorSliceAsm(SB),4,$0-56 +XORQ 
AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntXorSliceEnd +MOVQ m+24(FP), DI +popcntXorSliceLoop: +MOVQ (DI), DX +XORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntXorSliceLoop +popcntXorSliceEnd: +MOVQ AX, ret+48(FP) +RET diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go new file mode 100644 index 000000000..21e0ff7b4 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go @@ -0,0 +1,24 @@ +// +build !go1.9 +// +build !amd64 appengine + +package bitset + +func popcntSlice(s []uint64) uint64 { + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go new file mode 100644 index 000000000..c52b61be9 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go @@ -0,0 +1,14 @@ +// +build !go1.9 + +package bitset + +var deBruijn = [...]byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +func trailingZeroes64(v uint64) uint { + return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go new file mode 100644 index 000000000..36a988e71 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go @@ -0,0 +1,9 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func trailingZeroes64(v uint64) uint { + return uint(bits.TrailingZeros64(v)) +} diff --git a/vendor/github.com/opencontainers/selinux/LICENSE b/vendor/github.com/opencontainers/selinux/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go new file mode 100644 index 000000000..0ac7d819e --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go @@ -0,0 +1,14 @@ +/* +Package selinux provides a high-level interface for interacting with selinux. 
+
+Usage:
+
+	import "github.com/opencontainers/selinux/go-selinux"
+
+	// Ensure that selinux is in enforcing mode.
+	if selinux.EnforceMode() != selinux.Enforcing {
+		selinux.SetEnforceMode(selinux.Enforcing)
+	}
+
+*/
+package selinux
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
new file mode 100644
index 000000000..b336ebad3
--- /dev/null
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
@@ -0,0 +1,284 @@
+package selinux
+
+import (
+	"github.com/pkg/errors"
+)
+
+const (
+	// Enforcing constant to indicate SELinux is in enforcing mode
+	Enforcing = 1
+	// Permissive constant to indicate SELinux is in permissive mode
+	Permissive = 0
+	// Disabled constant to indicate SELinux is disabled
+	Disabled = -1
+	// maxCategory is the maximum number of categories used within containers
+	maxCategory = 1024
+	// DefaultCategoryRange is the upper bound on the category range
+	DefaultCategoryRange = uint32(maxCategory)
+)
+
+var (
+	// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
+	ErrMCSAlreadyExists = errors.New("MCS label already exists")
+	// ErrEmptyPath is returned when an empty path has been specified.
+	ErrEmptyPath = errors.New("empty path")
+
+	// InvalidLabel is returned when an invalid label is specified.
+	InvalidLabel = errors.New("Invalid Label")
+
+	// ErrIncomparable is returned when two levels are not comparable
+	ErrIncomparable = errors.New("incomparable levels")
+	// ErrLevelSyntax is returned when a sensitivity or category does not have correct syntax in a level
+	ErrLevelSyntax = errors.New("invalid level syntax")
+
+	// ErrContextMissing is returned if a requested context is not found in a file.
+	ErrContextMissing = errors.New("context does not have a match")
+	// ErrVerifierNil is returned when a context verifier function is nil.
+	ErrVerifierNil = errors.New("verifier function is nil")
+
+	// CategoryRange allows the upper bound on the category range to be adjusted
+	CategoryRange = DefaultCategoryRange
+)
+
+// Context is a representation of the SELinux label broken into 4 parts
+type Context map[string]string
+
+// SetDisabled disables SELinux support for the package
+func SetDisabled() {
+	setDisabled()
+}
+
+// GetEnabled returns whether SELinux is currently enabled.
+func GetEnabled() bool {
+	return getEnabled()
+}
+
+// ClassIndex returns the int index for an object class in the loaded policy,
+// or -1 and an error
+func ClassIndex(class string) (int, error) {
+	return classIndex(class)
+}
+
+// SetFileLabel sets the SELinux label for this path or returns an error.
+func SetFileLabel(fpath string, label string) error {
+	return setFileLabel(fpath, label)
+}
+
+// FileLabel returns the SELinux label for this path or returns an error.
+func FileLabel(fpath string) (string, error) {
+	return fileLabel(fpath)
+}
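+
+// Caller-side sketch for the label getters/setters above (path and label
+// are illustrative):
+//
+//	if err := selinux.SetFileLabel("/var/lib/app/data",
+//		"system_u:object_r:container_file_t:s0:c1,c2"); err != nil {
+//		// handle error
+//	}
+//	label, _ := selinux.FileLabel("/var/lib/app/data") // reads it back
+
+// SetFSCreateLabel tells the kernel what label to use for all file system objects
+// created by this task.
+// Set the label to an empty string to return to the default label. Calls to SetFSCreateLabel
+// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until file system
+// objects created by this task are finished to guarantee another goroutine does not migrate
+// to the current thread before execution is complete.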
+func SetFSCreateLabel(label string) error {
+	return setFSCreateLabel(label)
+}
+
+// FSCreateLabel returns the default label which the kernel is using
+// for file system objects created by this task. "" indicates default.
+func FSCreateLabel() (string, error) {
+	return fsCreateLabel()
+}
+
+// CurrentLabel returns the SELinux label of the current process thread, or an error.
+func CurrentLabel() (string, error) {
+	return currentLabel()
+}
+
+// PidLabel returns the SELinux label of the given pid, or an error.
+func PidLabel(pid int) (string, error) {
+	return pidLabel(pid)
+}
+
+// ExecLabel returns the SELinux label that the kernel will use for any programs
+// that are executed by the current process thread, or an error.
+func ExecLabel() (string, error) {
+	return execLabel()
+}
+
+// CanonicalizeContext takes a context string and writes it to the kernel;
+// the function then returns the context that the kernel will use. Use this
+// function to check if two contexts are equivalent
+func CanonicalizeContext(val string) (string, error) {
+	return canonicalizeContext(val)
+}
+
+// ComputeCreateContext requests the type transition from source to target for
+// class from the kernel.
+func ComputeCreateContext(source string, target string, class string) (string, error) {
+	return computeCreateContext(source, target, class)
+}
+
+// CalculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound)
+// of a source and target range.
+// The glblub is calculated as the greater of the low sensitivities and
+// the lower of the high sensitivities and the AND of each category bitset.
+func CalculateGlbLub(sourceRange, targetRange string) (string, error) {
+	return calculateGlbLub(sourceRange, targetRange)
+}
+
+// SetExecLabel sets the SELinux label that the kernel will use for any programs
+// that are executed by the current process thread, or an error. Calls to SetExecLabel
+// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until execution
+// of the program is finished to guarantee another goroutine does not migrate to the current
+// thread before execution is complete.
+func SetExecLabel(label string) error {
+	return setExecLabel(label)
+}
+
+// SetTaskLabel sets the SELinux label for the current thread, or an error.
+// This requires the dyntransition permission. Calls to SetTaskLabel should
+// be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee
+// the current thread does not run in a new mislabeled thread.
+func SetTaskLabel(label string) error {
+	return setTaskLabel(label)
+}
+
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created. Calls to SetSocketLabel
+// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until
+// the socket is created to guarantee another goroutine does not migrate
+// to the current thread before execution is complete.
+func SetSocketLabel(label string) error {
+	return setSocketLabel(label)
+}
+
+// SocketLabel retrieves the current socket label setting
+func SocketLabel() (string, error) {
+	return socketLabel()
+}
+
+// PeerLabel retrieves the label of the client on the other side of a socket
+func PeerLabel(fd uintptr) (string, error) {
+	return peerLabel(fd)
+}
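+
+// Sketch of the thread-affinity pattern the comments above call for
+// (label is illustrative; error handling elided):
+//
+//	runtime.LockOSThread()
+//	defer runtime.UnlockOSThread()
+//	_ = selinux.SetSocketLabel("system_u:system_r:container_t:s0")
+//	// ... create the socket on this same thread ...
+//	_ = selinux.SetSocketLabel("") // reset to the default
+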
+// SetKeyLabel takes a process label and tells the kernel to assign the
+// label to the next kernel keyring that gets created. Calls to SetKeyLabel
+// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until
+// the kernel keyring is created to guarantee another goroutine does not migrate
+// to the current thread before execution is complete.
+func SetKeyLabel(label string) error {
+	return setKeyLabel(label)
+}
+
+// KeyLabel retrieves the current kernel keyring label setting
+func KeyLabel() (string, error) {
+	return keyLabel()
+}
+
+// Get returns the Context as a string
+func (c Context) Get() string {
+	return c.get()
+}
+
+// NewContext creates a new Context struct from the specified label
+func NewContext(label string) (Context, error) {
+	return newContext(label)
+}
+
+// ClearLabels clears all reserved labels
+func ClearLabels() {
+	clearLabels()
+}
+
+// ReserveLabel reserves the MLS/MCS level component of the specified label
+func ReserveLabel(label string) {
+	reserveLabel(label)
+}
+
+// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
+func EnforceMode() int {
+	return enforceMode()
+}
+
+// SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
+// Disabled is not valid, since this needs to be set at boot time.
+func SetEnforceMode(mode int) error {
+	return setEnforceMode(mode)
+}
+
+// DefaultEnforceMode returns the system's default SELinux mode Enforcing,
+// Permissive or Disabled. Note this is just the default at boot time.
+// EnforceMode tells you the system's current mode.
+func DefaultEnforceMode() int {
+	return defaultEnforceMode()
+}
+
+// ReleaseLabel un-reserves the MLS/MCS Level field of the specified label,
+// allowing it to be used by another process.
+func ReleaseLabel(label string) {
+	releaseLabel(label)
+}
+
+// ROFileLabel returns the specified SELinux readonly file label
+func ROFileLabel() string {
+	return roFileLabel()
+}
+
+// KVMContainerLabels returns the default processLabel and mountLabel to be used
+// for kvm containers by the calling process.
+func KVMContainerLabels() (string, string) {
+	return kvmContainerLabels()
+}
+
+// InitContainerLabels returns the default processLabel and file labels to be
+// used for containers running an init system like systemd by the calling process.
+func InitContainerLabels() (string, string) {
+	return initContainerLabels()
+}
+
+// ContainerLabels returns an allocated processLabel and fileLabel to be used for
+// container labeling by the calling process.
+func ContainerLabels() (processLabel string, fileLabel string) {
+	return containerLabels()
+}
+
+// SecurityCheckContext validates that the SELinux label is understood by the kernel
+func SecurityCheckContext(val string) error {
+	return securityCheckContext(val)
+}
+
+// CopyLevel returns a label with the MLS/MCS level from src label replaced on
+// the dest label.
+func CopyLevel(src, dest string) (string, error) {
+	return copyLevel(src, dest)
+}
+
+// Chcon changes the fpath file object to the SELinux label label.
+// If fpath is a directory and recurse is true, then Chcon walks the
+// directory tree setting the label.
+func Chcon(fpath string, label string, recurse bool) error {
+	return chcon(fpath, label, recurse)
+}
+
+// DupSecOpt takes an SELinux process label and returns security options that
+// can be used to set the SELinux Type and Level for future container processes.
+func DupSecOpt(src string) ([]string, error) {
+	return dupSecOpt(src)
+}
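+
+// Caller-side sketch, echoing the package documentation (Disabled cannot
+// be set here, since that is decided at boot):
+//
+//	if selinux.EnforceMode() != selinux.Enforcing {
+//		_ = selinux.SetEnforceMode(selinux.Enforcing)
+//	}
+//	bootDefault := selinux.DefaultEnforceMode()
+
+// DisableSecOpt returns a security opt that can be used to disable SELinux
+// labeling support for future container processes.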
+func DisableSecOpt() []string { + return disableSecOpt() +} + +// GetDefaultContextWithLevel gets a single context for the specified SELinux user +// identity that is reachable from the specified scon context. The context is based +// on the per-user /etc/selinux/{SELINUXTYPE}/contexts/users/ if it exists, +// and falls back to the global /etc/selinux/{SELINUXTYPE}/contexts/default_contexts +// file. +func GetDefaultContextWithLevel(user, level, scon string) (string, error) { + return getDefaultContextWithLevel(user, level, scon) +} + +// PrivContainerMountLabel returns mount label for privileged containers +func PrivContainerMountLabel() string { + return privContainerMountLabel +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go new file mode 100644 index 000000000..a91a116f8 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -0,0 +1,1212 @@ +package selinux + +import ( + "bufio" + "bytes" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/bits-and-blooms/bitset" + "github.com/opencontainers/selinux/pkg/pwalk" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +const ( + minSensLen = 2 + contextFile = "/usr/share/containers/selinux/contexts" + selinuxDir = "/etc/selinux/" + selinuxUsersDir = "contexts/users" + defaultContexts = "contexts/default_contexts" + selinuxConfig = selinuxDir + "config" + selinuxfsMount = "/sys/fs/selinux" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + xattrNameSelinux = "security.selinux" +) + +var policyRoot = filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) + +type selinuxState struct { + enabledSet bool + enabled bool + selinuxfsOnce sync.Once + selinuxfs string + mcsList map[string]bool + sync.Mutex +} + +type level struct { + sens uint + cats *bitset.BitSet +} + +type mlsRange struct { + low *level + high *level +} + +type defaultSECtx struct { + user, level, scon string + userRdr, defaultRdr io.Reader + + verifier func(string) error +} + +type levelItem byte + +const ( + sensitivity levelItem = 's' + category levelItem = 'c' +) + +var ( + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + readOnlyFileLabel string + state = selinuxState{ + mcsList: make(map[string]bool), + } + + // for attrPath() + attrPathOnce sync.Once + haveThreadSelf bool +) + +func (s *selinuxState) setEnable(enabled bool) bool { + s.Lock() + defer s.Unlock() + s.enabledSet = true + s.enabled = enabled + return s.enabled +} + +func (s *selinuxState) getEnabled() bool { + s.Lock() + enabled := s.enabled + enabledSet := s.enabledSet + s.Unlock() + if enabledSet { + return enabled + } + + enabled = false + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := CurrentLabel(); con != "kernel" { + enabled = true + } + } + return s.setEnable(enabled) +} + +// setDisabled disables SELinux support for the package +func setDisabled() { + state.setEnable(false) +} + +func verifySELinuxfsMount(mnt string) bool { + var buf unix.Statfs_t + for { + err := unix.Statfs(mnt, &buf) + if err == nil { + break + } + if err == unix.EAGAIN || err == unix.EINTR { + continue + } + return false + } + + if uint32(buf.Type) != uint32(unix.SELINUX_MAGIC) { + return false + } + if (buf.Flags & unix.ST_RDONLY) != 0 { + return false + } + + return true +} + +func findSELinuxfs() string { + // fast path: check the default mount first + if 
verifySELinuxfsMount(selinuxfsMount) { + return selinuxfsMount + } + + // check if selinuxfs is available before going the slow path + fs, err := ioutil.ReadFile("/proc/filesystems") + if err != nil { + return "" + } + if !bytes.Contains(fs, []byte("\tselinuxfs\n")) { + return "" + } + + // slow path: try to find among the mounts + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "" + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for { + mnt := findSELinuxfsMount(scanner) + if mnt == "" { // error or not found + return "" + } + if verifySELinuxfsMount(mnt) { + return mnt + } + } +} + +// findSELinuxfsMount returns a next selinuxfs mount point found, +// if there is one, or an empty string in case of EOF or error. +func findSELinuxfsMount(s *bufio.Scanner) string { + for s.Scan() { + txt := s.Bytes() + // The first field after - is fs type. + // Safe as spaces in mountpoints are encoded as \040 + if !bytes.Contains(txt, []byte(" - selinuxfs ")) { + continue + } + const mPos = 5 // mount point is 5th field + fields := bytes.SplitN(txt, []byte(" "), mPos+1) + if len(fields) < mPos+1 { + continue + } + return string(fields[mPos-1]) + } + + return "" +} + +func (s *selinuxState) getSELinuxfs() string { + s.selinuxfsOnce.Do(func() { + s.selinuxfs = findSELinuxfs() + }) + + return s.selinuxfs +} + +// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs +// filesystem or an empty string if no mountpoint is found. Selinuxfs is +// a proc-like pseudo-filesystem that exposes the SELinux policy API to +// processes. The existence of an selinuxfs mount is used to determine +// whether SELinux is currently enabled or not. +func getSelinuxMountPoint() string { + return state.getSELinuxfs() +} + +// getEnabled returns whether SELinux is currently enabled. 
+func getEnabled() bool {
+	return state.getEnabled()
+}
+
+func readConfig(target string) string {
+	in, err := os.Open(selinuxConfig)
+	if err != nil {
+		return ""
+	}
+	defer in.Close()
+
+	scanner := bufio.NewScanner(in)
+
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if len(line) == 0 {
+			// Skip blank lines
+			continue
+		}
+		if line[0] == ';' || line[0] == '#' {
+			// Skip comments
+			continue
+		}
+		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+			key, val := strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+			if key == target {
+				return strings.Trim(val, "\"")
+			}
+		}
+	}
+	return ""
+}
+
+func isProcHandle(fh *os.File) error {
+	var buf unix.Statfs_t
+
+	for {
+		err := unix.Fstatfs(int(fh.Fd()), &buf)
+		if err == nil {
+			break
+		}
+		if err != unix.EINTR {
+			return errors.Wrapf(err, "statfs(%q) failed", fh.Name())
+		}
+	}
+	if buf.Type != unix.PROC_SUPER_MAGIC {
+		return errors.Errorf("file %q is not on procfs", fh.Name())
+	}
+
+	return nil
+}
+
+func readCon(fpath string) (string, error) {
+	if fpath == "" {
+		return "", ErrEmptyPath
+	}
+
+	in, err := os.Open(fpath)
+	if err != nil {
+		return "", err
+	}
+	defer in.Close()
+
+	if err := isProcHandle(in); err != nil {
+		return "", err
+	}
+
+	var retval string
+	if _, err := fmt.Fscanf(in, "%s", &retval); err != nil {
+		return "", err
+	}
+	return strings.Trim(retval, "\x00"), nil
+}
+
+// classIndex returns the int index for an object class in the loaded policy,
+// or -1 and an error
+func classIndex(class string) (int, error) {
+	permpath := fmt.Sprintf("class/%s/index", class)
+	indexpath := filepath.Join(getSelinuxMountPoint(), permpath)
+
+	indexB, err := ioutil.ReadFile(indexpath)
+	if err != nil {
+		return -1, err
+	}
+	index, err := strconv.Atoi(string(indexB))
+	if err != nil {
+		return -1, err
+	}
+
+	return index, nil
+}
+
+// setFileLabel sets the SELinux label for this path or returns an error.
+func setFileLabel(fpath string, label string) error {
+	if fpath == "" {
+		return ErrEmptyPath
+	}
+	for {
+		err := unix.Lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
+		if err == nil {
+			break
+		}
+		if err != unix.EINTR {
+			return errors.Wrapf(err, "failed to set file label on %s", fpath)
+		}
+	}
+
+	return nil
+}
+
+// fileLabel returns the SELinux label for this path or returns an error.
+func fileLabel(fpath string) (string, error) {
+	if fpath == "" {
+		return "", ErrEmptyPath
+	}
+
+	label, err := lgetxattr(fpath, xattrNameSelinux)
+	if err != nil {
+		return "", err
+	}
+	// Trim the NUL byte at the end of the byte buffer, if present.
+	if len(label) > 0 && label[len(label)-1] == '\x00' {
+		label = label[:len(label)-1]
+	}
+	return string(label), nil
+}
+
+// setFSCreateLabel tells the kernel what label to use for all file system objects
+// created by this task. Setting label to "" returns to the default.
+func setFSCreateLabel(label string) error {
+	return writeAttr("fscreate", label)
+}
+
+// fsCreateLabel returns the default label which the kernel is using
+// for file system objects created by this task. "" indicates default.
+func fsCreateLabel() (string, error) {
+	return readAttr("fscreate")
+}
+
+// currentLabel returns the SELinux label of the current process thread, or an error.
+func currentLabel() (string, error) {
+	return readAttr("current")
+}
+
+// pidLabel returns the SELinux label of the given pid, or an error.
+func pidLabel(pid int) (string, error) {
+	return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
+}
+
+// execLabel returns the SELinux label that the kernel will use for any programs
+// that are executed by the current process thread, or an error.
+func execLabel() (string, error) {
+	return readAttr("exec")
+}
+
+func writeCon(fpath, val string) error {
+	if fpath == "" {
+		return ErrEmptyPath
+	}
+	if val == "" {
+		if !getEnabled() {
+			return nil
+		}
+	}
+
+	out, err := os.OpenFile(fpath, os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	if err := isProcHandle(out); err != nil {
+		return err
+	}
+
+	if val != "" {
+		_, err = out.Write([]byte(val))
+	} else {
+		_, err = out.Write(nil)
+	}
+	if err != nil {
+		return errors.Wrapf(err, "failed to set %s on procfs", fpath)
+	}
+	return nil
+}
+
+func attrPath(attr string) string {
+	// Linux >= 3.17 provides this
+	const threadSelfPrefix = "/proc/thread-self/attr"
+
+	attrPathOnce.Do(func() {
+		st, err := os.Stat(threadSelfPrefix)
+		if err == nil && st.Mode().IsDir() {
+			haveThreadSelf = true
+		}
+	})
+
+	if haveThreadSelf {
+		return path.Join(threadSelfPrefix, attr)
+	}
+
+	return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr)
+}
+
+func readAttr(attr string) (string, error) {
+	return readCon(attrPath(attr))
+}
+
+func writeAttr(attr, val string) error {
+	return writeCon(attrPath(attr), val)
+}
+
+// canonicalizeContext takes a context string and writes it to the kernel;
+// the function then returns the context that the kernel will use. Use this
+// function to check if two contexts are equivalent
+func canonicalizeContext(val string) (string, error) {
+	return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val)
+}
+
+// computeCreateContext requests the type transition from source to target for
+// class from the kernel.
+func computeCreateContext(source string, target string, class string) (string, error) {
+	classidx, err := classIndex(class)
+	if err != nil {
+		return "", err
+	}
+
+	return readWriteCon(filepath.Join(getSelinuxMountPoint(), "create"), fmt.Sprintf("%s %s %d", source, target, classidx))
+}
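+
+// Parsing sketch for the category syntax handled below (values are
+// illustrative): "," separates entries, "." denotes a range, and every
+// item needs the "c" prefix or parseLevelItem returns ErrLevelSyntax.
+//
+//	cats, _ := catsToBitset("c0,c3.c5") // bits {0, 3, 4, 5} are set
+
+// catsToBitset stores categories in a bitset.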
+func catsToBitset(cats string) (*bitset.BitSet, error) {
+	bitset := &bitset.BitSet{}
+
+	catlist := strings.Split(cats, ",")
+	for _, r := range catlist {
+		ranges := strings.SplitN(r, ".", 2)
+		if len(ranges) > 1 {
+			catstart, err := parseLevelItem(ranges[0], category)
+			if err != nil {
+				return nil, err
+			}
+			catend, err := parseLevelItem(ranges[1], category)
+			if err != nil {
+				return nil, err
+			}
+			for i := catstart; i <= catend; i++ {
+				bitset.Set(i)
+			}
+		} else {
+			cat, err := parseLevelItem(ranges[0], category)
+			if err != nil {
+				return nil, err
+			}
+			bitset.Set(cat)
+		}
+	}
+
+	return bitset, nil
+}
+
+// parseLevelItem parses and verifies that a sensitivity or category are valid
+func parseLevelItem(s string, sep levelItem) (uint, error) {
+	if len(s) < minSensLen || levelItem(s[0]) != sep {
+		return 0, ErrLevelSyntax
+	}
+	val, err := strconv.ParseUint(s[1:], 10, 32)
+	if err != nil {
+		return 0, err
+	}
+
+	return uint(val), nil
+}
+
+// parseLevel fills a level from a string that contains
+// a sensitivity and categories
+func (l *level) parseLevel(levelStr string) error {
+	lvl := strings.SplitN(levelStr, ":", 2)
+	sens, err := parseLevelItem(lvl[0], sensitivity)
+	if err != nil {
+		return errors.Wrap(err, "failed to parse sensitivity")
+	}
+	l.sens = sens
+	if len(lvl) > 1 {
+		cats, err := catsToBitset(lvl[1])
+		if err != nil {
+			return errors.Wrap(err, "failed to parse categories")
+		}
+		l.cats = cats
+	}
+
+	return nil
+}
+
+// rangeStrToMLSRange parses a string representation of a range into an mlsRange.
+func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
+	mlsRange := &mlsRange{}
+	levelSlice := strings.SplitN(rangeStr, "-", 2)
+
+	switch len(levelSlice) {
+	// rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023
+	case 2:
+		mlsRange.high = &level{}
+		if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil {
+			return nil, errors.Wrapf(err, "failed to parse high level %q", levelSlice[1])
+		}
+		fallthrough
+	// rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023
+	case 1:
+		mlsRange.low = &level{}
+		if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil {
+			return nil, errors.Wrapf(err, "failed to parse low level %q", levelSlice[0])
+		}
+	}
+
+	if mlsRange.high == nil {
+		mlsRange.high = mlsRange.low
+	}
+
+	return mlsRange, nil
+}
+
+// bitsetToStr takes a category bitset and returns it in the
+// canonical selinux syntax
+func bitsetToStr(c *bitset.BitSet) string {
+	var str string
+	i, e := c.NextSet(0)
+	len := 0
+	for e {
+		if len == 0 {
+			if str != "" {
+				str += ","
+			}
+			str += "c" + strconv.Itoa(int(i))
+		}
+
+		next, e := c.NextSet(i + 1)
+		if e {
+			// consecutive cats
+			if next == i+1 {
+				len++
+				i = next
+				continue
+			}
+		}
+		if len == 1 {
+			str += ",c" + strconv.Itoa(int(i))
+		} else if len > 1 {
+			str += ".c" + strconv.Itoa(int(i))
+		}
+		if !e {
+			break
+		}
+		len = 0
+		i = next
+	}
+
+	return str
+}
+
+func (l1 *level) equal(l2 *level) bool {
+	if l2 == nil || l1 == nil {
+		return l1 == l2
+	}
+	if l1.sens != l2.sens {
+		return false
+	}
+	return l1.cats.Equal(l2.cats)
+}
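+
+// Round-trip sketch, using the range formats named in the comments above:
+//
+//	r, _ := rangeStrToMLSRange("s4:c0.c1023-s6:c0,c3,c5,c30.c1023")
+//	_ = r.String() // renders the same low-high form back
+//
+// bitsetToStr emits runs of consecutive categories as "cN.cM" and isolated
+// ones separated by commas, e.g. "c0,c3,c5,c30.c1023".
+
+// String returns an mlsRange as a string.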
+func (m mlsRange) String() string {
+	low := "s" + strconv.Itoa(int(m.low.sens))
+	if m.low.cats != nil && m.low.cats.Count() > 0 {
+		low += ":" + bitsetToStr(m.low.cats)
+	}
+
+	if m.low.equal(m.high) {
+		return low
+	}
+
+	high := "s" + strconv.Itoa(int(m.high.sens))
+	if m.high.cats != nil && m.high.cats.Count() > 0 {
+		high += ":" + bitsetToStr(m.high.cats)
+	}
+
+	return low + "-" + high
+}
+
+func max(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func min(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// calculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound)
+// of a source and target range.
+// The glblub is calculated as the greater of the low sensitivities and
+// the lower of the high sensitivities and the AND of each category bitset.
+func calculateGlbLub(sourceRange, targetRange string) (string, error) {
+	s, err := rangeStrToMLSRange(sourceRange)
+	if err != nil {
+		return "", err
+	}
+	t, err := rangeStrToMLSRange(targetRange)
+	if err != nil {
+		return "", err
+	}
+
+	if s.high.sens < t.low.sens || t.high.sens < s.low.sens {
+		/* these ranges have no common sensitivities */
+		return "", ErrIncomparable
+	}
+
+	outrange := &mlsRange{low: &level{}, high: &level{}}
+
+	/* take the greatest of the low */
+	outrange.low.sens = max(s.low.sens, t.low.sens)
+
+	/* take the least of the high */
+	outrange.high.sens = min(s.high.sens, t.high.sens)
+
+	/* find the intersecting categories */
+	if s.low.cats != nil && t.low.cats != nil {
+		outrange.low.cats = s.low.cats.Intersection(t.low.cats)
+	}
+	if s.high.cats != nil && t.high.cats != nil {
+		outrange.high.cats = s.high.cats.Intersection(t.high.cats)
+	}
+
+	return outrange.String(), nil
+}
+
+func readWriteCon(fpath string, val string) (string, error) {
+	if fpath == "" {
+		return "", ErrEmptyPath
+	}
+	f, err := os.OpenFile(fpath, os.O_RDWR, 0)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	_, err = f.Write([]byte(val))
+	if err != nil {
+		return "", err
+	}
+
+	var retval string
+	if _, err := fmt.Fscanf(f, "%s", &retval); err != nil {
+		return "", err
+	}
+	return strings.Trim(retval, "\x00"), nil
+}
+
+// setExecLabel sets the SELinux label that the kernel will use for any programs
+// that are executed by the current process thread, or an error.
+func setExecLabel(label string) error {
+	return writeAttr("exec", label)
+}
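+
+// Worked example for calculateGlbLub with single-level ranges (values are
+// illustrative):
+//
+//	out, _ := calculateGlbLub("s0:c0.c100", "s0:c50.c200")
+//	// out == "s0:c50.c100": both sensitivities overlap at s0 and the
+//	// category intersection is {50..100}.
+
+// setTaskLabel sets the SELinux label for the current thread, or an error.
+// This requires the dyntransition permission.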
+func setTaskLabel(label string) error {
+	return writeAttr("current", label)
+}
+
+// setSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func setSocketLabel(label string) error {
+	return writeAttr("sockcreate", label)
+}
+
+// socketLabel retrieves the current socket label setting
+func socketLabel() (string, error) {
+	return readAttr("sockcreate")
+}
+
+// peerLabel retrieves the label of the client on the other side of a socket
+func peerLabel(fd uintptr) (string, error) {
+	return unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
+}
+
+// setKeyLabel takes a process label and tells the kernel to assign the
+// label to the next kernel keyring that gets created
+func setKeyLabel(label string) error {
+	err := writeCon("/proc/self/attr/keycreate", label)
+	if os.IsNotExist(errors.Cause(err)) {
+		return nil
+	}
+	if label == "" && os.IsPermission(errors.Cause(err)) {
+		return nil
+	}
+	return err
+}
+
+// keyLabel retrieves the current kernel keyring label setting
+func keyLabel() (string, error) {
+	return readCon("/proc/self/attr/keycreate")
+}
+
+// get returns the Context as a string
+func (c Context) get() string {
+	if c["level"] != "" {
+		return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
+	}
+	return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"])
+}
+
+// newContext creates a new Context struct from the specified label
+func newContext(label string) (Context, error) {
+	c := make(Context)
+
+	if len(label) != 0 {
+		con := strings.SplitN(label, ":", 4)
+		if len(con) < 3 {
+			return c, InvalidLabel
+		}
+		c["user"] = con[0]
+		c["role"] = con[1]
+		c["type"] = con[2]
+		if len(con) > 3 {
+			c["level"] = con[3]
+		}
+	}
+	return c, nil
+}
+
+// clearLabels clears all reserved labels
+func clearLabels() {
+	state.Lock()
+	state.mcsList = make(map[string]bool)
+	state.Unlock()
+}
+
+// reserveLabel reserves the MLS/MCS level component of the specified label
+func reserveLabel(label string) {
+	if len(label) != 0 {
+		con := strings.SplitN(label, ":", 4)
+		if len(con) > 3 {
+			_ = mcsAdd(con[3])
+		}
+	}
+}
+
+func selinuxEnforcePath() string {
+	return path.Join(getSelinuxMountPoint(), "enforce")
+}
+
+// enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
+func enforceMode() int {
+	var enforce int
+
+	enforceB, err := ioutil.ReadFile(selinuxEnforcePath())
+	if err != nil {
+		return -1
+	}
+	enforce, err = strconv.Atoi(string(enforceB))
+	if err != nil {
+		return -1
+	}
+	return enforce
+}
+
+// setEnforceMode sets the current SELinux mode Enforcing, Permissive.
+// Disabled is not valid, since this needs to be set at boot time.
+func setEnforceMode(mode int) error {
+	return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0644)
+}
+
+// defaultEnforceMode returns the system's default SELinux mode Enforcing,
+// Permissive or Disabled. Note this is just the default at boot time.
+// EnforceMode tells you the system's current mode.
+func defaultEnforceMode() int { + switch readConfig(selinuxTag) { + case "enforcing": + return Enforcing + case "permissive": + return Permissive + } + return Disabled +} + +func mcsAdd(mcs string) error { + if mcs == "" { + return nil + } + state.Lock() + defer state.Unlock() + if state.mcsList[mcs] { + return ErrMCSAlreadyExists + } + state.mcsList[mcs] = true + return nil +} + +func mcsDelete(mcs string) { + if mcs == "" { + return + } + state.Lock() + defer state.Unlock() + state.mcsList[mcs] = false +} + +func intToMcs(id int, catRange uint32) string { + var ( + SETSIZE = int(catRange) + TIER = SETSIZE + ORD = id + ) + + if id < 1 || id > 523776 { + return "" + } + + for ORD > TIER { + ORD -= TIER + TIER-- + } + TIER = SETSIZE - TIER + ORD += TIER + return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) +} + +func uniqMcs(catRange uint32) string { + var ( + n uint32 + c1, c2 uint32 + mcs string + ) + + for { + _ = binary.Read(rand.Reader, binary.LittleEndian, &n) + c1 = n % catRange + _ = binary.Read(rand.Reader, binary.LittleEndian, &n) + c2 = n % catRange + if c1 == c2 { + continue + } else if c1 > c2 { + c1, c2 = c2, c1 + } + mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) + if err := mcsAdd(mcs); err != nil { + continue + } + break + } + return mcs +} + +// releaseLabel un-reserves the MLS/MCS Level field of the specified label, +// allowing it to be used by another process. +func releaseLabel(label string) { + if len(label) != 0 { + con := strings.SplitN(label, ":", 4) + if len(con) > 3 { + mcsDelete(con[3]) + } + } +} + +// roFileLabel returns the specified SELinux readonly file label +func roFileLabel() string { + return readOnlyFileLabel +} + +func openContextFile() (*os.File, error) { + if f, err := os.Open(contextFile); err == nil { + return f, nil + } + lxcPath := filepath.Join(policyRoot, "/contexts/lxc_contexts") + return os.Open(lxcPath) +} + +var labels, privContainerMountLabel = loadLabels() + +func loadLabels() (map[string]string, string) { + labels := make(map[string]string) + in, err := openContextFile() + if err != nil { + return labels, "" + } + defer in.Close() + + scanner := bufio.NewScanner(in) + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val := strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + labels[key] = strings.Trim(val, "\"") + } + } + + con, _ := NewContext(labels["file"]) + con["level"] = fmt.Sprintf("s0:c%d,c%d", maxCategory-2, maxCategory-1) + reserveLabel(con.get()) + return labels, con.get() +} + +// kvmContainerLabels returns the default processLabel and mountLabel to be used +// for kvm containers by the calling process. +func kvmContainerLabels() (string, string) { + processLabel := labels["kvm_process"] + if processLabel == "" { + processLabel = labels["process"] + } + + return addMcs(processLabel, labels["file"]) +} + +// initContainerLabels returns the default processLabel and file labels to be +// used for containers running an init system like systemd by the calling process. +func initContainerLabels() (string, string) { + processLabel := labels["init_process"] + if processLabel == "" { + processLabel = labels["process"] + } + + return addMcs(processLabel, labels["file"]) +} + +// containerLabels returns an allocated processLabel and fileLabel to be used for +// container labeling by the calling process. 
+func containerLabels() (processLabel string, fileLabel string) {
+	if !getEnabled() {
+		return "", ""
+	}
+
+	processLabel = labels["process"]
+	fileLabel = labels["file"]
+	readOnlyFileLabel = labels["ro_file"]
+
+	if processLabel == "" || fileLabel == "" {
+		return "", fileLabel
+	}
+
+	if readOnlyFileLabel == "" {
+		readOnlyFileLabel = fileLabel
+	}
+
+	return addMcs(processLabel, fileLabel)
+}
+
+func addMcs(processLabel, fileLabel string) (string, string) {
+	scon, _ := NewContext(processLabel)
+	if scon["level"] != "" {
+		mcs := uniqMcs(CategoryRange)
+		scon["level"] = mcs
+		processLabel = scon.Get()
+		scon, _ = NewContext(fileLabel)
+		scon["level"] = mcs
+		fileLabel = scon.Get()
+	}
+	return processLabel, fileLabel
+}
+
+// securityCheckContext validates that the SELinux label is understood by the kernel
+func securityCheckContext(val string) error {
+	return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0644)
+}
+
+// copyLevel returns a label with the MLS/MCS level from src label replaced on
+// the dest label.
+func copyLevel(src, dest string) (string, error) {
+	if src == "" {
+		return "", nil
+	}
+	if err := SecurityCheckContext(src); err != nil {
+		return "", err
+	}
+	if err := SecurityCheckContext(dest); err != nil {
+		return "", err
+	}
+	scon, err := NewContext(src)
+	if err != nil {
+		return "", err
+	}
+	tcon, err := NewContext(dest)
+	if err != nil {
+		return "", err
+	}
+	mcsDelete(tcon["level"])
+	_ = mcsAdd(scon["level"])
+	tcon["level"] = scon["level"]
+	return tcon.Get(), nil
+}
+
+// Prevent users from relabeling system files
+func badPrefix(fpath string) error {
+	if fpath == "" {
+		return ErrEmptyPath
+	}
+
+	badPrefixes := []string{"/usr"}
+	for _, prefix := range badPrefixes {
+		if strings.HasPrefix(fpath, prefix) {
+			return errors.Errorf("relabeling content in %s is not allowed", prefix)
+		}
+	}
+	return nil
+}
+
+// chcon changes the fpath file object to the SELinux label label.
+// If fpath is a directory and recurse is true, then chcon walks the
+// directory tree setting the label.
+func chcon(fpath string, label string, recurse bool) error {
+	if fpath == "" {
+		return ErrEmptyPath
+	}
+	if label == "" {
+		return nil
+	}
+	if err := badPrefix(fpath); err != nil {
+		return err
+	}
+
+	if !recurse {
+		return SetFileLabel(fpath, label)
+	}
+
+	return pwalk.Walk(fpath, func(p string, info os.FileInfo, err error) error {
+		e := SetFileLabel(p, label)
+		// Walking a file tree can race with removal, so ignore ENOENT.
+		if os.IsNotExist(errors.Cause(e)) {
+			return nil
+		}
+		return e
+	})
+}
+
+// dupSecOpt takes an SELinux process label and returns security options that
+// can be used to set the SELinux Type and Level for future container processes.
+func dupSecOpt(src string) ([]string, error) {
+	if src == "" {
+		return nil, nil
+	}
+	con, err := NewContext(src)
+	if err != nil {
+		return nil, err
+	}
+	if con["user"] == "" ||
+		con["role"] == "" ||
+		con["type"] == "" {
+		return nil, nil
+	}
+	dup := []string{"user:" + con["user"],
+		"role:" + con["role"],
+		"type:" + con["type"],
+	}
+
+	if con["level"] != "" {
+		dup = append(dup, "level:"+con["level"])
+	}
+
+	return dup, nil
+}
+
+// disableSecOpt returns a security opt that can be used to disable SELinux
+// labeling support for future container processes.
+func disableSecOpt() []string {
+	return []string{"disable"}
+}
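+
+// Caller-side sketch for the recursive relabel path above (path and label
+// are illustrative):
+//
+//	err := selinux.Chcon("/var/lib/myapp",
+//		"system_u:object_r:container_file_t:s0", true)
+//	// badPrefix rejects anything under /usr, and ENOENT from files that
+//	// disappear mid-walk is ignored.
+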
It returns a matched context or an empty string if no +// match is found. If a scanner error occurs, it is returned. +func findUserInContext(context Context, r io.Reader, verifier func(string) error) (string, error) { + fromRole := context["role"] + fromType := context["type"] + scanner := bufio.NewScanner(r) + + for scanner.Scan() { + fromConns := strings.Fields(scanner.Text()) + if len(fromConns) == 0 { + // Skip blank lines + continue + } + + line := fromConns[0] + + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + + // user context files contexts are formatted as + // role_r:type_t:s0 where the user is missing. + lineArr := strings.SplitN(line, ":", 4) + // skip context with typo, or role and type do not match + if len(lineArr) != 3 || + lineArr[0] != fromRole || + lineArr[1] != fromType { + continue + } + + for _, cc := range fromConns[1:] { + toConns := strings.SplitN(cc, ":", 4) + if len(toConns) != 3 { + continue + } + + context["role"] = toConns[0] + context["type"] = toConns[1] + + outConn := context.get() + if err := verifier(outConn); err != nil { + continue + } + + return outConn, nil + } + } + + if err := scanner.Err(); err != nil { + return "", errors.Wrap(err, "failed to scan for context") + } + + return "", nil +} + +func getDefaultContextFromReaders(c *defaultSECtx) (string, error) { + if c.verifier == nil { + return "", ErrVerifierNil + } + + context, err := newContext(c.scon) + if err != nil { + return "", errors.Wrapf(err, "failed to create label for %s", c.scon) + } + + // set so the verifier validates the matched context with the provided user and level. + context["user"] = c.user + context["level"] = c.level + + conn, err := findUserInContext(context, c.userRdr, c.verifier) + if err != nil { + return "", err + } + + if conn != "" { + return conn, nil + } + + conn, err = findUserInContext(context, c.defaultRdr, c.verifier) + if err != nil { + return "", err + } + + if conn != "" { + return conn, nil + } + + return "", errors.Wrapf(ErrContextMissing, "context not found: %q", c.scon) +} + +func getDefaultContextWithLevel(user, level, scon string) (string, error) { + userPath := filepath.Join(policyRoot, selinuxUsersDir, user) + defaultPath := filepath.Join(policyRoot, defaultContexts) + + fu, err := os.Open(userPath) + if err != nil { + return "", err + } + defer fu.Close() + + fd, err := os.Open(defaultPath) + if err != nil { + return "", err + } + defer fd.Close() + + c := defaultSECtx{ + user: user, + level: level, + scon: scon, + userRdr: fu, + defaultRdr: fd, + verifier: securityCheckContext, + } + + return getDefaultContextFromReaders(&c) +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go new file mode 100644 index 000000000..b7218a0b6 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -0,0 +1,154 @@ +// +build !linux + +package selinux + +const privContainerMountLabel = "" + +func setDisabled() { +} + +func getEnabled() bool { + return false +} + +func classIndex(class string) (int, error) { + return -1, nil +} + +func setFileLabel(fpath string, label string) error { + return nil +} + +func fileLabel(fpath string) (string, error) { + return "", nil +} + +func setFSCreateLabel(label string) error { + return nil +} + +func fsCreateLabel() (string, error) { + return "", nil +} + +func currentLabel() (string, error) { + return "", nil +} + +func pidLabel(pid int) (string, error) { + return "", nil +} 
+ +func execLabel() (string, error) { + return "", nil +} + +func canonicalizeContext(val string) (string, error) { + return "", nil +} + +func computeCreateContext(source string, target string, class string) (string, error) { + return "", nil +} + +func calculateGlbLub(sourceRange, targetRange string) (string, error) { + return "", nil +} + +func setExecLabel(label string) error { + return nil +} + +func setTaskLabel(label string) error { + return nil +} + +func setSocketLabel(label string) error { + return nil +} + +func socketLabel() (string, error) { + return "", nil +} + +func peerLabel(fd uintptr) (string, error) { + return "", nil +} + +func setKeyLabel(label string) error { + return nil +} + +func keyLabel() (string, error) { + return "", nil +} + +func (c Context) get() string { + return "" +} + +func newContext(label string) (Context, error) { + c := make(Context) + return c, nil +} + +func clearLabels() { +} + +func reserveLabel(label string) { +} + +func enforceMode() int { + return Disabled +} + +func setEnforceMode(mode int) error { + return nil +} + +func defaultEnforceMode() int { + return Disabled +} + +func releaseLabel(label string) { +} + +func roFileLabel() string { + return "" +} + +func kvmContainerLabels() (string, string) { + return "", "" +} + +func initContainerLabels() (string, string) { + return "", "" +} + +func containerLabels() (processLabel string, fileLabel string) { + return "", "" +} + +func securityCheckContext(val string) error { + return nil +} + +func copyLevel(src, dest string) (string, error) { + return "", nil +} + +func chcon(fpath string, label string, recurse bool) error { + return nil +} + +func dupSecOpt(src string) ([]string, error) { + return nil, nil +} + +func disableSecOpt() []string { + return []string{"disable"} +} + +func getDefaultContextWithLevel(user, level, scon string) (string, error) { + return "", nil +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go new file mode 100644 index 000000000..117c255be --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go @@ -0,0 +1,38 @@ +package selinux + +import ( + "golang.org/x/sys/unix" +) + +// lgetxattr returns a []byte slice containing the value of +// an extended attribute attr set for path. 
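+//
+// For illustration (hypothetical file, assuming an SELinux-labeled system):
+//
+//	val, err := lgetxattr("/etc/passwd", "security.selinux")
+//	// val would hold raw label bytes such as "system_u:object_r:passwd_file_t:s0"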
+func lgetxattr(path, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := doLgetxattr(path, attr, dest) + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = doLgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + + dest = make([]byte, sz) + sz, errno = doLgetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// doLgetxattr is a wrapper that retries on EINTR +func doLgetxattr(path, attr string, dest []byte) (int, error) { + for { + sz, err := unix.Lgetxattr(path, attr, dest) + if err != unix.EINTR { + return sz, err + } + } +} diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md new file mode 100644 index 000000000..16c4dfd3e --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md @@ -0,0 +1,42 @@ +## pwalk: parallel implementation of filepath.Walk + +This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk) +which may speed it up by calling multiple callback functions (WalkFunc) in parallel, +utilizing goroutines. + +By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. +This can be changed by using WalkN function which has the additional +parameter, specifying the number of goroutines (concurrency). + +### Caveats + +Please note the following limitations of this code: + +* Unlike filepath.Walk, the order of calls is non-deterministic; + +* Only primitive error handling is supported: + + * filepath.SkipDir is not supported; + + * no errors are ever passed to WalkFunc; + + * once any error is returned from any WalkFunc instance, no more new calls + to WalkFunc are made, and the error is returned to the caller of Walk; + + * if more than one walkFunc instance will return an error, only one + of such errors will be propagated and returned by Walk, others + will be silently discarded. + +### Documentation + +For the official documentation, see +https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc + +### Benchmarks + +For a WalkFunc that consists solely of the return statement, this +implementation is about 10% slower than the standard library's +filepath.Walk. + +Otherwise (if a WalkFunc is doing something) this is usually faster, +except when the WalkN(..., 1) is used. diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go new file mode 100644 index 000000000..437b12b3e --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go @@ -0,0 +1,104 @@ +package pwalk + +import ( + "os" + "path/filepath" + "runtime" + "sync" + + "github.com/pkg/errors" +) + +type WalkFunc = filepath.WalkFunc + +// Walk is a wrapper for filepath.Walk which can call multiple walkFn +// in parallel, allowing to handle each item concurrently. A maximum of +// twice the runtime.NumCPU() walkFn will be called at any one time. +// If you want to change the maximum, use WalkN instead. +// +// The order of calls is non-deterministic. 
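+//
+// An illustrative (hypothetical) call, printing every visited path:
+//
+//	err := pwalk.Walk("/some/dir", func(p string, info os.FileInfo, err error) error {
+//		fmt.Println(p) // callbacks run concurrently with each other
+//		return nil
+//	})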
+// +// Note that this implementation only supports primitive error handling: +// +// - no errors are ever passed to WalkFn; +// +// - once a walkFn returns any error, all further processing stops +// and the error is returned to the caller of Walk; +// +// - filepath.SkipDir is not supported; +// +// - if more than one walkFn instance will return an error, only one +// of such errors will be propagated and returned by Walk, others +// will be silently discarded. +func Walk(root string, walkFn WalkFunc) error { + return WalkN(root, walkFn, runtime.NumCPU()*2) +} + +// WalkN is a wrapper for filepath.Walk which can call multiple walkFn +// in parallel, allowing to handle each item concurrently. A maximum of +// num walkFn will be called at any one time. +// +// Please see Walk documentation for caveats of using this function. +func WalkN(root string, walkFn WalkFunc, num int) error { + // make sure limit is sensible + if num < 1 { + return errors.Errorf("walk(%q): num must be > 0", root) + } + + files := make(chan *walkArgs, 2*num) + errCh := make(chan error, 1) // get the first error, ignore others + + // Start walking a tree asap + var ( + err error + wg sync.WaitGroup + ) + wg.Add(1) + go func() { + err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error { + if err != nil { + close(files) + return err + } + // add a file to the queue unless a callback sent an error + select { + case e := <-errCh: + close(files) + return e + default: + files <- &walkArgs{path: p, info: &info} + return nil + } + }) + if err == nil { + close(files) + } + wg.Done() + }() + + wg.Add(num) + for i := 0; i < num; i++ { + go func() { + for file := range files { + if e := walkFn(file.path, *file.info, nil); e != nil { + select { + case errCh <- e: // sent ok + default: // buffer full + } + } + } + wg.Done() + }() + } + + wg.Wait() + + return err +} + +// walkArgs holds the arguments that were passed to the Walk or WalkLimit +// functions. +type walkArgs struct { + path string + info *os.FileInfo +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index 7e0185001..f03e3be48 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -362,6 +362,16 @@ func NewEgressSelector(config *apiserver.EgressSelectorConfiguration) (*EgressSe return cs, nil } +// NewEgressSelectorWithMap returns a EgressSelector with the supplied EgressType to DialFunc map. +func NewEgressSelectorWithMap(m map[EgressType]utilnet.DialFunc) *EgressSelector { + if m == nil { + m = make(map[EgressType]utilnet.DialFunc) + } + return &EgressSelector{ + egressToDialer: m, + } +} + // Lookup gets the dialer function for the network context. // This is configured for the Kubernetes API Server at startup. 
func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFunc, error) { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go index c38ebc076..6eee281bc 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -165,7 +165,7 @@ func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[str newExtension := (*in)[key] oldExtension := runtime.RawExtension{} if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { - return nil + return err } namedExtension := NamedExtension{key, oldExtension} *out = append(*out, namedExtension) diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index 2c978a1be..53bdd5fd8 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -9,21 +9,23 @@ require ( github.com/spf13/cobra v1.1.3 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 - k8s.io/api v0.22.2 - k8s.io/apimachinery v0.22.2 - k8s.io/apiserver v0.22.2 - k8s.io/client-go v0.22.2 - k8s.io/component-base v0.22.2 - k8s.io/controller-manager v0.22.2 + k8s.io/api v0.22.4 + k8s.io/apimachinery v0.22.4 + k8s.io/apiserver v0.22.4 + k8s.io/client-go v0.22.4 + k8s.io/component-base v0.22.4 + k8s.io/controller-manager v0.22.4 k8s.io/klog/v2 v2.9.0 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a ) replace ( - k8s.io/api => k8s.io/api v0.22.2 - k8s.io/apimachinery => k8s.io/apimachinery v0.22.2 - k8s.io/apiserver => k8s.io/apiserver v0.22.2 - k8s.io/client-go => k8s.io/client-go v0.22.2 - k8s.io/component-base => k8s.io/component-base v0.22.2 - k8s.io/controller-manager => k8s.io/controller-manager v0.22.2 + k8s.io/api => k8s.io/api v0.22.4 + k8s.io/apimachinery => k8s.io/apimachinery v0.22.4 + k8s.io/apiserver => k8s.io/apiserver v0.22.4 + k8s.io/client-go => k8s.io/client-go v0.22.4 + k8s.io/component-base => k8s.io/component-base v0.22.4 + k8s.io/controller-manager => k8s.io/controller-manager v0.22.4 ) + +replace k8s.io/component-helpers => k8s.io/component-helpers v0.22.4 diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 5e2b2eacc..d6b2f2260 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -776,24 +776,24 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= -k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= -k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= -k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4= -k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= -k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= -k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= -k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M= -k8s.io/component-base 
v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= -k8s.io/controller-manager v0.22.2 h1:4JbMHSia+Ys80FAMW35mlkbNG+IBGemPOk0wWDkiWYo= -k8s.io/controller-manager v0.22.2/go.mod h1:zeDUbCc66IcMZ81U8qC5Z5pm9A8QkqD7839H8t7//yY= +k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw= +k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= +k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck= +k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= +k8s.io/apiserver v0.22.4 h1:L+220cy+94UWmyBl1kiVTklBXrBtKsbjlPV60eL2u6s= +k8s.io/apiserver v0.22.4/go.mod h1:38WmcUZiiy41A7Aty8/VorWRa8vDGqoUzDf2XYlku0E= +k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg= +k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= +k8s.io/component-base v0.22.4 h1:7qwLJnua2ppGNZrRGDQ0vhsFebI39VGbZ4zdR5ArViI= +k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A= +k8s.io/controller-manager v0.22.4 h1:4Iyn0IaDa3M7CJlkFp/ASdXtxeDDi9N63o0xNf+/sy4= +k8s.io/controller-manager v0.22.4/go.mod h1:DcJNoo4OvXCh9KfESIrX9C9dNQj1OfQrAZrEkFbNMRw= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go index 91a76ba7e..456fe0b0a 100644 --- a/vendor/k8s.io/component-base/metrics/options.go +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -58,8 +58,8 @@ func (o *Options) Validate() []error { // AddFlags adds flags for exposing component metrics. func (o *Options) AddFlags(fs *pflag.FlagSet) { - if o != nil { - o = NewOptions() + if o == nil { + return } fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion, "The previous version for which you want to show hidden metrics. "+ diff --git a/vendor/k8s.io/kubernetes/pkg/util/selinux/doc.go b/vendor/k8s.io/kubernetes/pkg/util/selinux/doc.go new file mode 100644 index 000000000..2757203f9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/selinux/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package selinux contains wrapper functions for the libcontainer SELinux +// package. A NOP implementation is provided for non-linux platforms. +package selinux // import "k8s.io/kubernetes/pkg/util/selinux" diff --git a/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux.go b/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux.go new file mode 100644 index 000000000..c367f7bbe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selinux + +// Note: the libcontainer SELinux package is only built for Linux, so it is +// necessary to have a NOP wrapper which is built for non-Linux platforms to +// allow code that links to this package not to differentiate its own methods +// for Linux and non-Linux platforms. +// +// SELinuxRunner wraps certain libcontainer SELinux calls. For more +// information, see: +// +// https://github.com/opencontainers/runc/blob/master/libcontainer/selinux/selinux.go +type SELinuxRunner interface { + // Getfilecon returns the SELinux context for the given path or returns an + // error. + Getfilecon(path string) (string, error) +} + +// NewSELinuxRunner returns a new SELinuxRunner appropriate for the platform. +// On Linux, all methods short-circuit and return NOP values if SELinux is +// disabled. On non-Linux platforms, a NOP implementation is returned. +func NewSELinuxRunner() SELinuxRunner { + return &realSELinuxRunner{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_linux.go b/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_linux.go new file mode 100644 index 000000000..33ae35884 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_linux.go @@ -0,0 +1,57 @@ +// +build linux + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selinux + +import ( + selinux "github.com/opencontainers/selinux/go-selinux" +) + +// SELinuxEnabled returns whether SELinux is enabled on the system. SELinux +// has a tri-state: +// +// 1. 
disabled: SELinux Kernel modules not loaded, SELinux policy is not +// checked during Kernel MAC checks +// 2. enforcing: Enabled; SELinux policy violations are denied and logged +// in the audit log +// 3. permissive: Enabled, but SELinux policy violations are permitted and +// logged in the audit log +// +// SELinuxEnabled returns true if SELinux is enforcing or permissive, and +// false if it is disabled. +func SELinuxEnabled() bool { + return selinux.GetEnabled() +} + +// realSELinuxRunner is the real implementation of SELinuxRunner interface for +// Linux. +type realSELinuxRunner struct{} + +var _ SELinuxRunner = &realSELinuxRunner{} + +func (_ *realSELinuxRunner) Getfilecon(path string) (string, error) { + if !SELinuxEnabled() { + return "", nil + } + return selinux.FileLabel(path) +} + +// SetFileLabel applies the SELinux label on the path or returns an error. +func SetFileLabel(path string, label string) error { + return selinux.SetFileLabel(path, label) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_unsupported.go new file mode 100644 index 000000000..4c8f5f0b2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_unsupported.go @@ -0,0 +1,38 @@ +// +build !linux + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selinux + +// SELinuxEnabled always returns false on non-linux platforms. +func SELinuxEnabled() bool { + return false +} + +// realSELinuxRunner is the NOP implementation of the SELinuxRunner interface. +type realSELinuxRunner struct{} + +var _ SELinuxRunner = &realSELinuxRunner{} + +func (_ *realSELinuxRunner) Getfilecon(path string) (string, error) { + return "", nil +} + +// FileLabel returns the SELinux label for this path or returns an error. +func SetFileLabel(path string, label string) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go index cc5fe628a..36b72e5e8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go @@ -108,7 +108,7 @@ func (hu *FakeHostUtil) GetOwner(pathname string) (int64, int64, error) { // GetSELinuxSupport tests if pathname is on a mount that supports SELinux. // Not implemented for testing func (hu *FakeHostUtil) GetSELinuxSupport(pathname string) (bool, error) { - return false, errors.New("GetSELinuxSupport not implemented") + return false, nil } // GetMode returns permissions of pathname. 
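A minimal usage sketch of the SELinuxRunner wrapper above (illustrative only;
the path is hypothetical):

	runner := selinux.NewSELinuxRunner()
	if selinux.SELinuxEnabled() {
		// Getfilecon short-circuits to ("", nil) when SELinux is disabled;
		// otherwise it returns the SELinux context of the given path.
		con, err := runner.Getfilecon("/var/lib/kubelet/pods")
		if err == nil {
			fmt.Println("SELinux context:", con)
		}
	}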
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go index 160d22c28..ab1c52335 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go @@ -28,6 +28,7 @@ import ( "golang.org/x/sys/unix" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/util/selinux" "k8s.io/mount-utils" utilpath "k8s.io/utils/path" ) @@ -229,8 +230,16 @@ func DoMakeRShared(path string, mountInfoFilename string) error { return nil } +// selinux.SELinuxEnabled implementation for unit tests +type seLinuxEnabledFunc func() bool + // GetSELinux is common implementation of GetSELinuxSupport on Linux. -func GetSELinux(path string, mountInfoFilename string) (bool, error) { +func GetSELinux(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (bool, error) { + // Skip /proc/mounts parsing if SELinux is disabled. + if !selinuxEnabled() { + return false, nil + } + info, err := findMountInfo(path, mountInfoFilename) if err != nil { return false, err @@ -253,7 +262,7 @@ func GetSELinux(path string, mountInfoFilename string) (bool, error) { // GetSELinuxSupport returns true if given path is on a mount that supports // SELinux. func (hu *HostUtil) GetSELinuxSupport(pathname string) (bool, error) { - return GetSELinux(pathname, procMountInfoPath) + return GetSELinux(pathname, procMountInfoPath, selinux.SELinuxEnabled) } // GetOwner returns the integer ID for the user and group of the given path diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go index 84cdf5e10..263bd8f9d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go @@ -29,7 +29,6 @@ import ( "golang.org/x/sys/unix" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/mount-utils" ) @@ -109,12 +108,12 @@ func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, strin notMount = true } if !notMount { - linuxHostUtil := hostutil.NewHostUtil() - mntInfo, err := linuxHostUtil.FindMountInfo(bindPathTarget) + // It's already mounted, so check if it's bind-mounted to the same path + samePath, err := checkSubPathFileEqual(subpath, bindPathTarget) if err != nil { - return false, "", fmt.Errorf("error calling findMountInfo for %s: %s", bindPathTarget, err) + return false, "", fmt.Errorf("error checking subpath mount info for %s: %s", bindPathTarget, err) } - if mntInfo.Root != subpath.Path { + if !samePath { // It's already mounted but not what we want, unmount it if err = mounter.Unmount(bindPathTarget); err != nil { return false, "", fmt.Errorf("error ummounting %s: %s", bindPathTarget, err) @@ -155,6 +154,23 @@ func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, strin return false, bindPathTarget, nil } +func checkSubPathFileEqual(subpath Subpath, bindMountTarget string) (bool, error) { + s, err := os.Lstat(subpath.Path) + if err != nil { + return false, fmt.Errorf("stat %s failed: %s", subpath.Path, err) + } + + t, err := os.Lstat(bindMountTarget) + if err != nil { + return false, fmt.Errorf("lstat %s failed: %s", bindMountTarget, err) + } + + if !os.SameFile(s, t) { + return false, nil + } + return true, nil +} + func getSubpathBindTarget(subpath Subpath) string { // containerName is DNS label, i.e. safe as a directory name. 
return filepath.Join(subpath.PodDir, containerSubPathDirectoryName, subpath.VolumeName, subpath.ContainerName, strconv.Itoa(subpath.VolumeMountIndex)) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go b/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go index f13a5cb2f..db4cf0fe9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go @@ -368,6 +368,16 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC var gracePeriod int64 = 1 var command string + /** + This condition fixes running storage e2e tests in SELinux environment. + HostPath Volume Plugin creates a directory within /tmp on host machine, to be mounted as volume. + Inject-pod writes content to the volume, and a client-pod tries the read the contents and verify. + When SELinux is enabled on the host, client-pod can not read the content, with permission denied. + Invoking client-pod as privileged, so that it can access the volume content, even when SELinux is enabled on the host. + */ + if config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" { + privileged = true + } command = "while true ; do sleep 2; done " seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"} clientPod := &v1.Pod{ diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 67a838b88..4c6358b6b 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -214,7 +214,7 @@ func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) { configs[CheckMetadataConcealment] = Config{list.PromoterE2eRegistry, "metadata-concealment", "1.6"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"} - configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "buster-v1.6.5"} + configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "buster-v1.6.7"} configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.3"} configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.4.13-0"} configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"} diff --git a/vendor/k8s.io/mount-utils/mount_helper_windows.go b/vendor/k8s.io/mount-utils/mount_helper_windows.go index 865ab5c32..2dbbc73e9 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_windows.go +++ b/vendor/k8s.io/mount-utils/mount_helper_windows.go @@ -84,15 +84,9 @@ func NormalizeWindowsPath(path string) string { // ValidateDiskNumber : disk number should be a number in [0, 99] func ValidateDiskNumber(disk string) error { - diskNum, err := strconv.Atoi(disk) - if err != nil { - return fmt.Errorf("wrong disk number format: %q, err:%v", disk, err) + if _, err := strconv.Atoi(disk); err != nil { + return fmt.Errorf("wrong disk number format: %q, err: %v", disk, err) } - - if diskNum < 0 || diskNum > 99 { - return fmt.Errorf("disk number out of range: %q", disk) - } - return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 1a4ac37c6..bf1630a98 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -49,6 +49,8 @@ github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile +# 
github.com/bits-and-blooms/bitset v1.2.0 +github.com/bits-and-blooms/bitset # github.com/blang/semver v3.5.1+incompatible github.com/blang/semver # github.com/cenkalti/backoff/v3 v3.0.0 @@ -309,6 +311,9 @@ github.com/opencontainers/go-digest # github.com/opencontainers/runc v1.0.2 github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/utils +# github.com/opencontainers/selinux v1.8.2 +github.com/opencontainers/selinux/go-selinux +github.com/opencontainers/selinux/pkg/pwalk # github.com/openshift/api v0.0.0-20210927171657-636513e97fda github.com/openshift/api/security/v1 # github.com/pborman/uuid v1.2.1 @@ -585,7 +590,7 @@ gopkg.in/tomb.v1 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b gopkg.in/yaml.v3 -# k8s.io/api v0.22.2 => k8s.io/api v0.22.2 +# k8s.io/api v0.22.4 => k8s.io/api v0.22.4 ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -632,7 +637,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.22.2 => k8s.io/apimachinery v0.22.2 +# k8s.io/apimachinery v0.22.4 => k8s.io/apimachinery v0.22.4 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -689,7 +694,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.22.2 => k8s.io/apiserver v0.22.2 +# k8s.io/apiserver v0.22.4 => k8s.io/apiserver v0.22.4 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -729,7 +734,7 @@ k8s.io/apiserver/pkg/util/feature k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.22.2 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.22.4 ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -964,12 +969,12 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.22.2 => k8s.io/cloud-provider v0.22.2 +# k8s.io/cloud-provider v0.22.4 => k8s.io/cloud-provider v0.22.4 ## explicit k8s.io/cloud-provider k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.22.2 => k8s.io/component-base v0.22.2 +# k8s.io/component-base v0.22.4 => k8s.io/component-base v0.22.4 k8s.io/component-base/cli/flag k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 @@ -979,20 +984,20 @@ k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/testutil k8s.io/component-base/traces k8s.io/component-base/version -# k8s.io/component-helpers v0.22.2 => k8s.io/component-helpers v0.22.2 +# k8s.io/component-helpers v0.22.4 => k8s.io/component-helpers v0.22.4 k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity # k8s.io/klog/v2 v2.10.0 ## explicit k8s.io/klog/v2 -# k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e +# k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.22.2 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.22.4 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.0.0 => 
k8s.io/kubelet v0.22.2 +# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.22.4 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.22.3 +# k8s.io/kubernetes v1.22.4 ## explicit k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1025,6 +1030,7 @@ k8s.io/kubernetes/pkg/security/apparmor k8s.io/kubernetes/pkg/util/hash k8s.io/kubernetes/pkg/util/labels k8s.io/kubernetes/pkg/util/parsers +k8s.io/kubernetes/pkg/util/selinux k8s.io/kubernetes/pkg/util/sysctl k8s.io/kubernetes/pkg/util/taints k8s.io/kubernetes/pkg/volume @@ -1054,7 +1060,7 @@ k8s.io/kubernetes/test/e2e/storage/podlogs k8s.io/kubernetes/test/e2e/storage/utils k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/image -# k8s.io/mount-utils v0.22.2 => k8s.io/mount-utils v0.22.2 +# k8s.io/mount-utils v0.22.4 => k8s.io/mount-utils v0.22.4 ## explicit k8s.io/mount-utils # k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a @@ -1124,29 +1130,29 @@ sigs.k8s.io/yaml # github.com/golang/protobuf => github.com/golang/protobuf v1.4.3 # github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 # gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0 -# k8s.io/api => k8s.io/api v0.22.2 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.2 -# k8s.io/apimachinery => k8s.io/apimachinery v0.22.2 -# k8s.io/apiserver => k8s.io/apiserver v0.22.2 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.2 -# k8s.io/client-go => k8s.io/client-go v0.22.2 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.2 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.2 -# k8s.io/code-generator => k8s.io/code-generator v0.22.2 -# k8s.io/component-base => k8s.io/component-base v0.22.2 -# k8s.io/component-helpers => k8s.io/component-helpers v0.22.2 -# k8s.io/controller-manager => k8s.io/controller-manager v0.22.2 -# k8s.io/cri-api => k8s.io/cri-api v0.22.2 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.2 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.2 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.2 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.2 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.2 -# k8s.io/kubectl => k8s.io/kubectl v0.22.2 -# k8s.io/kubelet => k8s.io/kubelet v0.22.2 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.2 -# k8s.io/metrics => k8s.io/metrics v0.22.2 -# k8s.io/mount-utils => k8s.io/mount-utils v0.22.2 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.2 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.2 +# k8s.io/api => k8s.io/api v0.22.4 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.4 +# k8s.io/apimachinery => k8s.io/apimachinery v0.22.4 +# k8s.io/apiserver => k8s.io/apiserver v0.22.4 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.4 +# k8s.io/client-go => k8s.io/client-go v0.22.4 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.4 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.4 +# k8s.io/code-generator => k8s.io/code-generator v0.22.4 +# k8s.io/component-base => k8s.io/component-base v0.22.4 +# k8s.io/component-helpers => k8s.io/component-helpers v0.22.4 +# k8s.io/controller-manager => k8s.io/controller-manager v0.22.4 +# k8s.io/cri-api => k8s.io/cri-api v0.22.4 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.4 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.4 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.4 
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.4
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.4
+# k8s.io/kubectl => k8s.io/kubectl v0.22.4
+# k8s.io/kubelet => k8s.io/kubelet v0.22.4
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.4
+# k8s.io/metrics => k8s.io/metrics v0.22.4
+# k8s.io/mount-utils => k8s.io/mount-utils v0.22.4
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.4
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.4
 # layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917

From bdcf3273b5f3cbc35054255956b909f96ab21f36 Mon Sep 17 00:00:00 2001
From: Prasanna Kumar Kalever
Date: Tue, 16 Nov 2021 18:40:11 +0530
Subject: [PATCH 19/23] rbd: provide a way to supply mounter specific
 mapOptions from sc

Uses the below schema to supply mounter specific map/unmapOptions to the
nodeplugin, based on the discussion we all had at
https://github.com/ceph/ceph-csi/pull/2636

This should be especially helpful with `tryOtherMounters` set to true,
i.e. with the fallback mechanism settings turned ON.

mapOption: "krbd:v1,v2,v3;nbd:v1,v2,v3"
- By omitting `krbd:` or `nbd:`, the option(s) apply to the
  rbdDefaultMounter, which is krbd.
- A user can _override_ the options for a mounter by specifying `krbd:`
  or `nbd:`.
  mapOption: "v1,v2,v3;nbd:v1,v2,v3"
  is effectively the same as the 1st example.
- Sections are split by `;`.
- If users want to specify common options for both `krbd` and `nbd`,
  they should mention them twice.

Signed-off-by: Prasanna Kumar Kalever
---
 internal/rbd/nodeserver.go      |  8 ++-
 internal/rbd/rbd_attach.go      | 60 ++++++++++++++++++++
 internal/rbd/rbd_attach_test.go | 99 +++++++++++++++++++++++++++++++++
 3 files changed, 164 insertions(+), 3 deletions(-)
 create mode 100644 internal/rbd/rbd_attach_test.go

diff --git a/internal/rbd/nodeserver.go b/internal/rbd/nodeserver.go
index 0425278f7..8242751cd 100644
--- a/internal/rbd/nodeserver.go
+++ b/internal/rbd/nodeserver.go
@@ -224,12 +224,14 @@ func populateRbdVol(
 			return nil, status.Errorf(codes.Internal, "unsupported krbd Feature")
 		}
 		// fallback to rbd-nbd,
-		// ignore the mapOptions and unmapOptions as they are meant for krbd use.
 		rv.Mounter = rbdNbdMounter
 	} else {
 		rv.Mounter = req.GetVolumeContext()["mounter"]
-		rv.MapOptions = req.GetVolumeContext()["mapOptions"]
-		rv.UnmapOptions = req.GetVolumeContext()["unmapOptions"]
+	}
+
+	err = getMapOptions(req, rv)
+	if err != nil {
+		return nil, err
 	}
 
 	rv.VolID = volID
diff --git a/internal/rbd/rbd_attach.go b/internal/rbd/rbd_attach.go
index 3008d68f7..c9ffb6899 100644
--- a/internal/rbd/rbd_attach.go
+++ b/internal/rbd/rbd_attach.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"k8s.io/apimachinery/pkg/util/wait"
 )
 
@@ -222,6 +223,65 @@ func setRbdNbdToolFeatures() {
 	log.DefaultLog("NBD module loaded: %t, rbd-nbd supported features, cookie: %t", hasNBD, hasNBDCookieSupport)
 }
 
+// parseMapOptions helps parse formatted mapOptions and unmapOptions and
+// returns the mounter-specific options.
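+//
+// For illustration (hypothetical option values):
+//
+//	parseMapOptions("krbd:lock_on_read,queue_depth=1024;nbd:try-netlink")
+//	  => krbd: "lock_on_read,queue_depth=1024", nbd: "try-netlink"
+//
+//	parseMapOptions("lock_on_read;nbd:try-netlink")
+//	  => an unprefixed section applies to krbd, the default mounter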
+func parseMapOptions(mapOptions string) (string, string, error) { + var krbdMapOptions, nbdMapOptions string + const ( + noKeyLength = 1 + validLength = 2 + ) + for _, item := range strings.Split(mapOptions, ";") { + var mounter, options string + if item == "" { + continue + } + s := strings.Split(item, ":") + switch len(s) { + case noKeyLength: + options = strings.TrimSpace(s[0]) + krbdMapOptions = options + case validLength: + mounter = strings.TrimSpace(s[0]) + options = strings.TrimSpace(s[1]) + switch strings.ToLower(mounter) { + case accessTypeKRbd: + krbdMapOptions = options + case accessTypeNbd: + nbdMapOptions = options + default: + return "", "", fmt.Errorf("unknown mounter type: %q", mounter) + } + default: + return "", "", fmt.Errorf("badly formatted map/unmap options: %q", mapOptions) + } + } + + return krbdMapOptions, nbdMapOptions, nil +} + +// getMapOptions is a wrapper func, calls parse map/unmap funcs and feeds the +// rbdVolume object. +func getMapOptions(req *csi.NodeStageVolumeRequest, rv *rbdVolume) error { + krbdMapOptions, nbdMapOptions, err := parseMapOptions(req.GetVolumeContext()["mapOptions"]) + if err != nil { + return err + } + krbdUnmapOptions, nbdUnmapOptions, err := parseMapOptions(req.GetVolumeContext()["unmapOptions"]) + if err != nil { + return err + } + if rv.Mounter == rbdDefaultMounter { + rv.MapOptions = krbdMapOptions + rv.UnmapOptions = krbdUnmapOptions + } else if rv.Mounter == rbdNbdMounter { + rv.MapOptions = nbdMapOptions + rv.UnmapOptions = nbdUnmapOptions + } + + return nil +} + func attachRBDImage(ctx context.Context, volOptions *rbdVolume, device string, cr *util.Credentials) (string, error) { var err error diff --git a/internal/rbd/rbd_attach_test.go b/internal/rbd/rbd_attach_test.go new file mode 100644 index 000000000..07749530d --- /dev/null +++ b/internal/rbd/rbd_attach_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Ceph-CSI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package rbd
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestParseMapOptions(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name              string
+		mapOption         string
+		expectKrbdOptions string
+		expectNbdOptions  string
+		expectErr         string
+	}{
+		{
+			name:              "with old format",
+			mapOption:         "kOp1,kOp2",
+			expectKrbdOptions: "kOp1,kOp2",
+			expectNbdOptions:  "",
+			expectErr:         "",
+		},
+		{
+			name:              "with new format",
+			mapOption:         "krbd:kOp1,kOp2;nbd:nOp1,nOp2",
+			expectKrbdOptions: "kOp1,kOp2",
+			expectNbdOptions:  "nOp1,nOp2",
+			expectErr:         "",
+		},
+		{
+			name:              "without krbd: label",
+			mapOption:         "kOp1,kOp2;nbd:nOp1,nOp2",
+			expectKrbdOptions: "kOp1,kOp2",
+			expectNbdOptions:  "nOp1,nOp2",
+			expectErr:         "",
+		},
+		{
+			name:              "with only nbd label",
+			mapOption:         "nbd:nOp1,nOp2",
+			expectKrbdOptions: "",
+			expectNbdOptions:  "nOp1,nOp2",
+			expectErr:         "",
+		},
+		{
+			name:              "unknown mounter used",
+			mapOption:         "xyz:xOp1,xOp2",
+			expectKrbdOptions: "",
+			expectNbdOptions:  "",
+			expectErr:         "unknown mounter type",
+		},
+		{
+			name:              "bad formatted options",
+			mapOption:         "nbd:nOp1:nOp2;",
+			expectKrbdOptions: "",
+			expectNbdOptions:  "",
+			expectErr:         "badly formatted map/unmap options",
+		},
+	}
+	for _, tt := range tests {
+		tc := tt
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			krbdOpts, nbdOpts, err := parseMapOptions(tc.mapOption)
+			if err != nil && !strings.Contains(err.Error(), tc.expectErr) {
+				// returned error
+				t.Errorf("parseMapOptions(%s) returned error, expected: %v, got: %v",
+					tc.mapOption, tc.expectErr, err)
+			}
+			if krbdOpts != tc.expectKrbdOptions {
+				// unexpected krbd option error
+				t.Errorf("parseMapOptions(%s) returned unexpected krbd options, expected: %q, got: %q",
+					tc.mapOption, tc.expectKrbdOptions, krbdOpts)
+			}
+			if nbdOpts != tc.expectNbdOptions {
+				// unexpected nbd option error
+				t.Errorf("parseMapOptions(%s) returned unexpected nbd options, expected: %q, got: %q",
+					tc.mapOption, tc.expectNbdOptions, nbdOpts)
+			}
+		})
+	}
+}

From ec8397d8d9a3f12d5ae310d2b7408d4e75170d13 Mon Sep 17 00:00:00 2001
From: Prasanna Kumar Kalever
Date: Tue, 16 Nov 2021 18:44:00 +0530
Subject: [PATCH 20/23] deploy: updated sc templates for map/unmap options
 with the new schema

Fixes: #2641
Signed-off-by: Prasanna Kumar Kalever
---
 charts/ceph-csi-rbd/values.yaml | 10 ++++++++++
 examples/rbd/storageclass.yaml  | 12 ++++++++++--
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml
index 2bf97d554..740ff12db 100644
--- a/charts/ceph-csi-rbd/values.yaml
+++ b/charts/ceph-csi-rbd/values.yaml
@@ -362,6 +362,11 @@ storageClass:
   # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
   # For nbd options refer
   # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+  # Format:
+  # mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
+  # An empty mounter field is treated as krbd type for compatibility.
+  # eg:
+  # mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink"
   mapOptions: ""
 
   # (optional) unmapOptions is a comma-separated list of unmap options.
@@ -369,6 +374,11 @@ storageClass:
   # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
   # For nbd options refer
   # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+  # Format:
+  # unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
+  # An empty mounter field is treated as krbd type for compatibility.
+  # eg:
+  # unmapOptions: "krbd:force;nbd:force"
   unmapOptions: ""
 
 # The secrets have to contain Ceph credentials with required access
diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml
index baba539b2..65ce181ab 100644
--- a/examples/rbd/storageclass.yaml
+++ b/examples/rbd/storageclass.yaml
@@ -53,14 +53,22 @@ parameters:
   # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
   # For nbd options refer
   # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
-  # mapOptions: lock_on_read,queue_depth=1024
+  # Format:
+  # mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
+  # An empty mounter field is treated as krbd type for compatibility.
+  # eg:
+  # mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink"
 
   # (optional) unmapOptions is a comma-separated list of unmap options.
   # For krbd options refer
   # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
   # For nbd options refer
   # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
-  # unmapOptions: force
+  # Format:
+  # unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
+  # An empty mounter field is treated as krbd type for compatibility.
+  # eg:
+  # unmapOptions: "krbd:force;nbd:force"
 
 # The secrets have to contain Ceph credentials with required access
 # to the 'pool'.

From 5472b66ccf33e9e0444c8b669433f65ef7686186 Mon Sep 17 00:00:00 2001
From: Prasanna Kumar Kalever
Date: Tue, 16 Nov 2021 21:21:43 +0530
Subject: [PATCH 21/23] e2e: start adopting the new format of map/unmapOptions

Signed-off-by: Prasanna Kumar Kalever
---
 e2e/rbd.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/e2e/rbd.go b/e2e/rbd.go
index c381325ab..e5ab04e64 100644
--- a/e2e/rbd.go
+++ b/e2e/rbd.go
@@ -61,7 +61,7 @@ var (
 	snapshotPath = rbdExamplePath + "snapshot.yaml"
 
 	defaultCloneCount = 10
 
-	nbdMapOptions             = "debug-rbd=20"
+	nbdMapOptions             = "nbd:debug-rbd=20"
 	e2eDefaultCephLogStrategy = "preserve"
 )
 
@@ -271,7 +271,7 @@ var _ = Describe("RBD", func() {
 		}
 		// default io-timeout=0, needs kernel >= 5.4
 		if !util.CheckKernelSupport(kernelRelease, nbdZeroIOtimeoutSupport) {
-			nbdMapOptions = "debug-rbd=20,io-timeout=330"
+			nbdMapOptions = "nbd:debug-rbd=20,io-timeout=330"
 		}
 	})

From 211ca9b5a74592550b1555edf7f517856c9c9c41 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Mon, 22 Nov 2021 15:49:56 +0530
Subject: [PATCH 22/23] rbd: do deep copy for dummyVol struct

With a shallow copy of rbdVol to dummyVol, the image name update of the
dummyVol gets reflected on the rbdVol, which we don't want.

Do a deep copy to avoid this problem.
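A minimal sketch of the difference (illustrative type only, not the actual
rbdVolume struct):

    type vol struct{ name string }

    v := &vol{name: "csi-vol"}
    shallow := v           // copies the pointer; both see the rename
    shallow.name = "dummy" // v.name is now "dummy" too
    deep := *v             // copies the struct value
    deep.name = "dummy"    // v.name is unchanged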
Signed-off-by: Madhu Rajanna
---
 internal/rbd/replicationcontrollerserver.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/internal/rbd/replicationcontrollerserver.go b/internal/rbd/replicationcontrollerserver.go
index f1729e2d1..bd5d4d58e 100644
--- a/internal/rbd/replicationcontrollerserver.go
+++ b/internal/rbd/replicationcontrollerserver.go
@@ -290,9 +290,9 @@ func createDummyImage(ctx context.Context, rbdVol *rbdVolume) error {
 	if err != nil {
 		return err
 	}
-	dummyVol := rbdVol
+	dummyVol := *rbdVol
 	dummyVol.RbdImageName = imgName
-	err = createImage(ctx, dummyVol, dummyVol.conn.Creds)
+	err = createImage(ctx, &dummyVol, dummyVol.conn.Creds)
 	if err != nil && !strings.Contains(err.Error(), "File exists") {
 		return err
 	}
@@ -310,7 +310,7 @@ func tickleMirroringOnDummyImage(rbdVol *rbdVolume, mirroringMode librbd.ImageMi
 	if err != nil {
 		return err
 	}
-	dummyVol := rbdVol
+	dummyVol := *rbdVol
 	dummyVol.RbdImageName = imgName
 
 	dummyImageOpsLock.Lock()

From 027b68ab391fe23e09ab13637c3f595868a6e7c4 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Mon, 22 Nov 2021 18:02:52 +0530
Subject: [PATCH 23/23] rbd: operate on dummy image after adding scheduling

Currently we first operate on the dummy image to refresh the pool, and
then we add the scheduling. We think the scheduling should be added
first, and then we should refresh the pool. If we do this, all the
existing schedules will be considered by the scheduler.

Signed-off-by: Madhu Rajanna
---
 internal/rbd/replicationcontrollerserver.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/internal/rbd/replicationcontrollerserver.go b/internal/rbd/replicationcontrollerserver.go
index bd5d4d58e..7d6659abc 100644
--- a/internal/rbd/replicationcontrollerserver.go
+++ b/internal/rbd/replicationcontrollerserver.go
@@ -523,12 +523,6 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
 		return nil, status.Errorf(codes.Internal, "failed to get mirroring mode %s", err.Error())
 	}
 
-	log.DebugLog(ctx, "Attempting to tickle dummy image for restarting RBD schedules")
-	err = tickleMirroringOnDummyImage(rbdVol, mode)
-	if err != nil {
-		return nil, status.Errorf(codes.Internal, "failed to enable mirroring on dummy image %s", err.Error())
-	}
-
 	interval, startTime := getSchedulingDetails(req.GetParameters())
 	if interval != admin.NoInterval {
 		err = rbdVol.addSnapshotScheduling(interval, startTime)
@@ -543,6 +537,12 @@ func (rs *ReplicationServer) PromoteVolume(ctx context.Context,
 			rbdVol)
 	}
 
+	log.DebugLog(ctx, "attempting to tickle dummy image for restarting RBD schedules")
+	err = tickleMirroringOnDummyImage(rbdVol, mode)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to enable mirroring on dummy image %s", err.Error())
+	}
+
 	return &replication.PromoteVolumeResponse{}, nil
 }