mirror of https://github.com/ceph/ceph-csi.git (synced 2025-01-31 00:59:30 +00:00)
commit 776d354a05
.mergify.yml | 85
@@ -37,23 +37,6 @@ queue_rules:
- "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- and:
- base=release-v3.4
- "status-success=codespell"
- "status-success=multi-arch-build"
- "status-success=go-test"
- "status-success=commitlint"
- "status-success=golangci-lint"
- "status-success=mod-check"
- "status-success=lint-extras"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.21"
- "status-success=ci/centos/mini-e2e/k8s-1.22"
- "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- and:
- base=ci/centos
- "status-success=ci/centos/job-validation"
@@ -186,76 +169,14 @@ pull_request_rules:
backport:
branches:
- release-v3.5
# automerge backports if CI successfully ran
- name: automerge backport release-v3.5
conditions:
- author=mergify[bot]
- base=release-v3.5
- label!=DNM
- "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "#changes-requested-reviews-by=0"
- "status-success=codespell"
- "status-success=multi-arch-build"
- "status-success=go-test"
- "status-success=commitlint"
- "status-success=golangci-lint"
- "status-success=mod-check"
- "status-success=lint-extras"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.21"
- "status-success=ci/centos/mini-e2e/k8s-1.22"
- "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/k8s-e2e-external-storage/1.21"
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
- "status-success=ci/centos/k8s-e2e-external-storage/1.23"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
queue:
name: default
- name: backport patches to release-v3.4 branch
- name: backport patches to release-v3.6 branch
conditions:
- base=devel
- label=backport-to-release-v3.4
- label=backport-to-release-v3.6
actions:
backport:
branches:
- release-v3.4
# automerge backports if CI successfully ran
- name: automerge backport release-v3.4
conditions:
- author=mergify[bot]
- base=release-v3.4
- label!=DNM
- "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell"
- "status-success=multi-arch-build"
- "status-success=go-test"
- "status-success=commitlint"
- "status-success=golangci-lint"
- "status-success=mod-check"
- "status-success=lint-extras"
- "#changes-requested-reviews-by=0"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.21"
- "status-success=ci/centos/mini-e2e/k8s-1.22"
- "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
queue:
name: default
delete_head_branch: {}
- release-v3.6
- name: remove outdated approvals on ci/centos
conditions:
- base=ci/centos
@@ -128,10 +128,10 @@ in the Kubernetes documentation.
| v3.6.0 (Release) | quay.io/cephcsi/cephcsi | v3.6.0 |
| v3.5.1 (Release) | quay.io/cephcsi/cephcsi | v3.5.1 |
| v3.5.0 (Release) | quay.io/cephcsi/cephcsi | v3.5.0 |
| v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 |

| Deprecated Ceph CSI Release/Branch | Container image name | Image Tag |
| ----------------------- | --------------------------------| --------- |
| v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 |
| v3.3.1 (Release) | quay.io/cephcsi/cephcsi | v3.3.1 |
| v3.3.0 (Release) | quay.io/cephcsi/cephcsi | v3.3.0 |
| v3.2.2 (Release) | quay.io/cephcsi/cephcsi | v3.2.2 |
@@ -31,6 +31,7 @@ spec:
priorityClassName: {{ .Values.nodeplugin.priorityClassName }}
{{- end }}
hostNetwork: true
hostPID: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
@@ -27,6 +27,7 @@ serviceAccounts:
# - "<MONValue2>"
# cephFS:
# subvolumeGroup: "csi"
# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net"
csiConfig: []

# Set logging level for csi containers.
@@ -102,6 +102,7 @@ spec:
- "--v={{ .Values.logLevel }}"
- "--timeout={{ .Values.provisioner.timeout }}"
- "--leader-election=true"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
@@ -25,6 +25,7 @@ serviceAccounts:
# monitors:
# - "<MONValue1>"
# - "<MONValue2>"
# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net"
csiConfig: []

# Configuration details of clusterID,PoolID and FscID mapping
@@ -15,6 +15,7 @@ spec:
serviceAccountName: cephfs-csi-nodeplugin
priorityClassName: system-node-critical
hostNetwork: true
hostPID: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
@@ -73,6 +73,7 @@ spec:
- "--v=5"
- "--timeout=150s"
- "--leader-election=true"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
@@ -25,7 +25,7 @@ it is **highly** encouraged to:
package](https://github.com/ceph/go-ceph). It is required to install the
Ceph C headers in order to compile Ceph-CSI. The packages are called
`librados-devel` and `librbd-devel` on many Linux distributions. See the
[go-ceph installaton
[go-ceph installation
instructions](https://github.com/ceph/go-ceph#installation) for more
details.
* Run
@@ -169,7 +169,7 @@ The `component` in the subject of the commit message can be one of the following
* `doc`: documentation updates
* `util`: utilities shared between components use `cephfs` or `rbd` if the
change is only relevant for one of the type of storage
* `journal`: any of the journalling functionalities
* `journal`: any of the journaling functionalities
* `helm`: deployment changes for the Helm charts
* `deploy`: updates to Kubernetes templates for deploying components
* `build`: anything related to building Ceph-CSI, the executable or container
@@ -312,12 +312,12 @@ Right now, we also have below commands to manually retrigger the CI jobs
**Caution**: Please do not retrigger the CI jobs without an understanding of
the root cause, because:

* We may miss some of the important corner cases which are true negatives,
* We may miss some important corner cases which are true negatives,
and hard to reproduce
* Retriggering jobs for known failures can unnecessarily put CI resources
under pressure

Hence it is recommended that you please go through the CI logs first, if you
Hence, it is recommended that you please go through the CI logs first, if you
are certain about the flaky test failure behavior, then comment on the PR
indicating the logs about a particular test that went flaky and use the
appropriate command to retrigger the job[s].
@@ -26,10 +26,10 @@ Promethues can be deployed through the promethues operator described [here](http
The [service-monitor](../examples/service-monitor.yaml) will tell promethues how
to pull metrics out of CSI.

Each CSI pod has a service to expose the endpoint to prometheus. By default rbd
Each CSI pod has a service to expose the endpoint to prometheus. By default, rbd
pods run on port 8080 and cephfs 8081.
These can be changed if desired or if multiple ceph clusters are deployed more
ports will be used for additional CSI pods.

Note: You may need to open the ports used in your firewall depending on how you
cluster is setup.
Note: You may need to open the ports used in your firewall depending on how your
cluster has set up.
@@ -41,7 +41,7 @@ and it must be backward compatible.

- When `MAJOR` increases, the new release adds **new features,
bug fixes, or both** and which *changes the behavior from
the previous release* (may be backward incompatible).
the previous release* (maybe backward incompatible).

## Tagging repositories

@@ -90,12 +90,12 @@ are available while running tests:
| flag | description |
| ----------------- | ----------------------------------------------------------------------------- |
| deploy-timeout | Timeout to wait for created kubernetes resources (default: 10 minutes) |
| deploy-cephfs | Deploy cephFS csi driver as part of E2E (default: true) |
| deploy-rbd | Deploy rbd csi driver as part of E2E (default: true) |
| test-cephfs | Test cephFS csi driver as part of E2E (default: true) |
| deploy-cephfs | Deploy cephFS CSI driver as part of E2E (default: true) |
| deploy-rbd | Deploy rbd CSI driver as part of E2E (default: true) |
| test-cephfs | Test cephFS CSI driver as part of E2E (default: true) |
| upgrade-testing | Perform upgrade testing (default: false) |
| upgrade-version | Target version for upgrade testing (default: "v3.3.1") |
| test-rbd | Test rbd csi driver as part of E2E (default: true) |
| upgrade-version | Target version for upgrade testing (default: "v3.5.1") |
| test-rbd | Test rbd CSI driver as part of E2E (default: true) |
| cephcsi-namespace | The namespace in which cephcsi driver will be created (default: "default") |
| rook-namespace | The namespace in which rook operator is installed (default: "rook-ceph") |
| kubeconfig | Path to kubeconfig containing embedded authinfo (default: $HOME/.kube/config) |
@@ -106,30 +106,6 @@ are available while running tests:

After the support for snapshot/clone has been added to ceph-csi,
you need to follow these steps before running e2e.
Please note that the snapshot operation works only if the Kubernetes version
is greater than or equal to 1.17.0.

- Delete Alpha snapshot CRD created by ceph-csi in rook.
- Check if you have any `v1alpha1` CRD created in our Kubernetes cluster

```bash
$ kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io -o yaml |grep v1alpha1
- name: v1alpha1
- v1alpha1
$ kubectl get crd volumesnapshotcontents.snapshot.storage.k8s.io -o yaml |grep v1alpha1
- name: v1alpha1
- v1alpha1
$ kubectl get crd volumesnapshots.snapshot.storage.k8s.io -o yaml |grep v1alpha1
- name: v1alpha1
- v1alpha1
```

- If you have Alpha CRD, delete it as from Kubernetes 1.17.0+ the snapshot
should be `v1beta1`

```console
./scripts/install-snapshot.sh delete-crd
```

- Install snapshot controller and Beta snapshot CRD

@@ -456,7 +456,7 @@ var _ = Describe("cephfs", func() {
// clean up after ourselves
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to delete PVC: %v", err)
e2elog.Failf("failed to delete PVC: %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
@@ -575,7 +575,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("killall command failed: err %v, stderr %s", err, stdErr)
}
// Verify Pod podName2 that stat()-ing the mountpoint results in ENOTCONN.
// Verify Pod pod2Name that stat()-ing the mountpoint results in ENOTCONN.
stdErr, err = doStat(pod2Name)
if err == nil || !strings.Contains(stdErr, "not connected") {
e2elog.Failf(
@@ -583,7 +583,7 @@ var _ = Describe("cephfs", func() {
err, stdErr,
)
}
// Delete podName2 Pod. This serves two purposes: it verifies that deleting pods with
// Delete pod2Name Pod. This serves two purposes: it verifies that deleting pods with
// corrupted ceph-fuse mountpoints works, and it lets the replicaset controller recreate
// the pod with hopefully mounts working again.
err = deletePod(pod2Name, depl.Namespace, c, deployTimeout)
@@ -616,7 +616,7 @@ var _ = Describe("cephfs", func() {
}
e2elog.Failf("no new replica found ; found replicas %v", podNames)
}
// Verify Pod podName3 has its ceph-fuse mount working again.
// Verify Pod pod2Name has its ceph-fuse mount working again.
err = ensureStatSucceeds(pod2Name)
if err != nil {
e2elog.Failf(err.Error())
@@ -40,7 +40,7 @@ func init() {
flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")
flag.StringVar(&upgradeVersion, "upgrade-version", "v3.3.1", "target version for upgrade testing")
flag.StringVar(&upgradeVersion, "upgrade-version", "v3.5.1", "target version for upgrade testing")
flag.StringVar(&cephCSINamespace, "cephcsi-namespace", defaultNs, "namespace in which cephcsi deployed")
flag.StringVar(&rookNamespace, "rook-namespace", "rook-ceph", "namespace in which rook is deployed")
setDefaultKubeconfig()
@@ -31,7 +31,7 @@ const (

// kmsConfig is an interface that should be used when passing a configuration
// for a KMS to validation functions. This allows the validation functions to
// work independently from the actual KMS.
// work independently of the actual KMS.
type kmsConfig interface {
canGetPassphrase() bool
getPassphrase(f *framework.Framework, key string) (string, string)
e2e/rbd.go | 292
@@ -29,6 +29,7 @@ import (
. "github.com/onsi/ginkgo" // nolint
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -89,6 +90,17 @@ var (

nbdMapOptions = "nbd:debug-rbd=20"
e2eDefaultCephLogStrategy = "preserve"

// PV and PVC metadata keys used by external provisioner as part of
// create requests as parameters, when `extra-create-metadata` is true.
pvcNameKey = "csi.storage.k8s.io/pvc/name"
pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace"
pvNameKey = "csi.storage.k8s.io/pv/name"

// snapshot metadata keys.
volSnapNameKey = "csi.storage.k8s.io/volumesnapshot/name"
volSnapNamespaceKey = "csi.storage.k8s.io/volumesnapshot/namespace"
volSnapContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name"
)

func deployRBDPlugin() {
@@ -394,6 +406,286 @@ var _ = Describe("RBD", func() {
}
})
}

By("create a PVC and check PVC/PV metadata on RBD image", func() {
pvc, err := loadPVC(pvcPath)
if err != nil {
e2elog.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName

err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)

imageList, err := listRBDImages(f, defaultRBDPool)
if err != nil {
e2elog.Failf("failed to list rbd images: %v", err)
}

pvcName, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], pvcNameKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get PVC name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr)
}
pvcName = strings.TrimSuffix(pvcName, "\n")
if pvcName != pvc.Name {
e2elog.Failf("expected pvcName %q got %q", pvc.Name, pvcName)
}

pvcNamespace, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get PVC namespace %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], pvcNamespaceKey, err, stdErr)
}
pvcNamespace = strings.TrimSuffix(pvcNamespace, "\n")
if pvcNamespace != pvc.Namespace {
e2elog.Failf("expected pvcNamespace %q got %q", pvc.Namespace, pvcNamespace)
}

pvcObj, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(
context.TODO(),
pvc.Name,
metav1.GetOptions{})
if err != nil {
e2elog.Logf("error getting pvc %q in namespace %q: %v", pvc.Name, pvc.Namespace, err)
}
if pvcObj.Spec.VolumeName == "" {
e2elog.Logf("pv name is empty %q in namespace %q: %v", pvc.Name, pvc.Namespace, err)
}
pvName, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], pvNameKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get PV name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], pvNameKey, err, stdErr)
}
pvName = strings.TrimSuffix(pvName, "\n")
if pvName != pvcObj.Spec.VolumeName {
e2elog.Failf("expected pvName %q got %q", pvcObj.Spec.VolumeName, pvName)
}

err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to delete pvc: %v", err)
}
validateRBDImageCount(f, 0, defaultRBDPool)
})

By("reattach the old PV to a new PVC and check if PVC metadata is updated on RBD image", func() {
pvc, err := loadPVC(pvcPath)
if err != nil {
e2elog.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName

err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)

imageList, err := listRBDImages(f, defaultRBDPool)
if err != nil {
e2elog.Failf("failed to list rbd images: %v", err)
}

pvcName, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], pvcNameKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get PVC name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr)
}
pvcName = strings.TrimSuffix(pvcName, "\n")
if pvcName != pvc.Name {
e2elog.Failf("expected pvcName %q got %q", pvc.Name, pvcName)
}

pvcObj, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(
context.TODO(),
pvc.Name,
metav1.GetOptions{})
if err != nil {
e2elog.Logf("error getting pvc %q in namespace %q: %v", pvc.Name, pvc.Namespace, err)
}
if pvcObj.Spec.VolumeName == "" {
e2elog.Logf("pv name is empty %q in namespace %q: %v", pvc.Name, pvc.Namespace, err)
}

patchBytes := []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Retain", "claimRef": null}}`)
_, err = c.CoreV1().PersistentVolumes().Patch(
context.TODO(),
pvcObj.Spec.VolumeName,
types.StrategicMergePatchType,
patchBytes,
metav1.PatchOptions{})
if err != nil {
e2elog.Logf("error Patching PV %q for persistentVolumeReclaimPolicy and claimRef: %v",
pvcObj.Spec.VolumeName, err)
}

err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(
context.TODO(),
pvc.Name,
metav1.DeleteOptions{})
if err != nil {
e2elog.Logf("failed to delete pvc: %w", err)
}

// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)

pvcObj.Name = "rbd-pvc-new"
// unset the resource version as should not be set on objects to be created
pvcObj.ResourceVersion = ""
err = createPVCAndvalidatePV(f.ClientSet, pvcObj, deployTimeout)
if err != nil {
e2elog.Failf("failed to create new PVC: %v", err)
}

// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)

pvcName, stdErr, err = execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], pvcNameKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get PVC name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], pvcNameKey, err, stdErr)
}
pvcName = strings.TrimSuffix(pvcName, "\n")
if pvcName != pvcObj.Name {
e2elog.Failf("expected pvcName %q got %q", pvcObj.Name, pvcName)
}

patchBytes = []byte(`{"spec":{"persistentVolumeReclaimPolicy": "Delete"}}`)
_, err = c.CoreV1().PersistentVolumes().Patch(
context.TODO(),
pvcObj.Spec.VolumeName,
types.StrategicMergePatchType,
patchBytes,
metav1.PatchOptions{})
if err != nil {
e2elog.Logf("error Patching PV %q for persistentVolumeReclaimPolicy: %v", pvcObj.Spec.VolumeName, err)
}
err = deletePVCAndValidatePV(f.ClientSet, pvcObj, deployTimeout)
if err != nil {
e2elog.Failf("failed to delete pvc: %v", err)
}
validateRBDImageCount(f, 0, defaultRBDPool)
})

By("create a snapshot and check metadata on RBD snapshot image", func() {
err := createRBDSnapshotClass(f)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
defer func() {
err = deleteRBDSnapshotClass()
if err != nil {
e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
}
}()

pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
if err != nil {
e2elog.Failf("failed to create pvc and application binding: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)
// delete pod as we should not create snapshot for in-use pvc
err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("failed to delete application: %v", err)
}

snap := getSnapshot(snapshotPath)
snap.Namespace = f.UniqueName
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name

err = createSnapshot(&snap, deployTimeout)
if err != nil {
e2elog.Failf("failed to create snapshot: %v", err)
}
// validate created backend rbd images
// parent PVC + snapshot
totalImages := 2
validateRBDImageCount(f, totalImages, defaultRBDPool)

imageList, err := listRBDImages(f, defaultRBDPool)
if err != nil {
e2elog.Failf("failed to list rbd images: %v", err)
}

volSnapName, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get volume snapshot name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], volSnapNameKey, err, stdErr)
}
volSnapName = strings.TrimSuffix(volSnapName, "\n")
if volSnapName != snap.Name {
e2elog.Failf("expected volSnapName %q got %q", snap.Name, volSnapName)
}

volSnapNamespace, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get volume snapshot namespace %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], volSnapNamespaceKey, err, stdErr)
}
volSnapNamespace = strings.TrimSuffix(volSnapNamespace, "\n")
if volSnapNamespace != snap.Namespace {
e2elog.Failf("expected volSnapNamespace %q got %q", snap.Namespace, volSnapNamespace)
}

content, err := getVolumeSnapshotContent(snap.Namespace, snap.Name)
if err != nil {
e2elog.Failf("failed to get snapshotcontent for %s in namespace %s: %v",
snap.Name, snap.Namespace, err)
}
volSnapContentName, stdErr, err := execCommandInToolBoxPod(f,
fmt.Sprintf("rbd image-meta get %s --image=%s %s",
rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey),
rookNamespace)
if err != nil || stdErr != "" {
e2elog.Failf("failed to get snapshotcontent name %s/%s %s: err=%v stdErr=%q",
rbdOptions(defaultRBDPool), imageList[0], volSnapContentNameKey, err, stdErr)
}
volSnapContentName = strings.TrimSuffix(volSnapContentName, "\n")
if volSnapContentName != content.Name {
e2elog.Failf("expected volSnapContentName %q got %q", content.Name, volSnapContentName)
}

err = deleteSnapshot(&snap, deployTimeout)
if err != nil {
e2elog.Failf("failed to delete snapshot: %v", err)
}
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to delete pvc: %v", err)
}
validateRBDImageCount(f, 0, defaultRBDPool)
})

By("verify generic ephemeral volume support", func() {
// generic ephemeral volume support is supported from 1.21
if k8sVersionGreaterEquals(f.ClientSet, 1, 21) {
@@ -50,6 +50,42 @@ kubectl replace -f ./csi-config-map-sample.yaml
Storage class and snapshot class, using `<cluster-id>` as the value for the
option `clusterID`, can now be created on the cluster.

## Running CephCSI with pod networking

The current problem with Pod Networking, is when a CephFS/RBD volume is mounted
in a pod using Ceph CSI and then the CSI CephFS/RBD plugin is restarted or
terminated (e.g. by restarting or deleting its DaemonSet), all operations on
the volume become blocked, even after restarting the CSI pods.

The only workaround is to restart the node where the Ceph CSI plugin pod was
restarted. This can be mitigated by running the `rbd map`/`mount -t` commands
in a different network namespace which does not get deleted when the CSI
CephFS/RBD plugin is restarted or terminated.

If someone wants to run the CephCSI with the pod networking they can still do
by setting the `netNamespaceFilePath`. If this path is set CephCSI will execute
the `rbd map`/`mount -t` commands after entering the [network
namespace](https://man7.org/linux/man-pages/man7/network_namespaces.7.html)
specified by `netNamespaceFilePath` with the
[nsenter](https://man7.org/linux/man-pages/man1/nsenter.1.html) command.

`netNamespaceFilePath` should point to the network namespace of some
long-running process, typically it would be a symlink to
`/proc/<long running process id>/ns/net`.

The long-running process can also be another pod which is a Daemonset which
never restarts. This Pod should only be stopped and restarted when a node is
stopped so that volume operations do not become blocked. The new DaemonSet pod
can contain a single container, responsible for holding its pod network alive.
It is used as a passthrough by the CephCSI plugin pod which when mounting or
mapping will use the network namespace of this pod.

Once the pod is created get its PID and create a symlink to
`/proc/<PID>/ns/net` in the hostPath volume shared with the csi-plugin pod and
specify the path in the `netNamespaceFilePath` option.

*Note* This Pod should have `hostPID: true` in the Pod Spec.
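
As a sketch of how these pieces fit together (using the `GetNetNamespaceFilePath` and `ExecuteCommandWithNSEnter` helpers added in this commit; the cluster ID, command, and arguments below are placeholders, not part of the documented API):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ceph/ceph-csi/internal/util"
)

// mountInPodNetNS resolves the netNamespaceFilePath configured for a cluster
// and, when it is set, runs the mount command through nsenter inside that
// network namespace; otherwise it falls back to the current namespace.
func mountInPodNetNS(ctx context.Context, clusterID string, args ...string) error {
	netNS, err := util.GetNetNamespaceFilePath(util.CsiConfigFile, clusterID)
	if err != nil {
		return err
	}

	var stderr string
	if netNS != "" {
		// equivalent to: nsenter --net=<netNS> -- mount <args...>
		_, stderr, err = util.ExecuteCommandWithNSEnter(ctx, netNS, "mount", args...)
	} else {
		_, stderr, err = util.ExecCommand(ctx, "mount", args...)
	}
	if err != nil {
		return fmt.Errorf("mount failed: %w stderr: %s", err, stderr)
	}

	return nil
}
```

This mirrors the wiring done in `NodeStageVolume` for both the CephFS and RBD node servers in this change.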

## Deploying the storage class

Once the plugin is successfully deployed, you'll need to customize
@@ -20,6 +20,12 @@ kind: ConfigMap
# NOTE: Make sure you don't add radosNamespace option to a currently in use
# configuration as it will cause issues.
# The field "cephFS.subvolumeGroup" is optional and defaults to "csi".
# The <netNamespaceFilePath#> fields are the various network namespace
# path for the Ceph cluster identified by the <cluster-id>, This will be used
# by the CSI plugin to execute the rbd map/unmap and mount -t commands in the
# network namespace specified by the <netNamespaceFilePath#>.
# If a CSI plugin is using more than one Ceph cluster, repeat the section for
# each such cluster in use.
# NOTE: Changes to the configmap is automatically updated in the running pods,
# thus restarting existing pods using the configmap is NOT required on edits
# to the configmap.
@@ -37,6 +43,7 @@ data:
{
"clusterID": "<cluster-id>",
"radosNamespace": "<rados-namespace>",
"netNamespaceFilePath": "<kubeletRootPath>/plugins/rbd.csi.ceph.com/net",
"monitors": [
"<MONValue1>",
"<MONValue2>",
go.mod | 12
@@ -5,7 +5,7 @@ go 1.17
require (
github.com/IBM/keyprotect-go-client v0.7.0
github.com/aws/aws-sdk-go v1.43.32
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0
github.com/aws/aws-sdk-go-v2/service/sts v1.16.3
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.14.0
github.com/container-storage-interface/spec v1.5.0
@@ -44,11 +44,11 @@ require (
require (
github.com/armon/go-metrics v0.3.9 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.15.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 // indirect
github.com/aws/smithy-go v1.11.1 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect
github.com/aws/smithy-go v1.11.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.2.0 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
go.sum | 24
@@ -140,18 +140,18 @@ github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.43.32 h1:b2NQnfWfImfo7yzXq6gzXEC+6s5v1t2RU3G9o+VirYo=
github.com/aws/aws-sdk-go v1.43.32/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.15.0 h1:f9kWLNfyCzCB43eupDAk3/XgJ2EpgktiySD6leqs0js=
github.com/aws/aws-sdk-go-v2 v1.15.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 h1:xiGjGVQsem2cxoIX61uRGy+Jux2s9C/kKbTrWLdrU54=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6/go.mod h1:SSPEdf9spsFgJyhjrXvawfpyzrXHBCUe+2eQ1CjC1Ak=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 h1:bt3zw79tm209glISdMRCIVRCwvSDXxgAxh5KWe2qHkY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0/go.mod h1:viTrxhAuejD+LszDahzAE2x40YjYWhMqzHxv2ZiWaME=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0/go.mod h1:R31ot6BgESRCIoxwfKtIHzZMo/vsZn2un81g9BJ4nmo=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0/go.mod h1:+8k4H2ASUZZXmjx/s3DFLo9tGBb44lkz3XcgfypJY7s=
github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA=
github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8=
github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -65,12 +65,20 @@ func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, vol
if volOptions.FsName != "" {
args = append(args, "--client_mds_namespace="+volOptions.FsName)
}
var (
stderr string
err error
)

if volOptions.NetNamespaceFilePath != "" {
_, stderr, err = util.ExecuteCommandWithNSEnter(ctx, volOptions.NetNamespaceFilePath, "ceph-fuse", args[:]...)
} else {
_, stderr, err = util.ExecCommand(ctx, "ceph-fuse", args[:]...)
}

_, stderr, err := util.ExecCommand(ctx, "ceph-fuse", args[:]...)
if err != nil {
return fmt.Errorf("%w stderr: %s", err, stderr)
}

// Parse the output:
// We need "starting fuse" meaning the mount is ok
// and PID of the ceph-fuse daemon for unmount
@@ -51,7 +51,16 @@ func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, v

args = append(args, "-o", optionsStr)

_, stderr, err := util.ExecCommand(ctx, "mount", args[:]...)
var (
stderr string
err error
)

if volOptions.NetNamespaceFilePath != "" {
_, stderr, err = util.ExecuteCommandWithNSEnter(ctx, volOptions.NetNamespaceFilePath, "mount", args[:]...)
} else {
_, stderr, err = util.ExecCommand(ctx, "mount", args[:]...)
}
if err != nil {
return fmt.Errorf("%w stderr: %s", err, stderr)
}
@@ -126,6 +126,11 @@ func (ns *NodeServer) NodeStageVolume(
}
defer volOptions.Destroy()

volOptions.NetNamespaceFilePath, err = util.GetNetNamespaceFilePath(util.CsiConfigFile, volOptions.ClusterID)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

mnt, err := mounter.New(volOptions)
if err != nil {
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
@@ -50,6 +50,8 @@ type VolumeOptions struct {
ProvisionVolume bool `json:"provisionVolume"`
KernelMountOptions string `json:"kernelMountOptions"`
FuseMountOptions string `json:"fuseMountOptions"`
// Network namespace file path to execute nsenter command
NetNamespaceFilePath string

// conn is a connection to the Ceph cluster obtained from a ConnPool
conn *util.ClusterConnection
@@ -177,7 +177,13 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
}
defer cr.DeleteCredentials()

rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, pvcNamespace, cr)
rbdVolID, err := rbd.RegenerateJournal(
pv.Spec.CSI.VolumeAttributes,
pv.Spec.ClaimRef.Name,
volumeHandler,
requestName,
pvcNamespace,
cr)
if err != nil {
log.ErrorLogMsg("failed to regenerate journal %s", err)

@@ -329,6 +329,12 @@ func (cs *ControllerServer) CreateVolume(
return nil, err
}

// Set Metadata on PV Create
err = rbdVol.setVolumeMetadata(req.GetParameters())
if err != nil {
return nil, err
}

return buildCreateVolumeResponse(req, rbdVol), nil
}

@@ -446,6 +452,12 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
}
}

// Set metadata on restart of provisioner pod when image exist
err := rbdVol.setVolumeMetadata(req.GetParameters())
if err != nil {
return nil, err
}

return buildCreateVolumeResponse(req, rbdVol), nil
}

@@ -951,6 +963,7 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
}

// CreateSnapshot creates the snapshot in backend and stores metadata in store.
// nolint:cyclop // TODO: reduce complexity
func (cs *ControllerServer) CreateSnapshot(
ctx context.Context,
req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
@@ -1024,7 +1037,7 @@ func (cs *ControllerServer) CreateSnapshot(
return nil, status.Errorf(codes.Internal, err.Error())
}
if found {
return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr)
return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr, req.GetParameters())
}

err = flattenTemporaryClonedImages(ctx, rbdVol, cr)
@@ -1050,6 +1063,16 @@ func (cs *ControllerServer) CreateSnapshot(
return nil, status.Error(codes.Internal, err.Error())
}

// Update the metadata on snapshot not on the original image
rbdVol.RbdImageName = rbdSnap.RbdSnapName

// Set snapshot-name/snapshot-namespace/snapshotcontent-name details
// on RBD backend image as metadata on create
err = rbdVol.setSnapshotMetadata(req.GetParameters())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: vol.VolSize,
@@ -1067,7 +1090,8 @@ func cloneFromSnapshot(
ctx context.Context,
rbdVol *rbdVolume,
rbdSnap *rbdSnapshot,
cr *util.Credentials) (*csi.CreateSnapshotResponse, error) {
cr *util.Credentials,
parameters map[string]string) (*csi.CreateSnapshotResponse, error) {
vol := generateVolFromSnap(rbdSnap)
err := vol.Connect(cr)
if err != nil {
@@ -1100,6 +1124,15 @@ func cloneFromSnapshot(
return nil, status.Errorf(codes.Internal, err.Error())
}

// Update snapshot-name/snapshot-namespace/snapshotcontent-name details on
// RBD backend image as metadata on restart of provisioner pod when image exist
if len(parameters) != 0 {
err = rbdVol.setSnapshotMetadata(parameters)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}

return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: rbdSnap.VolSize,
@@ -153,6 +153,8 @@ func (r *Driver) Run(conf *util.Config) {
log.FatalLogMsg(err.Error())
}
rbd.SetGlobalInt("krbdFeatures", krbdFeatures)

rbd.SetRbdNbdToolFeatures()
}

if conf.IsControllerServer {
@@ -332,6 +332,10 @@ func (ns *NodeServer) NodeStageVolume(
}
defer rv.Destroy()

rv.NetNamespaceFilePath, err = util.GetNetNamespaceFilePath(util.CsiConfigFile, rv.ClusterID)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if isHealer {
err = healerStageTransaction(ctx, cr, rv, stagingParentPath)
if err != nil {
@@ -92,10 +92,6 @@ var (
}
)

func init() {
setRbdNbdToolFeatures()
}

// rbdDeviceInfo strongly typed JSON spec for rbd device list output (of type krbd).
type rbdDeviceInfo struct {
ID string `json:"id"`
@@ -216,8 +212,9 @@ func waitForPath(ctx context.Context, pool, namespace, image string, maxRetries
return "", false
}

// set features available with rbd-nbd, and NBD module loaded status.
func setRbdNbdToolFeatures() {
// SetRbdNbdToolFeatures sets features available with rbd-nbd, and NBD module
// loaded status.
func SetRbdNbdToolFeatures() {
var stderr string
// check if the module is loaded or compiled in
_, err := os.Stat(fmt.Sprintf("/sys/module/%s", moduleNbd))
@@ -457,8 +454,17 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
mapArgs = append(mapArgs, "--read-only")
}

// Execute map
stdout, stderr, err := util.ExecCommand(ctx, cli, mapArgs...)
var (
stdout string
stderr string
err error
)

if volOpt.NetNamespaceFilePath != "" {
stdout, stderr, err = util.ExecuteCommandWithNSEnter(ctx, volOpt.NetNamespaceFilePath, cli, mapArgs...)
} else {
stdout, stderr, err = util.ExecCommand(ctx, cli, mapArgs...)
}
if err != nil {
log.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr)
// unmap rbd image if connection timeout
@@ -23,6 +23,7 @@ import (

"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"
)

@@ -533,9 +534,13 @@ func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
// Generate new volume Handler
// The volume handler won't remain same as its contains poolID,clusterID etc
// which are not same across clusters.
// nolint:gocyclo,cyclop // TODO: reduce complexity
func RegenerateJournal(
volumeAttributes map[string]string,
volumeID, requestName, owner string,
claimName,
volumeID,
requestName,
owner string,
cr *util.Credentials) (string, error) {
ctx := context.Background()
var (
@@ -598,16 +603,24 @@ func RegenerateJournal(
if err != nil {
return "", err
}

if imageData != nil {
rbdVol.ReservedID = imageData.ImageUUID
rbdVol.ImageID = imageData.ImageAttributes.ImageID
rbdVol.Owner = imageData.ImageAttributes.Owner
rbdVol.RbdImageName = imageData.ImageAttributes.ImageName
if rbdVol.ImageID == "" {
err = rbdVol.storeImageID(ctx, j)
if err != nil {
return "", err
}
}
// Update Metadata on reattach of the same old PV
parameters := k8s.PrepareVolumeMetadata(claimName, rbdVol.Owner, "")
err = rbdVol.setVolumeMetadata(parameters)
if err != nil {
return "", fmt.Errorf("failed to set volume metadata: %w", err)
}
// As the omap already exists for this image ID return nil.
rbdVol.VolID, err = util.GenerateVolID(ctx, rbdVol.Monitors, cr, imagePoolID, rbdVol.Pool,
rbdVol.ClusterID, rbdVol.ReservedID, volIDVersion)
@@ -28,6 +28,7 @@ import (
"time"

"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"

"github.com/ceph/go-ceph/rados"
@@ -156,6 +157,8 @@ type rbdVolume struct {
LogStrategy string
VolName string
MonValueFromSecret string
// Network namespace file path to execute nsenter command
NetNamespaceFilePath string
// RequestedVolSize has the size of the volume requested by the user and
// this value will not be updated when doing getImageInfo() on rbdVolume.
RequestedVolSize int64
@@ -1918,3 +1921,28 @@ func genVolFromVolIDWithMigration(

return rv, err
}

// setVolumeMetadata set PV/PVC/PVCNamespace metadata on RBD image.
func (rv *rbdVolume) setVolumeMetadata(parameters map[string]string) error {
for k, v := range k8s.GetVolumeMetadata(parameters) {
err := rv.SetMetadata(k, v)
if err != nil {
return fmt.Errorf("failed to set metadata key %q, value %q on image: %w", k, v, err)
}
}

return nil
}

// setSnapshotMetadata Set snapshot-name/snapshot-namespace/snapshotcontent-name metadata
// on RBD image.
func (rv *rbdVolume) setSnapshotMetadata(parameters map[string]string) error {
for k, v := range k8s.GetSnapshotMetadata(parameters) {
err := rv.SetMetadata(k, v)
if err != nil {
return fmt.Errorf("failed to set metadata key %q, value %q on image: %w", k, v, err)
}
}

return nil
}
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"time"

@@ -32,6 +33,47 @@ import (
// InvalidPoolID used to denote an invalid pool.
const InvalidPoolID int64 = -1

// ExecuteCommandWithNSEnter executes passed in program with args with nsenter
// and returns separate stdout and stderr streams. In case ctx is not set to
// context.TODO(), the command will be logged after it was executed.
func ExecuteCommandWithNSEnter(ctx context.Context, netPath, program string, args ...string) (string, string, error) {
var (
sanitizedArgs = StripSecretInArgs(args)
stdoutBuf bytes.Buffer
stderrBuf bytes.Buffer
)

// check netPath exists
if _, err := os.Stat(netPath); err != nil {
return "", "", fmt.Errorf("failed to get stat for %s %w", netPath, err)
}
// nsenter --net=%s -- <program> <args>
args = append([]string{fmt.Sprintf("--net=%s", netPath), "--", program}, args...)
cmd := exec.Command("nsenter", args...) // #nosec:G204, commands executing not vulnerable.

cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf

err := cmd.Run()
stdout := stdoutBuf.String()
stderr := stderrBuf.String()

if err != nil {
err = fmt.Errorf("an error (%w) occurred while running %s args: %v", err, program, sanitizedArgs)
if ctx != context.TODO() {
log.UsefulLog(ctx, "%s", err)
}

return stdout, stderr, err
}

if ctx != context.TODO() {
log.UsefulLog(ctx, "command succeeded: %s %v", program, sanitizedArgs)
}

return stdout, stderr, nil
}

// ExecCommand executes passed in program with args and returns separate stdout
// and stderr streams. In case ctx is not set to context.TODO(), the command
// will be logged after it was executed.
@@ -49,6 +49,8 @@ type ClusterInfo struct {
// SubvolumeGroup contains the name of the SubvolumeGroup for CSI volumes
SubvolumeGroup string `json:"subvolumeGroup"`
} `json:"cephFS"`
// symlink filepath for the network namespace where we need to execute commands.
NetNamespaceFilePath string `json:"netNamespaceFilePath"`
}

// Expected JSON structure in the passed in config file is,
@@ -161,3 +163,12 @@ func GetClusterID(options map[string]string) (string, error) {

return clusterID, nil
}

func GetNetNamespaceFilePath(pathToConfig, clusterID string) (string, error) {
cluster, err := readClusterInfo(pathToConfig, clusterID)
if err != nil {
return "", err
}

return cluster.NetNamespaceFilePath, nil
}
@@ -17,6 +17,7 @@ limitations under the License.
package util

import (
"encoding/json"
"os"
"testing"
)
@@ -138,3 +139,69 @@ func TestCSIConfig(t *testing.T) {
t.Errorf("Test setup error %s", err)
}
}

func TestGetNetNamespaceFilePath(t *testing.T) {
t.Parallel()
tests := []struct {
name string
clusterID string
want string
}{
{
name: "get NetNamespaceFilePath for cluster-1",
clusterID: "cluster-1",
want: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster1-net",
},
{
name: "get NetNamespaceFilePath for cluster-2",
clusterID: "cluster-2",
want: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster2-net",
},
{
name: "when NetNamespaceFilePath is empty",
clusterID: "cluster-3",
want: "",
},
}

csiConfig := []ClusterInfo{
{
ClusterID: "cluster-1",
Monitors: []string{"ip-1", "ip-2"},
NetNamespaceFilePath: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster1-net",
},
{
ClusterID: "cluster-2",
Monitors: []string{"ip-3", "ip-4"},
NetNamespaceFilePath: "/var/lib/kubelet/plugins/rbd.ceph.csi.com/cluster2-net",
},
{
ClusterID: "cluster-3",
Monitors: []string{"ip-5", "ip-6"},
},
}
csiConfigFileContent, err := json.Marshal(csiConfig)
if err != nil {
t.Errorf("failed to marshal csi config info %v", err)
}
tmpConfPath := t.TempDir() + "/ceph-csi.json"
err = os.WriteFile(tmpConfPath, csiConfigFileContent, 0o600)
if err != nil {
t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
}
for _, tt := range tests {
ts := tt
t.Run(ts.name, func(t *testing.T) {
t.Parallel()
got, err := GetNetNamespaceFilePath(tmpConfPath, ts.clusterID)
if err != nil {
t.Errorf("GetNetNamespaceFilePath() error = %v", err)

return
}
if got != ts.want {
t.Errorf("GetNetNamespaceFilePath() = %v, want %v", got, ts.want)
}
})
}
}
@@ -23,7 +23,17 @@ import (
// to the driver on CreateVolumeRequest/CreateSnapshotRequest calls.
const (
csiParameterPrefix = "csi.storage.k8s.io/"
pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace"

// PV and PVC metadata keys used by external provisioner as part of
// create requests as parameters, when `extra-create-metadata` is true.
pvcNameKey = "csi.storage.k8s.io/pvc/name"
pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace"
pvNameKey = "csi.storage.k8s.io/pv/name"

// snapshot metadata keys.
volSnapNameKey = "csi.storage.k8s.io/volumesnapshot/name"
volSnapNamespaceKey = "csi.storage.k8s.io/volumesnapshot/namespace"
volSnapContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name"
)

// RemoveCSIPrefixedParameters removes parameters prefixed with csiParameterPrefix.
@@ -43,3 +53,50 @@ func RemoveCSIPrefixedParameters(param map[string]string) map[string]string {
func GetOwner(param map[string]string) string {
return param[pvcNamespaceKey]
}

// GetVolumeMetadata filter parameters, only return PV/PVC/PVCNamespace metadata.
func GetVolumeMetadata(parameters map[string]string) map[string]string {
keys := []string{pvcNameKey, pvcNamespaceKey, pvNameKey}
newParam := map[string]string{}
for k, v := range parameters {
for _, key := range keys {
if strings.Contains(k, key) {
newParam[k] = v
}
}
}

return newParam
}

// PrepareVolumeMetadata return PV/PVC/PVCNamespace metadata based on inputs.
func PrepareVolumeMetadata(pvcName, pvcNamespace, pvName string) map[string]string {
newParam := map[string]string{}
if pvcName != "" {
newParam[pvcNameKey] = pvcName
}
if pvcNamespace != "" {
newParam[pvcNamespaceKey] = pvcNamespace
}
if pvName != "" {
newParam[pvNameKey] = pvName
}

return newParam
}

// GetSnapshotMetadata filter parameters, only return
// snapshot-name/snapshot-namespace/snapshotcontent-name metadata.
func GetSnapshotMetadata(parameters map[string]string) map[string]string {
keys := []string{volSnapNameKey, volSnapNamespaceKey, volSnapContentNameKey}
newParam := map[string]string{}
for k, v := range parameters {
for _, key := range keys {
if strings.Contains(k, key) {
newParam[k] = v
}
}
}

return newParam
}
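
For context, a small usage sketch of the helpers above (the parameter values are illustrative, not taken from this commit):

```go
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/internal/util/k8s"
)

func main() {
	// Parameters as the external-provisioner sends them when
	// `--extra-create-metadata=true` is set (values are made up here).
	params := map[string]string{
		"csi.storage.k8s.io/pvc/name":      "rbd-pvc",
		"csi.storage.k8s.io/pvc/namespace": "default",
		"csi.storage.k8s.io/pv/name":       "pvc-0a26dd28",
		"pool":                             "replicapool", // unrelated key, filtered out
	}

	// Only the PV/PVC/PVC-namespace keys survive the filter.
	fmt.Println(k8s.GetVolumeMetadata(params))

	// PrepareVolumeMetadata builds the same shape of map from plain values,
	// as RegenerateJournal does when an old PV is reattached.
	fmt.Println(k8s.PrepareVolumeMetadata("rbd-pvc-new", "default", ""))
}
```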
@@ -90,7 +90,7 @@ type Config struct {
EnableGRPCMetrics bool // option to enable grpc metrics

EnableProfiling bool // flag to enable profiling
IsControllerServer bool // if set to true start provisoner server
IsControllerServer bool // if set to true start provisioner server
IsNodeServer bool // if set to true start node server
Version bool // cephcsi version

@ -164,7 +164,7 @@ type KernelVersion struct {
|
||||
SubLevel int
|
||||
ExtraVersion int // prefix of the part after the first "-"
|
||||
Distribution string // component of full extraversion
|
||||
Backport bool // backports have a fixed version/patchlevel/sublevel
|
||||
Backport bool // backport have a fixed version/patchlevel/sublevel
|
||||
}
|
||||
|
||||
// parseKernelRelease parses a kernel release version string into:
|
||||
@ -202,9 +202,9 @@ func parseKernelRelease(release string) (int, int, int, int, error) {
|
||||
|
||||
// CheckKernelSupport checks the running kernel and comparing it to known
|
||||
// versions that have support for required features . Distributors of
|
||||
// enterprise Linux have backported quota support to previous versions. This
|
||||
// enterprise Linux have backport quota support to previous versions. This
|
||||
// function checks if the running kernel is one of the versions that have the
|
||||
// feature/fixes backported.
|
||||
// feature/fixes backport.
|
||||
//
|
||||
// `uname -r` (or Uname().Utsname.Release has a format like 1.2.3-rc.vendor
|
||||
// This can be slit up in the following components: - version (1) - patchlevel
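The comment above describes the `uname -r` layout; as a rough illustration (a hedged sketch, not the repository's actual parseKernelRelease), the components can be pulled apart like this:

package main

import (
	"fmt"
	"strings"
)

// parseRelease is an illustrative stand-in for parseKernelRelease: it splits
// "version.patchlevel.sublevel-extraversion" and keeps only the numeric
// prefix of the extraversion, as the comment above describes.
func parseRelease(release string) (version, patchlevel, sublevel, extra int, err error) {
	// Everything after the first "-" is the extraversion ("rc.vendor", "70.el9", ...).
	parts := strings.SplitN(release, "-", 2)
	if _, err = fmt.Sscanf(parts[0], "%d.%d.%d", &version, &patchlevel, &sublevel); err != nil {
		return 0, 0, 0, 0, fmt.Errorf("failed to parse %q: %w", release, err)
	}
	if len(parts) == 2 {
		// Error ignored deliberately: a non-numeric extraversion leaves extra at 0.
		_, _ = fmt.Sscanf(parts[1], "%d", &extra)
	}
	return version, patchlevel, sublevel, extra, nil
}

func main() {
	fmt.Println(parseRelease("5.14.0-70.el9")) // 5 14 0 70 <nil>
}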
@ -1,6 +1,6 @@
#!/bin/bash -e

# This script can be used to install/delete snapshotcontroller and snapshot beta CRD
# This script can be used to install/delete snapshotcontroller and snapshot CRD

SCRIPT_DIR="$(dirname "${0}")"
133 vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go generated vendored
@ -2,6 +2,7 @@ package aws

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

@ -24,11 +25,13 @@ type CredentialsCacheOptions struct {
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration

	// ExpiryWindowJitterFrac provides a mechanism for randomizing the expiration of credentials
	// within the configured ExpiryWindow by a random percentage. Valid values are between 0.0 and 1.0.
	// ExpiryWindowJitterFrac provides a mechanism for randomizing the
	// expiration of credentials within the configured ExpiryWindow by a random
	// percentage. Valid values are between 0.0 and 1.0.
	//
	// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac is 0.5 then credentials will be set to
	// expire between 30 to 60 seconds prior to their actual expiration time.
	// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
	// is 0.5 then credentials will be set to expire between 30 to 60 seconds
	// prior to their actual expiration time.
	//
	// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
	// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
@ -39,8 +42,19 @@ type CredentialsCacheOptions struct {

// CredentialsCache provides caching and concurrency safe credentials retrieval
// via the provider's retrieve method.
//
// CredentialsCache will look for optional interfaces on the Provider to adjust
// how the credential cache handles credentials caching.
//
// * HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
// credential refresh failures. This could return an updated Credentials
// value, or attempt another means of retrieving credentials.
//
// * AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
// credentials Expires is modified. This could modify how the Credentials
// Expires is adjusted based on the CredentialsCache ExpiryWindow option.
// Such as providing a floor not to reduce the Expires below.
type CredentialsCache struct {
	// provider is the CredentialProvider implementation to be wrapped by the CredentialCache.
	provider CredentialsProvider

	options CredentialsCacheOptions
@ -48,8 +62,9 @@ type CredentialsCache struct {
	sf singleflight.Group
}

// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider is expected to not be nil. A variadic
// list of one or more functions can be provided to modify the CredentialsCache configuration. This allows for
// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
// is expected to not be nil. A variadic list of one or more functions can be
// provided to modify the CredentialsCache configuration. This allows for
// configuration of credential expiry window and jitter.
func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
	options := CredentialsCacheOptions{}
@ -81,8 +96,8 @@ func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *C
//
// Returns an error if the provider's retrieve method returns an error.
func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
	if creds := p.getCreds(); creds != nil {
		return *creds, nil
	if creds, ok := p.getCreds(); ok && !creds.Expired() {
		return creds, nil
	}

	resCh := p.sf.DoChan("", func() (interface{}, error) {
@ -97,39 +112,64 @@ func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
}

func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
	if creds := p.getCreds(); creds != nil {
		return *creds, nil
	currCreds, ok := p.getCreds()
	if ok && !currCreds.Expired() {
		return currCreds, nil
	}

	creds, err := p.provider.Retrieve(ctx)
	if err == nil {
		if creds.CanExpire {
			randFloat64, err := sdkrand.CryptoRandFloat64()
			if err != nil {
				return Credentials{}, err
			}
			jitter := time.Duration(randFloat64 * p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
			creds.Expires = creds.Expires.Add(-(p.options.ExpiryWindow - jitter))
	newCreds, err := p.provider.Retrieve(ctx)
	if err != nil {
		handleFailToRefresh := defaultHandleFailToRefresh
		if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
			handleFailToRefresh = cs.HandleFailToRefresh
		}
		newCreds, err = handleFailToRefresh(ctx, currCreds, err)
		if err != nil {
			return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
		}
	}

	if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
		adjustExpiresBy := defaultAdjustExpiresBy
		if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
			adjustExpiresBy = cs.AdjustExpiresBy
		}

		p.creds.Store(&creds)
		randFloat64, err := sdkrand.CryptoRandFloat64()
		if err != nil {
			return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
		}

		var jitter time.Duration
		if p.options.ExpiryWindowJitterFrac > 0 {
			jitter = time.Duration(randFloat64 *
				p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
		}

		newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
		if err != nil {
			return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
		}
	}

	return creds, err
	p.creds.Store(&newCreds)
	return newCreds, nil
}
func (p *CredentialsCache) getCreds() *Credentials {
// getCreds returns the currently stored credentials and true. Returning false
// if no credentials were stored.
func (p *CredentialsCache) getCreds() (Credentials, bool) {
	v := p.creds.Load()
	if v == nil {
		return nil
		return Credentials{}, false
	}

	c := v.(*Credentials)
	if c != nil && c.HasKeys() && !c.Expired() {
		return c
	if c == nil || !c.HasKeys() {
		return Credentials{}, false
	}

	return nil
	return *c, true
}

// Invalidate will invalidate the cached credentials. The next call to Retrieve
@ -137,3 +177,42 @@ func (p *CredentialsCache) getCreds() *Credentials {
func (p *CredentialsCache) Invalidate() {
	p.creds.Store((*Credentials)(nil))
}
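For orientation, a short sketch of how this cache is typically wired up. NewCredentialsCache and CredentialsCacheOptions are the real APIs shown above; the stub provider and the option values are assumptions for illustration:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// stubProvider is a hypothetical CredentialsProvider that returns fixed
// credentials expiring in one hour; real code would use an STS or shared
// config provider instead.
type stubProvider struct{}

func (stubProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return aws.Credentials{
		AccessKeyID:     "AKID",
		SecretAccessKey: "SECRET",
		CanExpire:       true,
		Expires:         time.Now().Add(time.Hour),
	}, nil
}

func main() {
	// Concurrent Retrieve calls share a single refresh via singleflight, and
	// credentials are treated as expired 5-10 minutes early (window + jitter).
	cache := aws.NewCredentialsCache(stubProvider{}, func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 10 * time.Minute
		o.ExpiryWindowJitterFrac = 0.5
	})

	creds, err := cache.Retrieve(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(creds.AccessKeyID, creds.Expires)
}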

// HandleFailRefreshCredentialsCacheStrategy is an interface for
// CredentialsCache to allow a CredentialsProvider to control how failed
// credential refreshes are handled.
type HandleFailRefreshCredentialsCacheStrategy interface {
	// Given the previously cached Credentials, if any, and the refresh error,
	// may return a new or modified set of Credentials, or an error.
	//
	// Credential caches may use the default implementation if nil.
	HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
}

// defaultHandleFailToRefresh returns the passed in error.
func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
	return Credentials{}, err
}
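As a sketch of what this hook enables (the staleTolerantProvider type and the five-minute grace period are assumptions, not part of the SDK), a provider could keep serving the previously cached credentials briefly when a refresh fails:

// staleTolerantProvider is a hypothetical provider wrapper that tolerates
// transient refresh failures by extending the previously cached credentials.
type staleTolerantProvider struct {
	inner aws.CredentialsProvider
}

func (p staleTolerantProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return p.inner.Retrieve(ctx)
}

// HandleFailToRefresh satisfies HandleFailRefreshCredentialsCacheStrategy:
// on failure, reuse the stale credentials for five more minutes if available.
func (p staleTolerantProvider) HandleFailToRefresh(
	ctx context.Context, prev aws.Credentials, err error,
) (aws.Credentials, error) {
	if prev.HasKeys() {
		prev.CanExpire = true
		prev.Expires = time.Now().Add(5 * time.Minute)
		return prev, nil
	}
	return aws.Credentials{}, err
}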

// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialsCache
// to allow a CredentialsProvider to intercept adjustments to the Credentials
// expiry based on the expectations and use cases of the CredentialsProvider.
//
// Credential caches may use the default implementation if nil.
type AdjustExpiresByCredentialsCacheStrategy interface {
	// Given a Credentials value as input, applies any mutations and returns
	// the potentially updated Credentials, or an error.
	AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
}

// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires,
// and returns the updated credentials value. If the Credentials value's CanExpire
// is false, the passed in credentials are returned unchanged.
func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
	if !creds.CanExpire {
		return creds, nil
	}

	creds.Expires = creds.Expires.Add(dur)
	return creds, nil
}
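The CredentialsCache doc comment above mentions "providing a floor not to reduce the Expires below"; a hypothetical strategy doing exactly that (floorProvider and the one-minute floor are illustrative assumptions):

// floorProvider is a hypothetical provider wrapper that never lets the
// ExpiryWindow adjustment pull Expires closer than one minute from now.
type floorProvider struct {
	inner aws.CredentialsProvider
}

func (p floorProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return p.inner.Retrieve(ctx)
}

// AdjustExpiresBy satisfies AdjustExpiresByCredentialsCacheStrategy.
func (p floorProvider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (aws.Credentials, error) {
	if !creds.CanExpire {
		return creds, nil
	}
	adjusted := creds.Expires.Add(dur) // dur is negative: -(ExpiryWindow - jitter)
	if floor := time.Now().Add(time.Minute); adjusted.Before(floor) {
		adjusted = floor
	}
	creds.Expires = adjusted
	return creds, nil
}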
12 vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go generated vendored
@ -83,16 +83,20 @@ type Credentials struct {
	// Source of the credentials
	Source string

	// Time the credentials will expire.
	// States if the credentials can expire or not.
	CanExpire bool
	Expires time.Time

	// The time the credentials will expire at. Should be ignored if CanExpire
	// is false.
	Expires time.Time
}

// Expired returns if the credentials have expired.
func (v Credentials) Expired() bool {
	if v.CanExpire {
		// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
		// time is always based on reported wall-clock time.
		// Calling Round(0) on the current time will truncate the monotonic
		// reading only. Ensures credential expiry time is always based on
		// reported wall-clock time.
		return !v.Expires.After(sdk.NowTime().Round(0))
	}
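The Round(0) idiom is standard library behavior: Round and Truncate with a zero duration strip the monotonic clock reading from a time.Time, so the comparison above uses wall-clock time even if the system clock is adjusted. A standalone illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()
	fmt.Println(t)          // "... m=+0.000000001": monotonic reading attached
	fmt.Println(t.Round(0)) // monotonic reading stripped; wall clock only
}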
2 vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go generated vendored
@ -3,4 +3,4 @@
package aws

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.15.0"
const goModuleVersion = "1.16.2"
12 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md generated vendored
@ -1,3 +1,15 @@
# v1.1.9 (2022-03-30)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.1.8 (2022-03-24)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.1.7 (2022-03-23)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.1.6 (2022-03-08)

* **Dependency Update**: Updated to the latest SDK module versions
2 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go generated vendored
@ -3,4 +3,4 @@
package configsources

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.6"
const goModuleVersion = "1.1.9"
12 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md generated vendored
@ -1,3 +1,15 @@
# v2.4.3 (2022-03-30)

* **Dependency Update**: Updated to the latest SDK module versions

# v2.4.2 (2022-03-24)

* **Dependency Update**: Updated to the latest SDK module versions

# v2.4.1 (2022-03-23)

* **Dependency Update**: Updated to the latest SDK module versions

# v2.4.0 (2022-03-08)

* **Feature**: Updated `github.com/aws/smithy-go` to latest version
2 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go generated vendored
@ -3,4 +3,4 @@
package endpoints

// goModuleVersion is the tagged release for this module
const goModuleVersion = "2.4.0"
const goModuleVersion = "2.4.3"
2 vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go generated vendored
@ -29,5 +29,5 @@ func Float64(reader io.Reader) (float64, error) {
// CryptoRandFloat64 returns a random float64 obtained from the crypto rand
// source.
func CryptoRandFloat64() (float64, error) {
	return Float64(rand.Reader)
	return Float64(Reader)
}
12 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md generated vendored
@ -1,3 +1,15 @@
# v1.9.3 (2022-03-30)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.9.2 (2022-03-24)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.9.1 (2022-03-23)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.9.0 (2022-03-08)

* **Feature**: Updated `github.com/aws/smithy-go` to latest version
@ -3,4 +3,4 @@
package presignedurl

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.9.0"
const goModuleVersion = "1.9.3"
12 vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md generated vendored
@ -1,3 +1,15 @@
# v1.16.3 (2022-03-30)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.16.2 (2022-03-24)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.16.1 (2022-03-23)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.16.0 (2022-03-08)

* **Feature**: Updated `github.com/aws/smithy-go` to latest version
2 vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go generated vendored
@ -3,4 +3,4 @@
package sts

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.0"
const goModuleVersion = "1.16.3"
4 vendor/github.com/aws/smithy-go/CHANGELOG.md generated vendored
@ -1,3 +1,7 @@
# Release (v1.11.2)

* No change notes available for this release.

# Release (v1.11.1)

## Module Highlights
3 vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go generated vendored
@ -20,6 +20,9 @@ func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIV

func (u URIValue) modifyURI(value string) (err error) {
	*u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false)
	if err != nil {
		return err
	}
	*u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true)
	return err
}
2 vendor/github.com/aws/smithy-go/go_module_metadata.go generated vendored
@ -3,4 +3,4 @@
package smithy

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.11.1"
const goModuleVersion = "1.11.2"
12 vendor/modules.txt vendored
@ -53,7 +53,7 @@ github.com/aws/aws-sdk-go/service/sso
github.com/aws/aws-sdk-go/service/sso/ssoiface
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/aws/aws-sdk-go-v2 v1.15.0
# github.com/aws/aws-sdk-go-v2 v1.16.2
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/aws
github.com/aws/aws-sdk-go-v2/aws/defaults
@ -70,21 +70,21 @@ github.com/aws/aws-sdk-go-v2/internal/sdk
github.com/aws/aws-sdk-go-v2/internal/strings
github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
github.com/aws/aws-sdk-go-v2/internal/timeconv
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/configsources
# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0
# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
# github.com/aws/aws-sdk-go-v2/service/sts v1.16.0
# github.com/aws/aws-sdk-go-v2/service/sts v1.16.3
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/sts
github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sts/types
# github.com/aws/smithy-go v1.11.1
# github.com/aws/smithy-go v1.11.2
## explicit; go 1.15
github.com/aws/smithy-go
github.com/aws/smithy-go/document