Merge pull request #19 from ceph/devel

Sync rhs:devel with ceph:devel
OpenShift Merge Robot committed on 2021-08-25 22:37:02 -04:00 (via GitHub)
commit 21760def12
31 changed files with 427 additions and 123 deletions

View File

@ -38,6 +38,8 @@ pull_request_rules:
- base~=^(devel)|(release-.+)$ - base~=^(devel)|(release-.+)$
- "#approved-reviews-by>=2" - "#approved-reviews-by>=2"
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell" - "status-success=codespell"
- "status-success=multi-arch-build" - "status-success=multi-arch-build"
- "status-success=go-test" - "status-success=go-test"
@ -64,7 +66,7 @@ pull_request_rules:
- label!=DNM - label!=DNM
- label=ready-to-merge - label=ready-to-merge
- base~=^(devel)|(release-.+)$ - base~=^(devel)|(release-.+)$
- "#approved-reviews-by>=1" - "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell" - "status-success=codespell"
- "status-success=multi-arch-build" - "status-success=multi-arch-build"
- "status-success=go-test" - "status-success=go-test"
@ -122,7 +124,9 @@ pull_request_rules:
- base=release-v2.0 - base=release-v2.0
- label!=DNM - label!=DNM
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
actions: actions:
merge: {} merge: {}
dismiss_reviews: {} dismiss_reviews: {}
@ -142,7 +146,9 @@ pull_request_rules:
- base=release-v2.1 - base=release-v2.1
- label!=DNM - label!=DNM
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
actions: actions:
merge: {} merge: {}
dismiss_reviews: {} dismiss_reviews: {}
@ -162,7 +168,9 @@ pull_request_rules:
- base=release-v3.0 - base=release-v3.0
- label!=DNM - label!=DNM
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
actions: actions:
merge: {} merge: {}
dismiss_reviews: {} dismiss_reviews: {}
@ -182,7 +190,9 @@ pull_request_rules:
- base=release-v3.1 - base=release-v3.1
- label!=DNM - label!=DNM
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=multi-arch-build" - "status-success=multi-arch-build"
- "status-success=commitlint" - "status-success=commitlint"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.20" - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
@ -212,7 +222,9 @@ pull_request_rules:
- author=mergify[bot] - author=mergify[bot]
- base=release-v3.2 - base=release-v3.2
- label!=DNM - label!=DNM
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell" - "status-success=codespell"
- "status-success=multi-arch-build" - "status-success=multi-arch-build"
- "status-success=go-test" - "status-success=go-test"
@ -249,7 +261,9 @@ pull_request_rules:
- author=mergify[bot] - author=mergify[bot]
- base=release-v3.3 - base=release-v3.3
- label!=DNM - label!=DNM
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell" - "status-success=codespell"
- "status-success=multi-arch-build" - "status-success=multi-arch-build"
- "status-success=go-test" - "status-success=go-test"
@ -286,7 +300,9 @@ pull_request_rules:
- author=mergify[bot] - author=mergify[bot]
- base=release-v3.4 - base=release-v3.4
- label!=DNM - label!=DNM
- "#approved-reviews-by>=1" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "status-success=codespell" - "status-success=codespell"
- "status-success=multi-arch-build" - "status-success=multi-arch-build"
- "status-success=go-test" - "status-success=go-test"
@ -321,6 +337,8 @@ pull_request_rules:
- label!=DNM - label!=DNM
- base=ci/centos - base=ci/centos
- "#approved-reviews-by>=2" - "#approved-reviews-by>=2"
- "approved-reviews-by=@ceph/ceph-csi-contributors"
- "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "status-success=ci/centos/job-validation" - "status-success=ci/centos/job-validation"
- "status-success=ci/centos/jjb-validate" - "status-success=ci/centos/jjb-validate"
@ -334,7 +352,7 @@ pull_request_rules:
- label!=DNM - label!=DNM
- label=ready-to-merge - label=ready-to-merge
- base=ci/centos - base=ci/centos
- "#approved-reviews-by>=1" - "approved-reviews-by=@ceph/ceph-csi-maintainers"
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "status-success=ci/centos/job-validation" - "status-success=ci/centos/job-validation"
- "status-success=ci/centos/jjb-validate" - "status-success=ci/centos/jjb-validate"

View File

@ -1,7 +1,9 @@
# Ceph CSI # Ceph CSI
[![GitHub release](https://img.shields.io/github/release/ceph/ceph-csi/all.svg)](https://github.com/ceph/ceph-csi/releases)
[![Go Report [![Go Report
Card](https://goreportcard.com/badge/github.com/ceph/ceph-csi)](https://goreportcard.com/report/github.com/ceph/ceph-csi) Card](https://goreportcard.com/badge/github.com/ceph/ceph-csi)](https://goreportcard.com/report/github.com/ceph/ceph-csi)
[![TODOs](https://badgen.net/https/api.tickgit.com/badgen/github.com/ceph/ceph-csi/devel)](https://www.tickgit.com/browse?repo=github.com/ceph/ceph-csi&branch=devel)
- [Ceph CSI](#ceph-csi) - [Ceph CSI](#ceph-csi)
- [Overview](#overview) - [Overview](#overview)

View File

@ -52,7 +52,7 @@ ROOK_CEPH_CLUSTER_IMAGE=docker.io/ceph/ceph:v16
CSI_ATTACHER_VERSION=v3.3.0 CSI_ATTACHER_VERSION=v3.3.0
CSI_SNAPSHOTTER_VERSION=v4.2.0 CSI_SNAPSHOTTER_VERSION=v4.2.0
CSI_PROVISIONER_VERSION=v3.0.0 CSI_PROVISIONER_VERSION=v3.0.0
CSI_RESIZER_VERSION=v1.2.0 CSI_RESIZER_VERSION=v1.3.0
CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.3.0 CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.3.0
# e2e settings # e2e settings

View File

@ -179,7 +179,7 @@ provisioner:
enabled: true enabled: true
image: image:
repository: k8s.gcr.io/sig-storage/csi-resizer repository: k8s.gcr.io/sig-storage/csi-resizer
tag: v1.2.0 tag: v1.3.0
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
resources: {} resources: {}

View File

@ -138,6 +138,7 @@ charts and their default values.
| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` | | `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` |
| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` | | `pluginSocketFile` | The filename of the plugin socket | `csi.sock` |
| `kubeletDir` | kubelet working directory | `/var/lib/kubelet` | | `kubeletDir` | kubelet working directory | `/var/lib/kubelet` |
| `cephLogDir` | Host path location for ceph client process logs, e.g. rbd-nbd | `/var/log/ceph` |
| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` | | `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` | | `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` | | `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |

View File

@ -124,6 +124,8 @@ spec:
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
- name: keys-tmp-dir - name: keys-tmp-dir
mountPath: /tmp/csi/keys mountPath: /tmp/csi/keys
- name: ceph-logdir
mountPath: /var/log/ceph
resources: resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }} {{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.nodeplugin.httpMetrics.enabled }} {{- if .Values.nodeplugin.httpMetrics.enabled }}
@ -169,6 +171,10 @@ spec:
hostPath: hostPath:
path: {{ .Values.kubeletDir }}/pods path: {{ .Values.kubeletDir }}/pods
type: DirectoryOrCreate type: DirectoryOrCreate
- name: ceph-logdir
hostPath:
path: {{ .Values.cephLogDir }}
type: DirectoryOrCreate
- name: host-dev - name: host-dev
hostPath: hostPath:
path: /dev path: /dev

View File

@ -42,6 +42,8 @@ spec:
readOnly: true readOnly: true
- pathPrefix: '/lib/modules' - pathPrefix: '/lib/modules'
readOnly: true readOnly: true
- pathPrefix: '{{ .Values.cephLogDir }}'
readOnly: false
- pathPrefix: '{{ .Values.kubeletDir }}' - pathPrefix: '{{ .Values.kubeletDir }}'
readOnly: false readOnly: false
{{- end }} {{- end }}

View File

@ -216,7 +216,7 @@ provisioner:
enabled: true enabled: true
image: image:
repository: k8s.gcr.io/sig-storage/csi-resizer repository: k8s.gcr.io/sig-storage/csi-resizer
tag: v1.2.0 tag: v1.3.0
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
resources: {} resources: {}
@ -382,6 +382,8 @@ provisionerSocketFile: csi-provisioner.sock
pluginSocketFile: csi.sock pluginSocketFile: csi.sock
# kubelet working directory,can be set using `--root-dir` when starting kubelet. # kubelet working directory,can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet kubeletDir: /var/lib/kubelet
# Host path location for ceph client process logs, e.g. rbd-nbd
cephLogDir: /var/log/ceph
# Name of the csi-driver # Name of the csi-driver
driverName: rbd.csi.ceph.com driverName: rbd.csi.ceph.com
# Name of the configmap used for state # Name of the configmap used for state

View File

@ -60,7 +60,7 @@ spec:
- name: socket-dir - name: socket-dir
mountPath: /csi mountPath: /csi
- name: csi-resizer - name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0 image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
args: args:
- "--csi-address=$(ADDRESS)" - "--csi-address=$(ADDRESS)"
- "--v=5" - "--v=5"

View File

@ -38,6 +38,8 @@ spec:
readOnly: true readOnly: true
- pathPrefix: '/var/lib/kubelet/pods' - pathPrefix: '/var/lib/kubelet/pods'
readOnly: false readOnly: false
- pathPrefix: '/var/log/ceph'
readOnly: false
- pathPrefix: '/var/lib/kubelet/plugins/rbd.csi.ceph.com' - pathPrefix: '/var/lib/kubelet/plugins/rbd.csi.ceph.com'
readOnly: false readOnly: false
- pathPrefix: '/var/lib/kubelet/plugins_registry' - pathPrefix: '/var/lib/kubelet/plugins_registry'

View File

@ -97,7 +97,7 @@ spec:
- name: socket-dir - name: socket-dir
mountPath: /csi mountPath: /csi
- name: csi-resizer - name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0 image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
args: args:
- "--csi-address=$(ADDRESS)" - "--csi-address=$(ADDRESS)"
- "--v=5" - "--v=5"

View File

@ -111,6 +111,8 @@ spec:
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
- name: keys-tmp-dir - name: keys-tmp-dir
mountPath: /tmp/csi/keys mountPath: /tmp/csi/keys
- name: ceph-logdir
mountPath: /var/log/ceph
- name: liveness-prometheus - name: liveness-prometheus
securityContext: securityContext:
privileged: true privileged: true
@ -146,6 +148,10 @@ spec:
hostPath: hostPath:
path: /var/lib/kubelet/pods path: /var/lib/kubelet/pods
type: DirectoryOrCreate type: DirectoryOrCreate
- name: ceph-logdir
hostPath:
path: /var/log/ceph
type: DirectoryOrCreate
- name: registration-dir - name: registration-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins_registry/ path: /var/lib/kubelet/plugins_registry/

View File

@ -257,6 +257,10 @@ Once your PR has been submitted for review the following criteria will
need to be met before it will be merged: need to be met before it will be merged:
* Each PR needs reviews accepting the change from at least two developers for merging. * Each PR needs reviews accepting the change from at least two developers for merging.
* Each PR needs approval from
[ceph-csi-contributors](https://github.com/orgs/ceph/teams/ceph-csi-contributors)
and
[ceph-csi-maintainers](https://github.com/orgs/ceph/teams/ceph-csi-maintainers).
* It is common to request reviews from those reviewers automatically suggested * It is common to request reviews from those reviewers automatically suggested
by GitHub. by GitHub.
* Each PR needs to have been open for at least 24 working hours to allow for * Each PR needs to have been open for at least 24 working hours to allow for

View File

@ -3,6 +3,7 @@
- [RBD NBD Mounter](#rbd-nbd-mounter) - [RBD NBD Mounter](#rbd-nbd-mounter)
- [Overview](#overview) - [Overview](#overview)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Configuring logging path](#configuring-logging-path)
- [Status](#status) - [Status](#status)
- [Support Matrix](#support-matrix) - [Support Matrix](#support-matrix)
- [CSI spec and Kubernetes version compatibility](#csi-spec-and-kubernetes-version-compatibility) - [CSI spec and Kubernetes version compatibility](#csi-spec-and-kubernetes-version-compatibility)
@ -28,6 +29,41 @@ client-side, which is inside the `csi-rbdplugin` node plugin.
To use the rbd-nbd mounter for RBD-backed PVs, set `mounter` to `rbd-nbd` To use the rbd-nbd mounter for RBD-backed PVs, set `mounter` to `rbd-nbd`
in the StorageClass. in the StorageClass.
### Configuring logging path
If you are using the default rbd nodeplugin daemonset and StorageClass
templates, `cephLogDir` will be `/var/log/ceph`; this directory is a
host path, and the default log file path will be
`/var/log/ceph/rbd-nbd-<volID>.log`. rbd-nbd creates a log file per volume
under the `cephLogDir` path on NodeStage (map) and removes it on the
corresponding NodeUnstage (unmap).

If you need a customized log path, do the following:
- Edit the daemonset templates to change the `cephLogDir`
- If you are using helm charts, you can set the `cephLogDir` key:
```
helm install --set cephLogDir=/var/log/ceph-csi/my-dir
```
- For standard templates, edit [csi-rbdplugin.yaml](../deploy/rbd/kubernetes/csi-rbdplugin.yaml)
  to update the `hostPath` for `ceph-logdir`, and also edit the PSP [csi-nodeplugin-psp.yaml](../deploy/rbd/kubernetes/csi-nodeplugin-psp.yaml)
  to update the `pathPrefix` spec entries.
- Update the StorageClass with the customized log directory path
- Now update the rbd StorageClass for `cephLogDir`, for example:
```
cephLogDir: "/var/log/prod-A-logs"
```
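
For illustration, a minimal StorageClass sketch combining the `rbd-nbd` mounter
with a customized `cephLogDir` could look like the snippet below. The
StorageClass name, `clusterID`, pool, and secret names are placeholders; refer
to the `examples/rbd/storageclass.yaml` template for the complete set of
parameters:

```
# Illustrative sketch only; replace the placeholder values with your
# cluster details.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-nbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <cluster-id>
  pool: <rbd-pool>
  imageFeatures: layering
  mounter: rbd-nbd
  # host-path directory on the node used for the rbd-nbd log files
  cephLogDir: "/var/log/prod-A-logs"
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
```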
`NOTE`:

- On uninstall, make sure to delete `cephLogDir` on the host manually to free up
  space, in case any stale log files are left behind.
- If you do not need the rbd-nbd logs to be persistent, simply update the
  StorageClass `cephLogDir` to use a non-persistent path.
## Status ## Status
Rbd-nbd support status: **Alpha** Rbd-nbd support status: **Alpha**

View File

@ -406,3 +406,22 @@ func calculateSHA512sum(f *framework.Framework, app *v1.Pod, filePath string, op
return checkSum, nil return checkSum, nil
} }
// getKernelVersionFromDaemonset gets the kernel version from the specified container.
func getKernelVersionFromDaemonset(f *framework.Framework, ns, dsn, cn string) (string, error) {
selector, err := getDaemonSetLabelSelector(f, ns, dsn)
if err != nil {
return "", err
}
opt := metav1.ListOptions{
LabelSelector: selector,
}
kernelRelease, stdErr, err := execCommandInContainer(f, "uname -r", ns, cn, &opt)
if err != nil || stdErr != "" {
return "", err
}
return kernelRelease, nil
}

View File

@ -57,6 +57,8 @@ var (
appBlockSmartClonePath = rbdExamplePath + "block-pod-clone.yaml" appBlockSmartClonePath = rbdExamplePath + "block-pod-clone.yaml"
snapshotPath = rbdExamplePath + "snapshot.yaml" snapshotPath = rbdExamplePath + "snapshot.yaml"
defaultCloneCount = 10 defaultCloneCount = 10
nbdMapOptions = "debug-rbd=20"
) )
func deployRBDPlugin() { func deployRBDPlugin() {
@ -178,6 +180,7 @@ func validateRBDImageCount(f *framework.Framework, count int, pool string) {
var _ = Describe("RBD", func() { var _ = Describe("RBD", func() {
f := framework.NewDefaultFramework("rbd") f := framework.NewDefaultFramework("rbd")
var c clientset.Interface var c clientset.Interface
var kernelRelease string
// deploy RBD CSI // deploy RBD CSI
BeforeEach(func() { BeforeEach(func() {
if !testRBD || upgradeTesting { if !testRBD || upgradeTesting {
@ -232,6 +235,27 @@ var _ = Describe("RBD", func() {
e2elog.Failf("failed to create node secret with error %v", err) e2elog.Failf("failed to create node secret with error %v", err)
} }
deployVault(f.ClientSet, deployTimeout) deployVault(f.ClientSet, deployTimeout)
// wait for provisioner deployment
err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
}
// wait for nodeplugin daemonset pods
err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
}
kernelRelease, err = getKernelVersionFromDaemonset(f, cephCSINamespace, rbdDaemonsetName, "csi-rbdplugin")
if err != nil {
e2elog.Failf("failed to get the kernel version with error %v", err)
}
// default io-timeout=0, needs kernel >= 5.4
if !util.CheckKernelSupport(kernelRelease, nbdZeroIOtimeoutSupport) {
nbdMapOptions = "debug-rbd=20,io-timeout=330"
}
}) })
AfterEach(func() { AfterEach(func() {
@ -302,20 +326,6 @@ var _ = Describe("RBD", func() {
Context("Test RBD CSI", func() { Context("Test RBD CSI", func() {
It("Test RBD CSI", func() { It("Test RBD CSI", func() {
By("checking provisioner deployment is running", func() {
err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
}
})
By("checking nodeplugin deamonset pods are running", func() {
err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
}
})
// test only if ceph-csi is deployed via helm // test only if ceph-csi is deployed via helm
if helmTest { if helmTest {
By("verify PVC and app binding on helm installation", func() { By("verify PVC and app binding on helm installation", func() {
@ -410,7 +420,10 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"mounter": "rbd-nbd"}, map[string]string{
"mounter": "rbd-nbd",
"mapOptions": nbdMapOptions,
},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err) e2elog.Failf("failed to create storageclass with error %v", err)
@ -432,10 +445,6 @@ var _ = Describe("RBD", func() {
}) })
By("Resize rbd-nbd PVC and check application directory size", func() { By("Resize rbd-nbd PVC and check application directory size", func() {
kernelRelease, err := util.GetKernelVersion()
if err != nil {
e2elog.Failf("failed to get kernel version with error %v", err)
}
if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) { if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) {
err := deleteResource(rbdExamplePath + "storageclass.yaml") err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil { if err != nil {
@ -447,7 +456,10 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"mounter": "rbd-nbd"}, map[string]string{
"mounter": "rbd-nbd",
"mapOptions": nbdMapOptions,
},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err) e2elog.Failf("failed to create storageclass with error %v", err)
@ -489,7 +501,10 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"mounter": "rbd-nbd"}, map[string]string{
"mounter": "rbd-nbd",
"mapOptions": nbdMapOptions,
},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err) e2elog.Failf("failed to create storageclass with error %v", err)
@ -640,7 +655,11 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"mounter": "rbd-nbd", "encrypted": "true"}, map[string]string{
"mounter": "rbd-nbd",
"mapOptions": nbdMapOptions,
"encrypted": "true",
},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err) e2elog.Failf("failed to create storageclass with error %v", err)
@ -993,7 +1012,11 @@ var _ = Describe("RBD", func() {
f, f,
defaultSCName, defaultSCName,
nil, nil,
map[string]string{"imageFeatures": "layering,journaling,exclusive-lock", "mounter": "rbd-nbd"}, map[string]string{
"imageFeatures": "layering,journaling,exclusive-lock",
"mounter": "rbd-nbd",
"mapOptions": nbdMapOptions,
},
deletePolicy) deletePolicy)
if err != nil { if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err) e2elog.Failf("failed to create storageclass with error %v", err)

View File

@ -40,6 +40,28 @@ var nbdResizeSupport = []util.KernelVersion{
}, // standard 5.3+ versions }, // standard 5.3+ versions
} }
// To use `io-timeout=0` we need
// www.mail-archive.com/linux-block@vger.kernel.org/msg38060.html
// nolint:gomnd // numbers specify Kernel versions.
var nbdZeroIOtimeoutSupport = []util.KernelVersion{
{
Version: 5,
PatchLevel: 4,
SubLevel: 0,
ExtraVersion: 0,
Distribution: "",
Backport: false,
}, // standard 5.4+ versions
{
Version: 4,
PatchLevel: 18,
SubLevel: 0,
ExtraVersion: 305,
Distribution: ".el8",
Backport: true,
}, // CentOS 8.4
}
func imageSpec(pool, image string) string { func imageSpec(pool, image string) string {
if radosNamespace != "" { if radosNamespace != "" {
return pool + "/" + radosNamespace + "/" + image return pool + "/" + radosNamespace + "/" + image

View File

@ -69,6 +69,12 @@ parameters:
# on supported nodes # on supported nodes
# mounter: rbd-nbd # mounter: rbd-nbd
# (optional) ceph client log location, e.g. rbd-nbd
# By default, the host path /var/log/ceph of the node is bind-mounted into
# the csi-rbdplugin pod at the /var/log/ceph mount path. See docs/rbd-nbd.md
# for available configuration options.
# cephLogDir: /var/log/ceph
# (optional) Prefix to use for naming RBD images. # (optional) Prefix to use for naming RBD images.
# If omitted, defaults to "csi-vol-". # If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-" # volumeNamePrefix: "foo-bar-"

go.mod
View File

@ -12,7 +12,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
github.com/kubernetes-csi/csi-lib-utils v0.9.1 github.com/kubernetes-csi/csi-lib-utils v0.10.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec
github.com/onsi/ginkgo v1.16.4 github.com/onsi/ginkgo v1.16.4

go.sum
View File

@ -187,7 +187,6 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
@ -638,8 +637,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8= github.com/kubernetes-csi/csi-lib-utils v0.10.0 h1:Aqm8X81eCzzfH/bvIEqSWtcbK9HF9NbFk4d+le1snVA=
github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M= github.com/kubernetes-csi/csi-lib-utils v0.10.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=

View File

@ -25,10 +25,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/manager/signals"
) )
// ContollerManager is the interface that will wrap Add function. // Manager is the interface that will wrap Add function.
// The New controllers which gets added, as to implement Add function to get // The New controllers which gets added, as to implement Add function to get
// started by the manager. // started by the manager.
type ContollerManager interface { type Manager interface {
Add(manager.Manager, Config) error Add(manager.Manager, Config) error
} }
@ -39,7 +39,7 @@ type Config struct {
} }
// ControllerList holds the list of managers need to be started. // ControllerList holds the list of managers need to be started.
var ControllerList []ContollerManager var ControllerList []Manager
// addToManager calls the registered managers Add method. // addToManager calls the registered managers Add method.
func addToManager(mgr manager.Manager, config Config) error { func addToManager(mgr manager.Manager, config Config) error {

View File

@ -45,8 +45,8 @@ type ReconcilePersistentVolume struct {
} }
var ( var (
_ reconcile.Reconciler = &ReconcilePersistentVolume{} _ reconcile.Reconciler = &ReconcilePersistentVolume{}
_ ctrl.ContollerManager = &ReconcilePersistentVolume{} _ ctrl.Manager = &ReconcilePersistentVolume{}
) )
// Init will add the ReconcilePersistentVolume to the list. // Init will add the ReconcilePersistentVolume to the list.

View File

@ -274,6 +274,10 @@ func (ns *NodeServer) NodeStageVolume(
volOptions.MapOptions = req.GetVolumeContext()["mapOptions"] volOptions.MapOptions = req.GetVolumeContext()["mapOptions"]
volOptions.UnmapOptions = req.GetVolumeContext()["unmapOptions"] volOptions.UnmapOptions = req.GetVolumeContext()["unmapOptions"]
volOptions.Mounter = req.GetVolumeContext()["mounter"] volOptions.Mounter = req.GetVolumeContext()["mounter"]
volOptions.LogDir = req.GetVolumeContext()["cephLogDir"]
if volOptions.LogDir == "" {
volOptions.LogDir = defaultLogDir
}
err = volOptions.Connect(cr) err = volOptions.Connect(cr)
if err != nil { if err != nil {
@ -818,13 +822,17 @@ func (ns *NodeServer) NodeUnstageVolume(
// Unmapping rbd device // Unmapping rbd device
imageSpec := imgInfo.String() imageSpec := imgInfo.String()
if err = detachRBDImageOrDeviceSpec(
ctx, imageSpec, dArgs := detachRBDImageArgs{
true, imageOrDeviceSpec: imageSpec,
imgInfo.NbdAccess, isImageSpec: true,
imgInfo.Encrypted, isNbd: imgInfo.NbdAccess,
req.GetVolumeId(), encrypted: imgInfo.Encrypted,
imgInfo.UnmapOptions); err != nil { volumeID: req.GetVolumeId(),
unmapOptions: imgInfo.UnmapOptions,
logDir: imgInfo.LogDir,
}
if err = detachRBDImageOrDeviceSpec(ctx, dArgs); err != nil {
util.ErrorLog( util.ErrorLog(
ctx, ctx,
"error unmapping volume (%s) from staging path (%s): (%v)", "error unmapping volume (%s) from staging path (%s): (%v)",

View File

@ -48,10 +48,22 @@ const (
rbdUnmapCmdNbdMissingMap = "rbd-nbd: %s is not mapped" rbdUnmapCmdNbdMissingMap = "rbd-nbd: %s is not mapped"
rbdMapConnectionTimeout = "Connection timed out" rbdMapConnectionTimeout = "Connection timed out"
defaultNbdReAttachTimeout = 300 defaultNbdReAttachTimeout = 300 /* in seconds */
defaultNbdIOTimeout = 0 /* do not abort the requests */
useNbdNetlink = "try-netlink" // The default way of creating nbd devices via rbd-nbd is through the
// legacy ioctl interface, to take advantage of netlink features we
// should specify `try-netlink` flag explicitly.
useNbdNetlink = "try-netlink"
// `reattach-timeout` of rbd-nbd is to tweak NBD_ATTR_DEAD_CONN_TIMEOUT.
// It specifies how long the device should be held waiting for the
// userspace process to come back to life.
setNbdReattach = "reattach-timeout" setNbdReattach = "reattach-timeout"
// `io-timeout` of rbd-nbd is to tweak NBD_ATTR_TIMEOUT. It specifies
// how long the IO should wait to get handled before bailing out.
setNbdIOTimeout = "io-timeout"
) )
var hasNBD = false var hasNBD = false
@ -81,6 +93,16 @@ type nbdDeviceInfo struct {
Device string `json:"device"` Device string `json:"device"`
} }
type detachRBDImageArgs struct {
imageOrDeviceSpec string
isImageSpec bool
isNbd bool
encrypted bool
volumeID string
unmapOptions string
logDir string
}
// rbdGetDeviceList queries rbd about mapped devices and returns a list of rbdDeviceInfo // rbdGetDeviceList queries rbd about mapped devices and returns a list of rbdDeviceInfo
// It will selectively list devices mapped using krbd or nbd as specified by accessType. // It will selectively list devices mapped using krbd or nbd as specified by accessType.
func rbdGetDeviceList(ctx context.Context, accessType string) ([]rbdDeviceInfo, error) { func rbdGetDeviceList(ctx context.Context, accessType string) ([]rbdDeviceInfo, error) {
@ -239,6 +261,9 @@ func appendDeviceTypeAndOptions(cmdArgs []string, isNbd, isThick bool, userOptio
if !strings.Contains(userOptions, setNbdReattach) { if !strings.Contains(userOptions, setNbdReattach) {
cmdArgs = append(cmdArgs, "--options", fmt.Sprintf("%s=%d", setNbdReattach, defaultNbdReAttachTimeout)) cmdArgs = append(cmdArgs, "--options", fmt.Sprintf("%s=%d", setNbdReattach, defaultNbdReAttachTimeout))
} }
if !strings.Contains(userOptions, setNbdIOTimeout) {
cmdArgs = append(cmdArgs, "--options", fmt.Sprintf("%s=%d", setNbdIOTimeout, defaultNbdIOTimeout))
}
} }
if isThick { if isThick {
// When an image is thick-provisioned, any discard/unmap/trim // When an image is thick-provisioned, any discard/unmap/trim
@ -263,6 +288,9 @@ func appendRbdNbdCliOptions(cmdArgs []string, userOptions string) []string {
if !strings.Contains(userOptions, setNbdReattach) { if !strings.Contains(userOptions, setNbdReattach) {
cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%d", setNbdReattach, defaultNbdReAttachTimeout)) cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%d", setNbdReattach, defaultNbdReAttachTimeout))
} }
if !strings.Contains(userOptions, setNbdIOTimeout) {
cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%d", setNbdIOTimeout, defaultNbdIOTimeout))
}
if userOptions != "" { if userOptions != "" {
options := strings.Split(userOptions, ",") options := strings.Split(userOptions, ",")
for _, opt := range options { for _, opt := range options {
@ -296,6 +324,11 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
util.WarningLog(ctx, "failed to detect if image %q is thick-provisioned: %v", volOpt, err) util.WarningLog(ctx, "failed to detect if image %q is thick-provisioned: %v", volOpt, err)
} }
if isNbd {
mapArgs = append(mapArgs, "--log-file",
getCephClientLogFileName(volOpt.VolID, volOpt.LogDir, "rbd-nbd"))
}
cli := rbd cli := rbd
if device != "" { if device != "" {
// TODO: use rbd cli for attach/detach in the future // TODO: use rbd cli for attach/detach in the future
@ -317,14 +350,16 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
util.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr) util.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr)
// unmap rbd image if connection timeout // unmap rbd image if connection timeout
if strings.Contains(err.Error(), rbdMapConnectionTimeout) { if strings.Contains(err.Error(), rbdMapConnectionTimeout) {
detErr := detachRBDImageOrDeviceSpec( dArgs := detachRBDImageArgs{
ctx, imageOrDeviceSpec: imagePath,
imagePath, isImageSpec: true,
true, isNbd: isNbd,
isNbd, encrypted: volOpt.isEncrypted(),
volOpt.isEncrypted(), volumeID: volOpt.VolID,
volOpt.VolID, unmapOptions: volOpt.UnmapOptions,
volOpt.UnmapOptions) logDir: volOpt.LogDir,
}
detErr := detachRBDImageOrDeviceSpec(ctx, dArgs)
if detErr != nil { if detErr != nil {
util.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr) util.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
} }
@ -367,22 +402,29 @@ func detachRBDDevice(ctx context.Context, devicePath, volumeID, unmapOptions str
nbdType = true nbdType = true
} }
return detachRBDImageOrDeviceSpec(ctx, devicePath, false, nbdType, encrypted, volumeID, unmapOptions) dArgs := detachRBDImageArgs{
imageOrDeviceSpec: devicePath,
isImageSpec: false,
isNbd: nbdType,
encrypted: encrypted,
volumeID: volumeID,
unmapOptions: unmapOptions,
}
return detachRBDImageOrDeviceSpec(ctx, dArgs)
} }
// detachRBDImageOrDeviceSpec detaches an rbd imageSpec or devicePath, with additional checking // detachRBDImageOrDeviceSpec detaches an rbd imageSpec or devicePath, with additional checking
// when imageSpec is used to decide if image is already unmapped. // when imageSpec is used to decide if image is already unmapped.
func detachRBDImageOrDeviceSpec( func detachRBDImageOrDeviceSpec(
ctx context.Context, ctx context.Context,
imageOrDeviceSpec string, dArgs detachRBDImageArgs) error {
isImageSpec, isNbd, encrypted bool, if dArgs.encrypted {
volumeID, unmapOptions string) error { mapperFile, mapperPath := util.VolumeMapper(dArgs.volumeID)
if encrypted {
mapperFile, mapperPath := util.VolumeMapper(volumeID)
mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath) mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath)
if err != nil { if err != nil {
util.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s", util.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s",
mapperPath, imageOrDeviceSpec, err) mapperPath, dArgs.imageOrDeviceSpec, err)
return err return err
} }
@ -391,31 +433,38 @@ func detachRBDImageOrDeviceSpec(
err = util.CloseEncryptedVolume(ctx, mapperFile) err = util.CloseEncryptedVolume(ctx, mapperFile)
if err != nil { if err != nil {
util.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s", util.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s",
mapperPath, imageOrDeviceSpec, err) mapperPath, dArgs.imageOrDeviceSpec, err)
return err return err
} }
imageOrDeviceSpec = mappedDevice dArgs.imageOrDeviceSpec = mappedDevice
} }
} }
unmapArgs := []string{"unmap", imageOrDeviceSpec} unmapArgs := []string{"unmap", dArgs.imageOrDeviceSpec}
unmapArgs = appendDeviceTypeAndOptions(unmapArgs, isNbd, false, unmapOptions) unmapArgs = appendDeviceTypeAndOptions(unmapArgs, dArgs.isNbd, false, dArgs.unmapOptions)
_, stderr, err := util.ExecCommand(ctx, rbd, unmapArgs...) _, stderr, err := util.ExecCommand(ctx, rbd, unmapArgs...)
if err != nil { if err != nil {
// Messages for krbd and nbd differ, hence checking either of them for missing mapping // Messages for krbd and nbd differ, hence checking either of them for missing mapping
// This is not applicable when a device path is passed in // This is not applicable when a device path is passed in
if isImageSpec && if dArgs.isImageSpec &&
(strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdkRbdMissingMap, imageOrDeviceSpec)) || (strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdkRbdMissingMap, dArgs.imageOrDeviceSpec)) ||
strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, imageOrDeviceSpec))) { strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, dArgs.imageOrDeviceSpec))) {
// Devices found not to be mapped are treated as a successful detach // Devices found not to be mapped are treated as a successful detach
util.TraceLog(ctx, "image or device spec (%s) not mapped", imageOrDeviceSpec) util.TraceLog(ctx, "image or device spec (%s) not mapped", dArgs.imageOrDeviceSpec)
return nil return nil
} }
return fmt.Errorf("rbd: unmap for spec (%s) failed (%w): (%s)", imageOrDeviceSpec, err, stderr) return fmt.Errorf("rbd: unmap for spec (%s) failed (%w): (%s)", dArgs.imageOrDeviceSpec, err, stderr)
}
if dArgs.isNbd && dArgs.logDir != "" {
logFile := getCephClientLogFileName(dArgs.volumeID, dArgs.logDir, "rbd-nbd")
if err = os.Remove(logFile); err != nil {
util.WarningLog(ctx, "failed to remove logfile: %s, error: %v",
logFile, err)
}
} }
return nil return nil

View File

@ -50,6 +50,7 @@ const (
rbdImageWatcherSteps = 10 rbdImageWatcherSteps = 10
rbdDefaultMounter = "rbd" rbdDefaultMounter = "rbd"
rbdNbdMounter = "rbd-nbd" rbdNbdMounter = "rbd-nbd"
defaultLogDir = "/var/log/ceph"
// Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when // Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when
// command is not supported by ceph manager. Used to check errors and recover when the command // command is not supported by ceph manager. Used to check errors and recover when the command
@ -136,6 +137,7 @@ type rbdVolume struct {
ReservedID string ReservedID string
MapOptions string MapOptions string
UnmapOptions string UnmapOptions string
LogDir string
VolName string `json:"volName"` VolName string `json:"volName"`
MonValueFromSecret string `json:"monValueFromSecret"` MonValueFromSecret string `json:"monValueFromSecret"`
VolSize int64 `json:"volSize"` VolSize int64 `json:"volSize"`
@ -1523,6 +1525,7 @@ type rbdImageMetadataStash struct {
NbdAccess bool `json:"accessType"` NbdAccess bool `json:"accessType"`
Encrypted bool `json:"encrypted"` Encrypted bool `json:"encrypted"`
DevicePath string `json:"device"` // holds NBD device path for now DevicePath string `json:"device"` // holds NBD device path for now
LogDir string `json:"logDir"` // holds the client log path
} }
// file name in which image metadata is stashed. // file name in which image metadata is stashed.
@ -1553,6 +1556,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error {
imgMeta.NbdAccess = false imgMeta.NbdAccess = false
if volOptions.Mounter == rbdTonbd && hasNBD { if volOptions.Mounter == rbdTonbd && hasNBD {
imgMeta.NbdAccess = true imgMeta.NbdAccess = true
imgMeta.LogDir = volOptions.LogDir
} }
encodedBytes, err := json.Marshal(imgMeta) encodedBytes, err := json.Marshal(imgMeta)
@ -2002,3 +2006,16 @@ func (ri *rbdImage) addSnapshotScheduling(
return nil return nil
} }
// getCephClientLogFileName compiles the complete log file path based on inputs.
func getCephClientLogFileName(id, logDir, prefix string) string {
if prefix == "" {
prefix = "ceph"
}
if logDir == "" {
logDir = defaultLogDir
}
return fmt.Sprintf("%s/%s-%s.log", logDir, prefix, id)
}

View File

@ -189,3 +189,74 @@ func TestGetMappedID(t *testing.T) {
}) })
} }
} }
func TestGetCephClientLogFileName(t *testing.T) {
t.Parallel()
type args struct {
id string
logDir string
prefix string
}
volID := "0001-0024-fed5480a-f00f-417a-a51d-31d8a8144c03-0000000000000003-eba90b33-0156-11ec-a30b-4678a93686c2"
tests := []struct {
name string
args args
expected string
}{
{
name: "test for empty id",
args: args{
id: "",
logDir: "/var/log/ceph-csi",
prefix: "rbd-nbd",
},
expected: "/var/log/ceph-csi/rbd-nbd-.log",
},
{
name: "test for empty logDir",
args: args{
id: volID,
logDir: "",
prefix: "rbd-nbd",
},
expected: "/var/log/ceph/rbd-nbd-" + volID + ".log",
},
{
name: "test for empty prefix",
args: args{
id: volID,
logDir: "/var/log/ceph-csi",
prefix: "",
},
expected: "/var/log/ceph-csi/ceph-" + volID + ".log",
},
{
name: "test for all unavailable args",
args: args{
id: "",
logDir: "",
prefix: "",
},
expected: "/var/log/ceph/ceph-.log",
},
{
name: "test for all available args",
args: args{
id: volID,
logDir: "/var/log/ceph-csi",
prefix: "rbd-nbd",
},
expected: "/var/log/ceph-csi/rbd-nbd-" + volID + ".log",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
val := getCephClientLogFileName(tt.args.id, tt.args.logDir, tt.args.prefix)
if val != tt.expected {
t.Errorf("getCephClientLogFileName() got = %v, expected %v", val, tt.expected)
}
})
}
}

View File

@ -65,9 +65,9 @@ type ClusterMappingInfo struct {
// ... // ...
// }] // }]
func readClusterMappingInfo() (*[]ClusterMappingInfo, error) { func readClusterMappingInfo(filename string) (*[]ClusterMappingInfo, error) {
var info []ClusterMappingInfo var info []ClusterMappingInfo
content, err := ioutil.ReadFile(clusterMappingConfigFile) content, err := ioutil.ReadFile(filename) // #nosec:G304, file inclusion via variable.
if err != nil { if err != nil {
err = fmt.Errorf("error fetching clusterID mapping %w", err) err = fmt.Errorf("error fetching clusterID mapping %w", err)
@ -83,11 +83,11 @@ func readClusterMappingInfo() (*[]ClusterMappingInfo, error) {
return &info, nil return &info, nil
} }
// GetClusterMappingInfo returns corresponding cluster details like clusterID's // getClusterMappingInfo returns corresponding cluster details like clusterID's
// poolID,fscID lists read from configfile. // poolID,fscID lists read from 'filename'.
func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) { func getClusterMappingInfo(clusterID, filename string) (*[]ClusterMappingInfo, error) {
var mappingInfo []ClusterMappingInfo var mappingInfo []ClusterMappingInfo
info, err := readClusterMappingInfo() info, err := readClusterMappingInfo(filename)
if err != nil { if err != nil {
// discard not found error as this file is expected to be created by // discard not found error as this file is expected to be created by
// the admin in case of failover. // the admin in case of failover.
@ -114,3 +114,9 @@ func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) {
return &mappingInfo, nil return &mappingInfo, nil
} }
// GetClusterMappingInfo returns corresponding cluster details like clusterID's
// poolID,fscID lists read from configfile.
func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) {
return getClusterMappingInfo(clusterID, clusterMappingConfigFile)
}

View File

@ -138,19 +138,19 @@ func TestGetClusterMappingInfo(t *testing.T) {
currentTT := tt currentTT := tt
t.Run(currentTT.name, func(t *testing.T) { t.Run(currentTT.name, func(t *testing.T) {
t.Parallel() t.Parallel()
clusterMappingConfigFile = fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, currentI) mappingConfigFile := fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, currentI)
if len(currentTT.mappingFilecontent) != 0 { if len(currentTT.mappingFilecontent) != 0 {
err = ioutil.WriteFile(clusterMappingConfigFile, currentTT.mappingFilecontent, 0o600) err = ioutil.WriteFile(mappingConfigFile, currentTT.mappingFilecontent, 0o600)
if err != nil { if err != nil {
t.Errorf("GetClusterMappingInfo() error = %v", err) t.Errorf("failed to write to %q, error = %v", mappingConfigFile, err)
} }
} }
data, mErr := GetClusterMappingInfo(currentTT.clusterID) data, mErr := getClusterMappingInfo(currentTT.clusterID, mappingConfigFile)
if (mErr != nil) != currentTT.expectErr { if (mErr != nil) != currentTT.expectErr {
t.Errorf("GetClusterMappingInfo() error = %v, expected Error %v", mErr, currentTT.expectErr) t.Errorf("getClusterMappingInfo() error = %v, expected Error %v", mErr, currentTT.expectErr)
} }
if !reflect.DeepEqual(data, currentTT.expectedData) { if !reflect.DeepEqual(data, currentTT.expectedData) {
t.Errorf("GetClusterMappingInfo() = %v, expected data %v", data, currentTT.expectedData) t.Errorf("getClusterMappingInfo() = %v, expected data %v", data, currentTT.expectedData)
} }
}) })
} }

View File

@ -94,9 +94,10 @@ def list_pvc_vol_name_mapping(arg):
# list all pvc and get mapping # list all pvc and get mapping
else: else:
cmd += ['get', 'pvc', '-o', 'json'] cmd += ['get', 'pvc', '-o', 'json']
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate() stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to list pvc %s", stderr) print("failed to list pvc %s", stderr)
@ -194,10 +195,9 @@ def check_pv_name_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
if arg.toolboxdeployed is True: if arg.toolboxdeployed is True:
kube = get_cmd_prefix(arg) kube = get_cmd_prefix(arg)
cmd = kube + cmd cmd = kube + cmd
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stderr=subprocess.STDOUT) stdout, stderr = out.communicate()
stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
return False return False
name = b'' name = b''
@ -229,10 +229,9 @@ def check_image_in_cluster(arg, image_uuid, pool_name, volname_prefix):
kube = get_cmd_prefix(arg) kube = get_cmd_prefix(arg)
cmd = kube + cmd cmd = kube + cmd
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stderr=subprocess.STDOUT) stdout, stderr = out.communicate()
stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print(b"failed to toolbox %s", stderr) print(b"failed to toolbox %s", stderr)
@ -256,10 +255,10 @@ def check_image_uuid_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
if arg.toolboxdeployed is True: if arg.toolboxdeployed is True:
kube = get_cmd_prefix(arg) kube = get_cmd_prefix(arg)
cmd = kube + cmd cmd = kube + cmd
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate() with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to get toolbox %s", stderr) print("failed to get toolbox %s", stderr)
@ -320,9 +319,10 @@ def get_volume_handler_from_pv(arg, pvname):
cmd += ["--kubeconfig", arg.kubeconfig] cmd += ["--kubeconfig", arg.kubeconfig]
cmd += ['get', 'pv', pvname, '-o', 'json'] cmd += ['get', 'pv', pvname, '-o', 'json']
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate() stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to pv %s", stderr) print("failed to pv %s", stderr)
@ -347,10 +347,10 @@ def get_tool_box_pod_name(arg):
cmd += ["--kubeconfig", arg.kubeconfig] cmd += ["--kubeconfig", arg.kubeconfig]
cmd += ['get', 'po', '-l=app=rook-ceph-tools', cmd += ['get', 'po', '-l=app=rook-ceph-tools',
'-n', arg.rooknamespace, '-o', 'json'] '-n', arg.rooknamespace, '-o', 'json']
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate() with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to get toolbox pod name %s", stderr) print("failed to get toolbox pod name %s", stderr)
@ -377,10 +377,10 @@ def get_pool_name(arg, vol_id, is_rbd):
if arg.toolboxdeployed is True: if arg.toolboxdeployed is True:
kube = get_cmd_prefix(arg) kube = get_cmd_prefix(arg)
cmd = kube + cmd cmd = kube + cmd
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate() with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to get the pool name %s", stderr) print("failed to get the pool name %s", stderr)
@ -426,9 +426,10 @@ def check_subvol_path(arg, subvol_name, subvol_group, fsname):
if arg.toolboxdeployed is True: if arg.toolboxdeployed is True:
kube = get_cmd_prefix(arg) kube = get_cmd_prefix(arg)
cmd = kube + cmd cmd = kube + cmd
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate() stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to get toolbox %s", stderr) print("failed to get toolbox %s", stderr)
@ -451,9 +452,10 @@ def get_subvol_group(arg):
cmd += ["--kubeconfig", arg.kubeconfig] cmd += ["--kubeconfig", arg.kubeconfig]
cmd += ['get', 'cm', arg.configmap, '-o', 'json'] cmd += ['get', 'cm', arg.configmap, '-o', 'json']
cmd += ['--namespace', arg.configmapnamespace] cmd += ['--namespace', arg.configmapnamespace]
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate() stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to get configmap %s", stderr) print("failed to get configmap %s", stderr)
@ -463,6 +465,7 @@ def get_subvol_group(arg):
except ValueError as err: except ValueError as err:
print(err, stdout) print(err, stdout)
sys.exit() sys.exit()
# default subvolumeGroup # default subvolumeGroup
subvol_group = "csi" subvol_group = "csi"
cm_data = config_map['data'].get('config.json') cm_data = config_map['data'].get('config.json')
@ -508,9 +511,10 @@ def get_pv_data(arg, pvname):
cmd += ["--kubeconfig", arg.kubeconfig] cmd += ["--kubeconfig", arg.kubeconfig]
cmd += ['get', 'pv', pvname, '-o', 'json'] cmd += ['get', 'pv', pvname, '-o', 'json']
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
stdout, stderr = out.communicate() stdout, stderr = out.communicate()
if stderr is not None: if stderr is not None:
if arg.debug: if arg.debug:
print("failed to get pv %s", stderr) print("failed to get pv %s", stderr)

View File

@ -84,7 +84,8 @@ func ExitOnConnectionLoss() func() bool {
if err := ioutil.WriteFile(terminationLogPath, []byte(terminationMsg), 0644); err != nil { if err := ioutil.WriteFile(terminationLogPath, []byte(terminationMsg), 0644); err != nil {
klog.Errorf("%s: %s", terminationLogPath, err) klog.Errorf("%s: %s", terminationLogPath, err)
} }
klog.Fatalf(terminationMsg) klog.Exit(terminationMsg)
// Not reached.
return false return false
} }
} }

vendor/modules.txt
View File

@ -176,7 +176,7 @@ github.com/inconshreveable/mousetrap
github.com/jmespath/go-jmespath github.com/jmespath/go-jmespath
# github.com/json-iterator/go v1.1.11 # github.com/json-iterator/go v1.1.11
github.com/json-iterator/go github.com/json-iterator/go
# github.com/kubernetes-csi/csi-lib-utils v0.9.1 # github.com/kubernetes-csi/csi-lib-utils v0.10.0
## explicit ## explicit
github.com/kubernetes-csi/csi-lib-utils/connection github.com/kubernetes-csi/csi-lib-utils/connection
github.com/kubernetes-csi/csi-lib-utils/metrics github.com/kubernetes-csi/csi-lib-utils/metrics