mirror of https://github.com/ceph/ceph-csi.git
commit 21760def12

.mergify.yml
@@ -38,6 +38,8 @@ pull_request_rules:
      - base~=^(devel)|(release-.+)$
      - "#approved-reviews-by>=2"
      - "#changes-requested-reviews-by=0"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "status-success=codespell"
      - "status-success=multi-arch-build"
      - "status-success=go-test"
@@ -64,7 +66,7 @@ pull_request_rules:
      - label!=DNM
      - label=ready-to-merge
      - base~=^(devel)|(release-.+)$
      - "#approved-reviews-by>=1"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "status-success=codespell"
      - "status-success=multi-arch-build"
      - "status-success=go-test"
@@ -122,7 +124,9 @@ pull_request_rules:
      - base=release-v2.0
      - label!=DNM
      - "#changes-requested-reviews-by=0"
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
    actions:
      merge: {}
      dismiss_reviews: {}
@@ -142,7 +146,9 @@ pull_request_rules:
      - base=release-v2.1
      - label!=DNM
      - "#changes-requested-reviews-by=0"
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
    actions:
      merge: {}
      dismiss_reviews: {}
@@ -162,7 +168,9 @@ pull_request_rules:
      - base=release-v3.0
      - label!=DNM
      - "#changes-requested-reviews-by=0"
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
    actions:
      merge: {}
      dismiss_reviews: {}
@@ -182,7 +190,9 @@ pull_request_rules:
      - base=release-v3.1
      - label!=DNM
      - "#changes-requested-reviews-by=0"
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "status-success=multi-arch-build"
      - "status-success=commitlint"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
@@ -212,7 +222,9 @@ pull_request_rules:
      - author=mergify[bot]
      - base=release-v3.2
      - label!=DNM
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "status-success=codespell"
      - "status-success=multi-arch-build"
      - "status-success=go-test"
@@ -249,7 +261,9 @@ pull_request_rules:
      - author=mergify[bot]
      - base=release-v3.3
      - label!=DNM
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "status-success=codespell"
      - "status-success=multi-arch-build"
      - "status-success=go-test"
@@ -286,7 +300,9 @@ pull_request_rules:
      - author=mergify[bot]
      - base=release-v3.4
      - label!=DNM
      - "#approved-reviews-by>=1"
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "status-success=codespell"
      - "status-success=multi-arch-build"
      - "status-success=go-test"
@@ -321,6 +337,8 @@ pull_request_rules:
      - label!=DNM
      - base=ci/centos
      - "#approved-reviews-by>=2"
      - "approved-reviews-by=@ceph/ceph-csi-contributors"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "#changes-requested-reviews-by=0"
      - "status-success=ci/centos/job-validation"
      - "status-success=ci/centos/jjb-validate"
@@ -334,7 +352,7 @@ pull_request_rules:
      - label!=DNM
      - label=ready-to-merge
      - base=ci/centos
      - "#approved-reviews-by>=1"
      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
      - "#changes-requested-reviews-by=0"
      - "status-success=ci/centos/job-validation"
      - "status-success=ci/centos/jjb-validate"
@@ -1,7 +1,9 @@
# Ceph CSI

[![GitHub release](https://img.shields.io/github/release/ceph/ceph-csi/all.svg)](https://github.com/ceph/ceph-csi/releases)
[![Go Report
Card](https://goreportcard.com/badge/github.com/ceph/ceph-csi)](https://goreportcard.com/report/github.com/ceph/ceph-csi)
[![TODOs](https://badgen.net/https/api.tickgit.com/badgen/github.com/ceph/ceph-csi/devel)](https://www.tickgit.com/browse?repo=github.com/ceph/ceph-csi&branch=devel)

- [Ceph CSI](#ceph-csi)
  - [Overview](#overview)
@@ -52,7 +52,7 @@ ROOK_CEPH_CLUSTER_IMAGE=docker.io/ceph/ceph:v16
CSI_ATTACHER_VERSION=v3.3.0
CSI_SNAPSHOTTER_VERSION=v4.2.0
CSI_PROVISIONER_VERSION=v3.0.0
CSI_RESIZER_VERSION=v1.2.0
CSI_RESIZER_VERSION=v1.3.0
CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.3.0

# e2e settings
@@ -179,7 +179,7 @@ provisioner:
    enabled: true
    image:
      repository: k8s.gcr.io/sig-storage/csi-resizer
      tag: v1.2.0
      tag: v1.3.0
      pullPolicy: IfNotPresent
    resources: {}
@@ -138,6 +138,7 @@ charts and their default values.
| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` |
| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` |
| `kubeletDir` | kubelet working directory | `/var/lib/kubelet` |
| `cephLogDir` | Host path location for ceph client processes logging, ex: rbd-nbd | `/var/log/ceph` |
| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
@@ -124,6 +124,8 @@ spec:
              mountPropagation: "Bidirectional"
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-logdir
              mountPath: /var/log/ceph
          resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
        {{- if .Values.nodeplugin.httpMetrics.enabled }}
@@ -169,6 +171,10 @@ spec:
          hostPath:
            path: {{ .Values.kubeletDir }}/pods
            type: DirectoryOrCreate
        - name: ceph-logdir
          hostPath:
            path: {{ .Values.cephLogDir }}
            type: DirectoryOrCreate
        - name: host-dev
          hostPath:
            path: /dev
@@ -42,6 +42,8 @@ spec:
      readOnly: true
    - pathPrefix: '/lib/modules'
      readOnly: true
    - pathPrefix: '{{ .Values.cephLogDir }}'
      readOnly: false
    - pathPrefix: '{{ .Values.kubeletDir }}'
      readOnly: false
{{- end }}
@@ -216,7 +216,7 @@ provisioner:
    enabled: true
    image:
      repository: k8s.gcr.io/sig-storage/csi-resizer
      tag: v1.2.0
      tag: v1.3.0
      pullPolicy: IfNotPresent
    resources: {}
@@ -382,6 +382,8 @@ provisionerSocketFile: csi-provisioner.sock
pluginSocketFile: csi.sock
# kubelet working directory, can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet
# Host path location for ceph client processes logging, ex: rbd-nbd
cephLogDir: /var/log/ceph
# Name of the csi-driver
driverName: rbd.csi.ceph.com
# Name of the configmap used for state
@@ -60,7 +60,7 @@ spec:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
@@ -38,6 +38,8 @@ spec:
      readOnly: true
    - pathPrefix: '/var/lib/kubelet/pods'
      readOnly: false
    - pathPrefix: '/var/log/ceph'
      readOnly: false
    - pathPrefix: '/var/lib/kubelet/plugins/rbd.csi.ceph.com'
      readOnly: false
    - pathPrefix: '/var/lib/kubelet/plugins_registry'
@@ -97,7 +97,7 @@ spec:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
@@ -111,6 +111,8 @@ spec:
              mountPropagation: "Bidirectional"
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-logdir
              mountPath: /var/log/ceph
        - name: liveness-prometheus
          securityContext:
            privileged: true
@@ -146,6 +148,10 @@ spec:
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: ceph-logdir
          hostPath:
            path: /var/log/ceph
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
@@ -257,6 +257,10 @@ Once your PR has been submitted for review the following criteria will
need to be met before it will be merged:

* Each PR needs reviews accepting the change from at least two developers for merging.
* Each PR needs approval from
  [ceph-csi-contributors](https://github.com/orgs/ceph/teams/ceph-csi-contributors)
  and
  [ceph-csi-maintainers](https://github.com/orgs/ceph/teams/ceph-csi-maintainers).
* It is common to request reviews from those reviewers automatically suggested
  by GitHub.
* Each PR needs to have been open for at least 24 working hours to allow for
@@ -3,6 +3,7 @@
- [RBD NBD Mounter](#rbd-nbd-mounter)
  - [Overview](#overview)
  - [Configuration](#configuration)
    - [Configuring logging path](#configuring-logging-path)
  - [Status](#status)
  - [Support Matrix](#support-matrix)
    - [CSI spec and Kubernetes version compatibility](#csi-spec-and-kubernetes-version-compatibility)
@@ -28,6 +29,41 @@ client-side, which is inside the `csi-rbdplugin` node plugin.
To use the rbd-nbd mounter for RBD-backed PVs, set `mounter` to `rbd-nbd`
in the StorageClass.

### Configuring logging path

If you are using the default rbd nodeplugin daemonset and StorageClass
templates, `cephLogDir` will be `/var/log/ceph`; this directory will be
a host-path and the default log file path will be
`/var/log/ceph/rbd-nbd-<volID>.log`. rbd-nbd creates a log file per volume
under the `cephLogDir` path on NodeStage(map) and removes it on the
respective NodeUnstage(unmap).

If you need a customized log path, do the following:

- Edit the daemonset templates to change the `cephLogDir`
- If you are using helm charts, you can set the `cephLogDir` key

```
helm install --set cephLogDir=/var/log/ceph-csi/my-dir
```

- For standard templates, edit [csi-rbdplugin.yaml](../deploy/rbd/kubernetes/csi-rbdplugin.yaml)
  to update the `hostPath` for `ceph-logdir`, and also edit the psp
  [csi-nodeplugin-psp.yaml](../deploy/rbd/kubernetes/csi-nodeplugin-psp.yaml)
  to update the `pathPrefix` spec entries.
- Update the StorageClass with the customized log directory path
- Now update the rbd StorageClass for `cephLogDir`, for example

```
cephLogDir: "/var/log/prod-A-logs"
```

`NOTE`:

- On uninstall, make sure to manually delete the `cephLogDir` on the host to
  free up space, in case any log files were left uncleaned.
- If you do not need the rbd-nbd logging to be persistent, just update the
  StorageClass `cephLogDir` to use a non-persistent path.
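For reference, the per-volume log file name is derived from the log directory,
a client prefix, and the volume ID. Below is a minimal Go sketch of the naming
logic, mirroring the `getCephClientLogFileName` helper added in this change
(the fallback defaults come from that helper; the standalone `main` and the
sample volume ID are illustrative only):

```go
package main

import "fmt"

// cephClientLogFileName mirrors the nodeplugin helper: it falls back to the
// "ceph" prefix and the default /var/log/ceph directory when either input
// is empty.
func cephClientLogFileName(volID, logDir, prefix string) string {
    if prefix == "" {
        prefix = "ceph"
    }
    if logDir == "" {
        logDir = "/var/log/ceph"
    }
    return fmt.Sprintf("%s/%s-%s.log", logDir, prefix, volID)
}

func main() {
    // For a StorageClass with mounter rbd-nbd and the default cephLogDir:
    fmt.Println(cephClientLogFileName("myvolID", "", "rbd-nbd"))
    // Output: /var/log/ceph/rbd-nbd-myvolID.log
}
```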
## Status

Rbd-nbd support status: **Alpha**
e2e/pod.go
@@ -406,3 +406,22 @@ func calculateSHA512sum(f *framework.Framework, app *v1.Pod, filePath string, op

    return checkSum, nil
}

// getKernelVersionFromDaemonset gets the kernel version from the specified container.
func getKernelVersionFromDaemonset(f *framework.Framework, ns, dsn, cn string) (string, error) {
    selector, err := getDaemonSetLabelSelector(f, ns, dsn)
    if err != nil {
        return "", err
    }

    opt := metav1.ListOptions{
        LabelSelector: selector,
    }

    kernelRelease, stdErr, err := execCommandInContainer(f, "uname -r", ns, cn, &opt)
    if err != nil || stdErr != "" {
        return "", err
    }

    return kernelRelease, nil
}
e2e/rbd.go
@@ -57,6 +57,8 @@ var (
    appBlockSmartClonePath = rbdExamplePath + "block-pod-clone.yaml"
    snapshotPath = rbdExamplePath + "snapshot.yaml"
    defaultCloneCount = 10

    nbdMapOptions = "debug-rbd=20"
)

func deployRBDPlugin() {
@@ -178,6 +180,7 @@ func validateRBDImageCount(f *framework.Framework, count int, pool string) {
var _ = Describe("RBD", func() {
    f := framework.NewDefaultFramework("rbd")
    var c clientset.Interface
    var kernelRelease string
    // deploy RBD CSI
    BeforeEach(func() {
        if !testRBD || upgradeTesting {
@@ -232,6 +235,27 @@ var _ = Describe("RBD", func() {
            e2elog.Failf("failed to create node secret with error %v", err)
        }
        deployVault(f.ClientSet, deployTimeout)

        // wait for provisioner deployment
        err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
        if err != nil {
            e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
        }

        // wait for nodeplugin deamonset pods
        err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
        if err != nil {
            e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
        }

        kernelRelease, err = getKernelVersionFromDaemonset(f, cephCSINamespace, rbdDaemonsetName, "csi-rbdplugin")
        if err != nil {
            e2elog.Failf("failed to get the kernel version with error %v", err)
        }
        // default io-timeout=0, needs kernel >= 5.4
        if !util.CheckKernelSupport(kernelRelease, nbdZeroIOtimeoutSupport) {
            nbdMapOptions = "debug-rbd=20,io-timeout=330"
        }
    })

    AfterEach(func() {
@@ -302,20 +326,6 @@ var _ = Describe("RBD", func() {

    Context("Test RBD CSI", func() {
        It("Test RBD CSI", func() {
            By("checking provisioner deployment is running", func() {
                err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
                    e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
                }
            })

            By("checking nodeplugin deamonset pods are running", func() {
                err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
                    e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
                }
            })

            // test only if ceph-csi is deployed via helm
            if helmTest {
                By("verify PVC and app binding on helm installation", func() {
@@ -410,7 +420,10 @@ var _ = Describe("RBD", func() {
                f,
                defaultSCName,
                nil,
                map[string]string{"mounter": "rbd-nbd"},
                map[string]string{
                    "mounter": "rbd-nbd",
                    "mapOptions": nbdMapOptions,
                },
                deletePolicy)
            if err != nil {
                e2elog.Failf("failed to create storageclass with error %v", err)
@@ -432,10 +445,6 @@ var _ = Describe("RBD", func() {
            })

            By("Resize rbd-nbd PVC and check application directory size", func() {
                kernelRelease, err := util.GetKernelVersion()
                if err != nil {
                    e2elog.Failf("failed to get kernel version with error %v", err)
                }
                if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) {
                    err := deleteResource(rbdExamplePath + "storageclass.yaml")
                    if err != nil {
@@ -447,7 +456,10 @@ var _ = Describe("RBD", func() {
                    f,
                    defaultSCName,
                    nil,
                    map[string]string{"mounter": "rbd-nbd"},
                    map[string]string{
                        "mounter": "rbd-nbd",
                        "mapOptions": nbdMapOptions,
                    },
                    deletePolicy)
                if err != nil {
                    e2elog.Failf("failed to create storageclass with error %v", err)
@@ -489,7 +501,10 @@ var _ = Describe("RBD", func() {
                f,
                defaultSCName,
                nil,
                map[string]string{"mounter": "rbd-nbd"},
                map[string]string{
                    "mounter": "rbd-nbd",
                    "mapOptions": nbdMapOptions,
                },
                deletePolicy)
            if err != nil {
                e2elog.Failf("failed to create storageclass with error %v", err)
@@ -640,7 +655,11 @@ var _ = Describe("RBD", func() {
                f,
                defaultSCName,
                nil,
                map[string]string{"mounter": "rbd-nbd", "encrypted": "true"},
                map[string]string{
                    "mounter": "rbd-nbd",
                    "mapOptions": nbdMapOptions,
                    "encrypted": "true",
                },
                deletePolicy)
            if err != nil {
                e2elog.Failf("failed to create storageclass with error %v", err)
@@ -993,7 +1012,11 @@ var _ = Describe("RBD", func() {
                f,
                defaultSCName,
                nil,
                map[string]string{"imageFeatures": "layering,journaling,exclusive-lock", "mounter": "rbd-nbd"},
                map[string]string{
                    "imageFeatures": "layering,journaling,exclusive-lock",
                    "mounter": "rbd-nbd",
                    "mapOptions": nbdMapOptions,
                },
                deletePolicy)
            if err != nil {
                e2elog.Failf("failed to create storageclass with error %v", err)
@@ -40,6 +40,28 @@ var nbdResizeSupport = []util.KernelVersion{
    }, // standard 5.3+ versions
}

// To use `io-timeout=0` we need
// www.mail-archive.com/linux-block@vger.kernel.org/msg38060.html
// nolint:gomnd // numbers specify Kernel versions.
var nbdZeroIOtimeoutSupport = []util.KernelVersion{
    {
        Version: 5,
        PatchLevel: 4,
        SubLevel: 0,
        ExtraVersion: 0,
        Distribution: "",
        Backport: false,
    }, // standard 5.4+ versions
    {
        Version: 4,
        PatchLevel: 18,
        SubLevel: 0,
        ExtraVersion: 305,
        Distribution: ".el8",
        Backport: true,
    }, // CentOS 8.4
}

func imageSpec(pool, image string) string {
    if radosNamespace != "" {
        return pool + "/" + radosNamespace + "/" + image
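The e2e suite consults tables like these through `util.CheckKernelSupport` to decide whether the running kernel can honor `io-timeout=0`. A simplified, self-contained sketch of that style of version gating follows; the release-string parsing and comparison are illustrative assumptions, and the real helper additionally understands distribution backports such as the `.el8` entry above:

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

// kernelVersion holds the fields this sketch keys on.
type kernelVersion struct {
    version, patchLevel int
}

// atLeast reports whether release (e.g. "5.11.0-37-generic") is at or
// above the given version/patchlevel threshold.
func atLeast(release string, want kernelVersion) bool {
    fields := strings.FieldsFunc(release, func(r rune) bool { return r == '.' || r == '-' })
    if len(fields) < 2 {
        return false
    }
    major, err1 := strconv.Atoi(fields[0])
    minor, err2 := strconv.Atoi(fields[1])
    if err1 != nil || err2 != nil {
        return false
    }
    return major > want.version || (major == want.version && minor >= want.patchLevel)
}

func main() {
    // Mirrors the e2e gating above: the default io-timeout=0 needs a
    // 5.4+ kernel, otherwise the tests fall back to io-timeout=330.
    nbdMapOptions := "debug-rbd=20"
    if !atLeast("4.18.0-240.el8.x86_64", kernelVersion{version: 5, patchLevel: 4}) {
        nbdMapOptions = "debug-rbd=20,io-timeout=330"
    }
    fmt.Println(nbdMapOptions) // debug-rbd=20,io-timeout=330
}
```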
@@ -69,6 +69,12 @@ parameters:
  # on supported nodes
  # mounter: rbd-nbd

  # (optional) ceph client log location, eg: rbd-nbd
  # By default host-path /var/log/ceph of node is bind-mounted into
  # csi-rbdplugin pod at /var/log/ceph mount path. See docs/rbd-nbd.md
  # for available configuration options.
  # cephLogDir: /var/log/ceph

  # (optional) Prefix to use for naming RBD images.
  # If omitted, defaults to "csi-vol-".
  # volumeNamePrefix: "foo-bar-"
go.mod
@@ -12,7 +12,7 @@ require (
    github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
    github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
    github.com/kubernetes-csi/csi-lib-utils v0.9.1
    github.com/kubernetes-csi/csi-lib-utils v0.10.0
    github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
    github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec
    github.com/onsi/ginkgo v1.16.4
go.sum
@@ -187,7 +187,6 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
@@ -638,8 +637,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8=
github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M=
github.com/kubernetes-csi/csi-lib-utils v0.10.0 h1:Aqm8X81eCzzfH/bvIEqSWtcbK9HF9NbFk4d+le1snVA=
github.com/kubernetes-csi/csi-lib-utils v0.10.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
@@ -25,10 +25,10 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

// ContollerManager is the interface that will wrap Add function.
// Manager is the interface that will wrap Add function.
// The New controllers which gets added, as to implement Add function to get
// started by the manager.
type ContollerManager interface {
type Manager interface {
    Add(manager.Manager, Config) error
}

@@ -39,7 +39,7 @@ type Config struct {
}

// ControllerList holds the list of managers need to be started.
var ControllerList []ContollerManager
var ControllerList []Manager

// addToManager calls the registered managers Add method.
func addToManager(mgr manager.Manager, config Config) error {

@@ -45,8 +45,8 @@ type ReconcilePersistentVolume struct {
}

var (
    _ reconcile.Reconciler = &ReconcilePersistentVolume{}
    _ ctrl.ContollerManager = &ReconcilePersistentVolume{}
    _ reconcile.Reconciler = &ReconcilePersistentVolume{}
    _ ctrl.Manager = &ReconcilePersistentVolume{}
)

// Init will add the ReconcilePersistentVolume to the list.
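With the rename in place, a new controller implements `Manager` and appends itself to `ControllerList` so that `addToManager` can start it. A minimal sketch following the `ReconcilePersistentVolume` pattern shown above; the `myController` type, its `Init` wiring, and the `internal/controller` import path are assumptions for illustration:

```go
package mycontroller

import (
    "sigs.k8s.io/controller-runtime/pkg/manager"

    ctrl "github.com/ceph/ceph-csi/internal/controller"
)

// myController is a placeholder; a real controller would also
// implement reconcile.Reconciler and build a controller in Add.
type myController struct{}

// Compile-time assertion against the renamed interface.
var _ ctrl.Manager = &myController{}

// Add registers the controller with the controller-runtime manager.
func (c *myController) Add(mgr manager.Manager, cfg ctrl.Config) error {
    // ... construct the controller and set up watches on mgr ...
    return nil
}

// Init appends the controller so that addToManager starts it.
func Init() {
    ctrl.ControllerList = append(ctrl.ControllerList, &myController{})
}
```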
@@ -274,6 +274,10 @@ func (ns *NodeServer) NodeStageVolume(
    volOptions.MapOptions = req.GetVolumeContext()["mapOptions"]
    volOptions.UnmapOptions = req.GetVolumeContext()["unmapOptions"]
    volOptions.Mounter = req.GetVolumeContext()["mounter"]
    volOptions.LogDir = req.GetVolumeContext()["cephLogDir"]
    if volOptions.LogDir == "" {
        volOptions.LogDir = defaultLogDir
    }

    err = volOptions.Connect(cr)
    if err != nil {
@@ -818,13 +822,17 @@ func (ns *NodeServer) NodeUnstageVolume(

    // Unmapping rbd device
    imageSpec := imgInfo.String()
    if err = detachRBDImageOrDeviceSpec(
        ctx, imageSpec,
        true,
        imgInfo.NbdAccess,
        imgInfo.Encrypted,
        req.GetVolumeId(),
        imgInfo.UnmapOptions); err != nil {

    dArgs := detachRBDImageArgs{
        imageOrDeviceSpec: imageSpec,
        isImageSpec: true,
        isNbd: imgInfo.NbdAccess,
        encrypted: imgInfo.Encrypted,
        volumeID: req.GetVolumeId(),
        unmapOptions: imgInfo.UnmapOptions,
        logDir: imgInfo.LogDir,
    }
    if err = detachRBDImageOrDeviceSpec(ctx, dArgs); err != nil {
        util.ErrorLog(
            ctx,
            "error unmapping volume (%s) from staging path (%s): (%v)",
@@ -48,10 +48,22 @@ const (
    rbdUnmapCmdNbdMissingMap = "rbd-nbd: %s is not mapped"
    rbdMapConnectionTimeout = "Connection timed out"

    defaultNbdReAttachTimeout = 300
    defaultNbdReAttachTimeout = 300 /* in seconds */
    defaultNbdIOTimeout = 0 /* do not abort the requests */

    useNbdNetlink = "try-netlink"
    // The default way of creating nbd devices via rbd-nbd is through the
    // legacy ioctl interface, to take advantage of netlink features we
    // should specify `try-netlink` flag explicitly.
    useNbdNetlink = "try-netlink"

    // `reattach-timeout` of rbd-nbd is to tweak NBD_ATTR_DEAD_CONN_TIMEOUT.
    // It specifies how long the device should be held waiting for the
    // userspace process to come back to life.
    setNbdReattach = "reattach-timeout"

    // `io-timeout` of rbd-nbd is to tweak NBD_ATTR_TIMEOUT. It specifies
    // how long the IO should wait to get handled before bailing out.
    setNbdIOTimeout = "io-timeout"
)

var hasNBD = false
@@ -81,6 +93,16 @@ type nbdDeviceInfo struct {
    Device string `json:"device"`
}

type detachRBDImageArgs struct {
    imageOrDeviceSpec string
    isImageSpec bool
    isNbd bool
    encrypted bool
    volumeID string
    unmapOptions string
    logDir string
}

// rbdGetDeviceList queries rbd about mapped devices and returns a list of rbdDeviceInfo
// It will selectively list devices mapped using krbd or nbd as specified by accessType.
func rbdGetDeviceList(ctx context.Context, accessType string) ([]rbdDeviceInfo, error) {
@@ -239,6 +261,9 @@ func appendDeviceTypeAndOptions(cmdArgs []string, isNbd, isThick bool, userOptio
        if !strings.Contains(userOptions, setNbdReattach) {
            cmdArgs = append(cmdArgs, "--options", fmt.Sprintf("%s=%d", setNbdReattach, defaultNbdReAttachTimeout))
        }
        if !strings.Contains(userOptions, setNbdIOTimeout) {
            cmdArgs = append(cmdArgs, "--options", fmt.Sprintf("%s=%d", setNbdIOTimeout, defaultNbdIOTimeout))
        }
    }
    if isThick {
        // When an image is thick-provisioned, any discard/unmap/trim
@@ -263,6 +288,9 @@ func appendRbdNbdCliOptions(cmdArgs []string, userOptions string) []string {
    if !strings.Contains(userOptions, setNbdReattach) {
        cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%d", setNbdReattach, defaultNbdReAttachTimeout))
    }
    if !strings.Contains(userOptions, setNbdIOTimeout) {
        cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%d", setNbdIOTimeout, defaultNbdIOTimeout))
    }
    if userOptions != "" {
        options := strings.Split(userOptions, ",")
        for _, opt := range options {
@@ -296,6 +324,11 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
        util.WarningLog(ctx, "failed to detect if image %q is thick-provisioned: %v", volOpt, err)
    }

    if isNbd {
        mapArgs = append(mapArgs, "--log-file",
            getCephClientLogFileName(volOpt.VolID, volOpt.LogDir, "rbd-nbd"))
    }

    cli := rbd
    if device != "" {
        // TODO: use rbd cli for attach/detach in the future
@@ -317,14 +350,16 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
        util.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr)
        // unmap rbd image if connection timeout
        if strings.Contains(err.Error(), rbdMapConnectionTimeout) {
            detErr := detachRBDImageOrDeviceSpec(
                ctx,
                imagePath,
                true,
                isNbd,
                volOpt.isEncrypted(),
                volOpt.VolID,
                volOpt.UnmapOptions)
            dArgs := detachRBDImageArgs{
                imageOrDeviceSpec: imagePath,
                isImageSpec: true,
                isNbd: isNbd,
                encrypted: volOpt.isEncrypted(),
                volumeID: volOpt.VolID,
                unmapOptions: volOpt.UnmapOptions,
                logDir: volOpt.LogDir,
            }
            detErr := detachRBDImageOrDeviceSpec(ctx, dArgs)
            if detErr != nil {
                util.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
            }
@@ -367,22 +402,29 @@ func detachRBDDevice(ctx context.Context, devicePath, volumeID, unmapOptions str
        nbdType = true
    }

    return detachRBDImageOrDeviceSpec(ctx, devicePath, false, nbdType, encrypted, volumeID, unmapOptions)
    dArgs := detachRBDImageArgs{
        imageOrDeviceSpec: devicePath,
        isImageSpec: false,
        isNbd: nbdType,
        encrypted: encrypted,
        volumeID: volumeID,
        unmapOptions: unmapOptions,
    }

    return detachRBDImageOrDeviceSpec(ctx, dArgs)
}

// detachRBDImageOrDeviceSpec detaches an rbd imageSpec or devicePath, with additional checking
// when imageSpec is used to decide if image is already unmapped.
func detachRBDImageOrDeviceSpec(
    ctx context.Context,
    imageOrDeviceSpec string,
    isImageSpec, isNbd, encrypted bool,
    volumeID, unmapOptions string) error {
    if encrypted {
        mapperFile, mapperPath := util.VolumeMapper(volumeID)
    dArgs detachRBDImageArgs) error {
    if dArgs.encrypted {
        mapperFile, mapperPath := util.VolumeMapper(dArgs.volumeID)
        mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath)
        if err != nil {
            util.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s",
                mapperPath, imageOrDeviceSpec, err)
                mapperPath, dArgs.imageOrDeviceSpec, err)

            return err
        }
@@ -391,31 +433,38 @@ func detachRBDImageOrDeviceSpec(
            err = util.CloseEncryptedVolume(ctx, mapperFile)
            if err != nil {
                util.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s",
                    mapperPath, imageOrDeviceSpec, err)
                    mapperPath, dArgs.imageOrDeviceSpec, err)

                return err
            }
            imageOrDeviceSpec = mappedDevice
            dArgs.imageOrDeviceSpec = mappedDevice
        }
    }

    unmapArgs := []string{"unmap", imageOrDeviceSpec}
    unmapArgs = appendDeviceTypeAndOptions(unmapArgs, isNbd, false, unmapOptions)
    unmapArgs := []string{"unmap", dArgs.imageOrDeviceSpec}
    unmapArgs = appendDeviceTypeAndOptions(unmapArgs, dArgs.isNbd, false, dArgs.unmapOptions)

    _, stderr, err := util.ExecCommand(ctx, rbd, unmapArgs...)
    if err != nil {
        // Messages for krbd and nbd differ, hence checking either of them for missing mapping
        // This is not applicable when a device path is passed in
        if isImageSpec &&
            (strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdkRbdMissingMap, imageOrDeviceSpec)) ||
                strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, imageOrDeviceSpec))) {
        if dArgs.isImageSpec &&
            (strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdkRbdMissingMap, dArgs.imageOrDeviceSpec)) ||
                strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, dArgs.imageOrDeviceSpec))) {
            // Devices found not to be mapped are treated as a successful detach
            util.TraceLog(ctx, "image or device spec (%s) not mapped", imageOrDeviceSpec)
            util.TraceLog(ctx, "image or device spec (%s) not mapped", dArgs.imageOrDeviceSpec)

            return nil
        }

        return fmt.Errorf("rbd: unmap for spec (%s) failed (%w): (%s)", imageOrDeviceSpec, err, stderr)
        return fmt.Errorf("rbd: unmap for spec (%s) failed (%w): (%s)", dArgs.imageOrDeviceSpec, err, stderr)
    }
    if dArgs.isNbd && dArgs.logDir != "" {
        logFile := getCephClientLogFileName(dArgs.volumeID, dArgs.logDir, "rbd-nbd")
        if err = os.Remove(logFile); err != nil {
            util.WarningLog(ctx, "failed to remove logfile: %s, error: %v",
                logFile, err)
        }
    }

    return nil
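The net effect on the rbd-nbd command line is that `reattach-timeout` and `io-timeout` defaults are injected unless the user already supplied them through `mapOptions`. A small, self-contained sketch of how the defaults compose with user options, simplified from `appendDeviceTypeAndOptions` above (the full argument layout of the real `rbd` invocation may differ):

```go
package main

import (
    "fmt"
    "strings"
)

// appendNbdDefaults adds the reattach-timeout and io-timeout defaults
// unless the user options already mention them, mirroring the checks
// added in this change.
func appendNbdDefaults(args []string, userOptions string) []string {
    if !strings.Contains(userOptions, "reattach-timeout") {
        args = append(args, "--options", "reattach-timeout=300")
    }
    if !strings.Contains(userOptions, "io-timeout") {
        args = append(args, "--options", "io-timeout=0")
    }
    if userOptions != "" {
        args = append(args, "--options", userOptions)
    }
    return args
}

func main() {
    // With the e2e mapOptions "debug-rbd=20", both defaults still apply:
    args := appendNbdDefaults([]string{"map", "pool/image"}, "debug-rbd=20")
    fmt.Println(strings.Join(args, " "))
    // map pool/image --options reattach-timeout=300 --options io-timeout=0 --options debug-rbd=20
}
```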
@@ -50,6 +50,7 @@ const (
    rbdImageWatcherSteps = 10
    rbdDefaultMounter = "rbd"
    rbdNbdMounter = "rbd-nbd"
    defaultLogDir = "/var/log/ceph"

    // Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when
    // command is not supported by ceph manager. Used to check errors and recover when the command
@@ -136,6 +137,7 @@ type rbdVolume struct {
    ReservedID string
    MapOptions string
    UnmapOptions string
    LogDir string
    VolName string `json:"volName"`
    MonValueFromSecret string `json:"monValueFromSecret"`
    VolSize int64 `json:"volSize"`
@@ -1523,6 +1525,7 @@ type rbdImageMetadataStash struct {
    NbdAccess bool `json:"accessType"`
    Encrypted bool `json:"encrypted"`
    DevicePath string `json:"device"` // holds NBD device path for now
    LogDir string `json:"logDir"` // holds the client log path
}

// file name in which image metadata is stashed.
@@ -1553,6 +1556,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error {
    imgMeta.NbdAccess = false
    if volOptions.Mounter == rbdTonbd && hasNBD {
        imgMeta.NbdAccess = true
        imgMeta.LogDir = volOptions.LogDir
    }

    encodedBytes, err := json.Marshal(imgMeta)
@@ -2002,3 +2006,16 @@ func (ri *rbdImage) addSnapshotScheduling(

    return nil
}

// getCephClientLogFileName compiles the complete log file path based on inputs.
func getCephClientLogFileName(id, logDir, prefix string) string {
    if prefix == "" {
        prefix = "ceph"
    }

    if logDir == "" {
        logDir = defaultLogDir
    }

    return fmt.Sprintf("%s/%s-%s.log", logDir, prefix, id)
}
@@ -189,3 +189,74 @@ func TestGetMappedID(t *testing.T) {
        })
    }
}

func TestGetCephClientLogFileName(t *testing.T) {
    t.Parallel()
    type args struct {
        id string
        logDir string
        prefix string
    }
    volID := "0001-0024-fed5480a-f00f-417a-a51d-31d8a8144c03-0000000000000003-eba90b33-0156-11ec-a30b-4678a93686c2"
    tests := []struct {
        name string
        args args
        expected string
    }{
        {
            name: "test for empty id",
            args: args{
                id: "",
                logDir: "/var/log/ceph-csi",
                prefix: "rbd-nbd",
            },
            expected: "/var/log/ceph-csi/rbd-nbd-.log",
        },
        {
            name: "test for empty logDir",
            args: args{
                id: volID,
                logDir: "",
                prefix: "rbd-nbd",
            },
            expected: "/var/log/ceph/rbd-nbd-" + volID + ".log",
        },
        {
            name: "test for empty prefix",
            args: args{
                id: volID,
                logDir: "/var/log/ceph-csi",
                prefix: "",
            },
            expected: "/var/log/ceph-csi/ceph-" + volID + ".log",
        },
        {
            name: "test for all unavailable args",
            args: args{
                id: "",
                logDir: "",
                prefix: "",
            },
            expected: "/var/log/ceph/ceph-.log",
        },
        {
            name: "test for all available args",
            args: args{
                id: volID,
                logDir: "/var/log/ceph-csi",
                prefix: "rbd-nbd",
            },
            expected: "/var/log/ceph-csi/rbd-nbd-" + volID + ".log",
        },
    }
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            val := getCephClientLogFileName(tt.args.id, tt.args.logDir, tt.args.prefix)
            if val != tt.expected {
                t.Errorf("getCephClientLogFileName() got = %v, expected %v", val, tt.expected)
            }
        })
    }
}
@@ -65,9 +65,9 @@ type ClusterMappingInfo struct {
// ...
// }]

func readClusterMappingInfo() (*[]ClusterMappingInfo, error) {
func readClusterMappingInfo(filename string) (*[]ClusterMappingInfo, error) {
    var info []ClusterMappingInfo
    content, err := ioutil.ReadFile(clusterMappingConfigFile)
    content, err := ioutil.ReadFile(filename) // #nosec:G304, file inclusion via variable.
    if err != nil {
        err = fmt.Errorf("error fetching clusterID mapping %w", err)

@@ -83,11 +83,11 @@ func readClusterMappingInfo() (*[]ClusterMappingInfo, error) {
    return &info, nil
}

// GetClusterMappingInfo returns corresponding cluster details like clusterID's
// poolID,fscID lists read from configfile.
func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) {
// getClusterMappingInfo returns corresponding cluster details like clusterID's
// poolID,fscID lists read from 'filename'.
func getClusterMappingInfo(clusterID, filename string) (*[]ClusterMappingInfo, error) {
    var mappingInfo []ClusterMappingInfo
    info, err := readClusterMappingInfo()
    info, err := readClusterMappingInfo(filename)
    if err != nil {
        // discard not found error as this file is expected to be created by
        // the admin in case of failover.
@@ -114,3 +114,9 @@ func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) {

    return &mappingInfo, nil
}

// GetClusterMappingInfo returns corresponding cluster details like clusterID's
// poolID,fscID lists read from configfile.
func GetClusterMappingInfo(clusterID string) (*[]ClusterMappingInfo, error) {
    return getClusterMappingInfo(clusterID, clusterMappingConfigFile)
}
@@ -138,19 +138,19 @@ func TestGetClusterMappingInfo(t *testing.T) {
        currentTT := tt
        t.Run(currentTT.name, func(t *testing.T) {
            t.Parallel()
            clusterMappingConfigFile = fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, currentI)
            mappingConfigFile := fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, currentI)
            if len(currentTT.mappingFilecontent) != 0 {
                err = ioutil.WriteFile(clusterMappingConfigFile, currentTT.mappingFilecontent, 0o600)
                err = ioutil.WriteFile(mappingConfigFile, currentTT.mappingFilecontent, 0o600)
                if err != nil {
                    t.Errorf("GetClusterMappingInfo() error = %v", err)
                    t.Errorf("failed to write to %q, error = %v", mappingConfigFile, err)
                }
            }
            data, mErr := GetClusterMappingInfo(currentTT.clusterID)
            data, mErr := getClusterMappingInfo(currentTT.clusterID, mappingConfigFile)
            if (mErr != nil) != currentTT.expectErr {
                t.Errorf("GetClusterMappingInfo() error = %v, expected Error %v", mErr, currentTT.expectErr)
                t.Errorf("getClusterMappingInfo() error = %v, expected Error %v", mErr, currentTT.expectErr)
            }
            if !reflect.DeepEqual(data, currentTT.expectedData) {
                t.Errorf("GetClusterMappingInfo() = %v, expected data %v", data, currentTT.expectedData)
                t.Errorf("getClusterMappingInfo() = %v, expected data %v", data, currentTT.expectedData)
            }
        })
    }
@@ -94,9 +94,10 @@ def list_pvc_vol_name_mapping(arg):
    # list all pvc and get mapping
    else:
        cmd += ['get', 'pvc', '-o', 'json']
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
        stdout, stderr = out.communicate()

        with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
            stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to list pvc %s", stderr)
@@ -194,10 +195,9 @@ def check_pv_name_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
    if arg.toolboxdeployed is True:
        kube = get_cmd_prefix(arg)
        cmd = kube + cmd
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    stdout, stderr = out.communicate()
    if stderr is not None:
        return False
    name = b''
@@ -229,10 +229,9 @@ def check_image_in_cluster(arg, image_uuid, pool_name, volname_prefix):
        kube = get_cmd_prefix(arg)
        cmd = kube + cmd

    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    stdout, stderr = out.communicate()
    if stderr is not None:
        if arg.debug:
            print(b"failed to toolbox %s", stderr)
@@ -256,10 +255,10 @@ def check_image_uuid_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
    if arg.toolboxdeployed is True:
        kube = get_cmd_prefix(arg)
        cmd = kube + cmd
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)

    stdout, stderr = out.communicate()
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to get toolbox %s", stderr)
@@ -320,9 +319,10 @@ def get_volume_handler_from_pv(arg, pvname):
        cmd += ["--kubeconfig", arg.kubeconfig]

    cmd += ['get', 'pv', pvname, '-o', 'json']
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    stdout, stderr = out.communicate()

    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to pv %s", stderr)
@@ -347,10 +347,10 @@ def get_tool_box_pod_name(arg):
        cmd += ["--kubeconfig", arg.kubeconfig]
    cmd += ['get', 'po', '-l=app=rook-ceph-tools',
            '-n', arg.rooknamespace, '-o', 'json']
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)

    stdout, stderr = out.communicate()
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to get toolbox pod name %s", stderr)
@@ -377,10 +377,10 @@ def get_pool_name(arg, vol_id, is_rbd):
    if arg.toolboxdeployed is True:
        kube = get_cmd_prefix(arg)
        cmd = kube + cmd
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)

    stdout, stderr = out.communicate()
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to get the pool name %s", stderr)
@@ -426,9 +426,10 @@ def check_subvol_path(arg, subvol_name, subvol_group, fsname):
    if arg.toolboxdeployed is True:
        kube = get_cmd_prefix(arg)
        cmd = kube + cmd
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    stdout, stderr = out.communicate()

    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to get toolbox %s", stderr)
@@ -451,9 +452,10 @@ def get_subvol_group(arg):
        cmd += ["--kubeconfig", arg.kubeconfig]
    cmd += ['get', 'cm', arg.configmap, '-o', 'json']
    cmd += ['--namespace', arg.configmapnamespace]
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    stdout, stderr = out.communicate()

    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to get configmap %s", stderr)
@@ -463,6 +465,7 @@ def get_subvol_group(arg):
    except ValueError as err:
        print(err, stdout)
        sys.exit()

    # default subvolumeGroup
    subvol_group = "csi"
    cm_data = config_map['data'].get('config.json')
@@ -508,9 +511,10 @@ def get_pv_data(arg, pvname):
        cmd += ["--kubeconfig", arg.kubeconfig]

    cmd += ['get', 'pv', pvname, '-o', 'json']
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    stdout, stderr = out.communicate()

    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as out:
        stdout, stderr = out.communicate()

    if stderr is not None:
        if arg.debug:
            print("failed to get pv %s", stderr)
vendor/github.com/kubernetes-csi/csi-lib-utils/connection/connection.go (generated, vendored)
@@ -84,7 +84,8 @@ func ExitOnConnectionLoss() func() bool {
        if err := ioutil.WriteFile(terminationLogPath, []byte(terminationMsg), 0644); err != nil {
            klog.Errorf("%s: %s", terminationLogPath, err)
        }
        klog.Fatalf(terminationMsg)
        klog.Exit(terminationMsg)
        // Not reached.
        return false
    }
}
vendor/modules.txt (vendored)
@@ -176,7 +176,7 @@ github.com/inconshreveable/mousetrap
github.com/jmespath/go-jmespath
# github.com/json-iterator/go v1.1.11
github.com/json-iterator/go
# github.com/kubernetes-csi/csi-lib-utils v0.9.1
# github.com/kubernetes-csi/csi-lib-utils v0.10.0
## explicit
github.com/kubernetes-csi/csi-lib-utils/connection
github.com/kubernetes-csi/csi-lib-utils/metrics