Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-01-18 10:49:30 +00:00)

Merge pull request #97 from ceph/devel

sync downstream devel branch with upstream

This commit is contained in:
commit f219445b67
Makefile (4 lines changed)
@@ -47,8 +47,8 @@ endif
 GO_PROJECT=github.com/ceph/ceph-csi

 CEPH_VERSION ?= $(shell . $(CURDIR)/build.env ; echo $${CEPH_VERSION})
-# TODO: ceph_preview tag may be removed with go-ceph 0.16.0
-# TODO: ceph_ci_untested is added for NFS-export management (go-ceph#655)
+# TODO: ceph_preview tag may be removed with go-ceph 0.17.0
+# TODO: ceph_ci_untested is added for subvolume metadata (go-ceph#691) and snapshot metadata management (go-ceph#698)
 GO_TAGS_LIST ?= $(CEPH_VERSION) ceph_preview ceph_ci_untested

 # go build flags
@@ -59,7 +59,6 @@ func getConfig() *retestConfig {
 		if len(strings.Split(os.Getenv("GITHUB_REPOSITORY"), "/")) == 2 {
 			return strings.Split(os.Getenv("GITHUB_REPOSITORY"), "/")[0], strings.Split(os.Getenv("GITHUB_REPOSITORY"), "/")[1]
 		}
-
 		}
 		return "", ""
 	}()
@@ -168,7 +167,7 @@ func main() {
 				log.Printf("failed to create comment %v\n", err)
 				continue
 			}
-			//Post comment with target URL for retesting
+			// Post comment with target URL for retesting
 			msg = fmt.Sprintf("@%s %q test failed. Logs are available at [location](%s) for debugging", re.GetUser().GetLogin(), r.GetContext(), r.GetTargetURL())
 			comment.Body = github.String(msg)
 			_, _, err = c.client.Issues.CreateComment(context.TODO(), c.owner, c.repo, prNumber, comment)
@@ -199,7 +198,8 @@ func main() {

 // checkPRRequiredApproval check PullRequest has required approvals.
 func (c *retestConfig) checkPRRequiredApproval(prNumber int) bool {
-	rev, _, err := c.client.PullRequests.ListReviews(context.TODO(), c.owner, c.repo, prNumber, &github.ListOptions{})
+	opts := github.ListOptions{PerPage: 100} // defaults to 30 reviews, too few sometimes
+	rev, _, err := c.client.PullRequests.ListReviews(context.TODO(), c.owner, c.repo, prNumber, &opts)
 	if err != nil {
 		log.Printf("failed to list reviews %v\n", err)
 		return false
build.env (16 lines changed)
@@ -16,18 +16,18 @@ BASE_IMAGE=quay.io/ceph/ceph:v17
 CEPH_VERSION=quincy

 # standard Golang options
-GOLANG_VERSION=1.17.5
+GOLANG_VERSION=1.17.10
 GO111MODULE=on

 # commitlint version
 COMMITLINT_VERSION=latest

 # static checks and linters
-GOLANGCI_VERSION=v1.43.0
+GOLANGCI_VERSION=v1.46.2

 # external snapshotter version
 # Refer: https://github.com/kubernetes-csi/external-snapshotter/releases
-SNAPSHOT_VERSION=v5.0.1
+SNAPSHOT_VERSION=v6.0.1

 # "go test" configuration
 # set to stdout or html to enable coverage reporting, disabled by default
@@ -35,21 +35,21 @@ SNAPSHOT_VERSION=v5.0.1
 #GO_COVER_DIR=_output/

 # helm chart generation, testing and publishing
-HELM_VERSION=v3.8.2
+HELM_VERSION=v3.9.0

 # minikube settings
-MINIKUBE_VERSION=v1.25.2
+MINIKUBE_VERSION=v1.26.0-beta.1
 VM_DRIVER=none
 CHANGE_MINIKUBE_NONE_USER=true

 # Rook options
-ROOK_VERSION=v1.8.2
+ROOK_VERSION=v1.9.4
 # Provide ceph image path
-ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v16
+ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v17

 # CSI sidecar version
 CSI_ATTACHER_VERSION=v3.4.0
-CSI_SNAPSHOTTER_VERSION=v5.0.1
+CSI_SNAPSHOTTER_VERSION=v6.0.1
 CSI_PROVISIONER_VERSION=v3.1.0
 CSI_RESIZER_VERSION=v1.4.0
 CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.5.1
@@ -2,7 +2,7 @@
 apiVersion: v1
 appVersion: canary
 description: "Container Storage Interface (CSI) driver,
-provisioner, snapshotter and attacher for Ceph cephfs"
+provisioner, snapshotter and resizer for Ceph cephfs"
 name: ceph-csi-cephfs
 version: 3-canary
 keywords:
@@ -91,7 +91,7 @@ charts and their default values.
 | `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
 | `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
 | `nodeplugin.registrar.image.repository` | Node-Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
-| `nodeplugin.registrar.image.tag` | Image tag | `v2.2.0` |
+| `nodeplugin.registrar.image.tag` | Image tag | `v2.5.1` |
 | `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
 | `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
 | `nodeplugin.plugin.image.tag` | Image tag | `canary` |
@@ -106,20 +106,15 @@ charts and their default values.
 | `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
 | `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
 | `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
-| `provisioner.provisioner.image.tag` | Specifies image tag | `v2.2.2` |
+| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.1.0` |
 | `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
-| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` |
-| `provisioner.attacher.image.tag` | Specifies image tag | `v3.2.1` |
-| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
-| `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
-| `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
 | `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
-| `provisioner.resizer.image.tag` | Specifies image tag | `v1.2.0` |
+| `provisioner.resizer.image.tag` | Specifies image tag | `v1.4.0` |
 | `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
 | `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
 | `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
 | `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
-| `provisioner.snapshotter.image.tag` | Specifies image tag | `v4.1.1` |
+| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.0.1` |
 | `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
 | `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
 | `provisioner.tolerations` | Specifies the tolerations for provisioner deployment | `{}` |
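The sidecar versions in the table above are plain chart values, so clusters typically pin them from a values file rather than editing the chart. A minimal sketch of such an override, assuming the standard Helm workflow; the file name and the particular keys chosen here are illustrative only:

```yaml
# values-override.yaml (hypothetical file name)
# Pins ceph-csi-cephfs sidecar images to the versions from the table above.
nodeplugin:
  registrar:
    image:
      tag: v2.5.1
provisioner:
  snapshotter:
    image:
      tag: v6.0.1
  resizer:
    enabled: true
    image:
      tag: v1.4.0
```

Passed with `helm install -f values-override.yaml` (or `helm upgrade`), these values replace the defaults documented in the table.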
@@ -7,5 +7,5 @@ kind: CSIDriver
 metadata:
   name: {{ .Values.driverName }}
 spec:
-  attachRequired: true
+  attachRequired: false
   podInfoOnMount: false
@@ -40,14 +40,6 @@ rules:
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents/status"]
     verbs: ["update", "patch"]
-{{- if .Values.provisioner.attacher.enabled }}
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update", "patch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments/status"]
-    verbs: ["patch"]
-{{- end -}}
 {{- if .Values.provisioner.resizer.enabled }}
   - apiGroups: [""]
     resources: ["persistentvolumeclaims/status"]
@@ -86,24 +86,6 @@ spec:
               mountPath: /csi
           resources:
{{ toYaml .Values.provisioner.snapshotter.resources | indent 12 }}
-{{- if .Values.provisioner.attacher.enabled }}
-        - name: csi-attacher
-          image: "{{ .Values.provisioner.attacher.image.repository }}:{{ .Values.provisioner.attacher.image.tag }}"
-          imagePullPolicy: {{ .Values.provisioner.attacher.image.pullPolicy }}
-          args:
-            - "--v={{ .Values.logLevel }}"
-            - "--csi-address=$(ADDRESS)"
-            - "--leader-election=true"
-            - "--retry-interval-start=500ms"
-          env:
-            - name: ADDRESS
-              value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /csi
-          resources:
-{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
-{{- end }}
 {{- if .Values.provisioner.resizer.enabled }}
         - name: csi-resizer
           image: "{{ .Values.provisioner.resizer.image.repository }}:{{ .Values.provisioner.resizer.image.tag }}"
@@ -166,15 +166,6 @@ provisioner:
       pullPolicy: IfNotPresent
     resources: {}

-  attacher:
-    name: attacher
-    enabled: true
-    image:
-      repository: registry.k8s.io/sig-storage/csi-attacher
-      tag: v3.4.0
-      pullPolicy: IfNotPresent
-    resources: {}
-
   resizer:
     name: resizer
     enabled: true
@@ -187,7 +178,7 @@ provisioner:
   snapshotter:
     image:
       repository: registry.k8s.io/sig-storage/csi-snapshotter
-      tag: v4.2.0
+      tag: v6.0.1
       pullPolicy: IfNotPresent
     resources: {}

@@ -2,7 +2,7 @@
 apiVersion: v1
 appVersion: canary
 description: "Container Storage Interface (CSI) driver,
-provisioner, snapshotter, and attacher for Ceph RBD"
+provisioner, snapshotter, resizer and attacher for Ceph RBD"
 name: ceph-csi-rbd
 version: 3-canary
 keywords:
@@ -93,7 +93,7 @@ charts and their default values.
 | `nodeplugin.priorityClassName` | Set user created priorityclassName for csi plugin pods. default is system-node-critical which is highest priority | `system-node-critical` |
 | `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
 | `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
-| `nodeplugin.registrar.image.tag` | Image tag | `v2.2.0` |
+| `nodeplugin.registrar.image.tag` | Image tag | `v2.5.1` |
 | `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
 | `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
 | `nodeplugin.plugin.image.tag` | Image tag | `canary` |
@@ -111,23 +111,24 @@ charts and their default values.
 | `provisioner.minSnapshotsOnImage` | Minimum number of snapshots allowed on rbd image to trigger flattening | `250` |
 | `provisioner.skipForceFlatten` | Skip image flattening if kernel support mapping of rbd images which has the deep-flatten feature | `false` |
 | `provisioner.timeout` | GRPC timeout for waiting for creation or deletion of a volume | `60s` |
+| `provisioner.clustername` | Cluster name to set on the RBD image | "" |
 | `provisioner.priorityClassName` | Set user created priorityclassName for csi provisioner pods. Default is `system-cluster-critical` which is less priority than `system-node-critical` | `system-cluster-critical` |
 | `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
 | `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
-| `provisioner.provisioner.image.tag` | Specifies image tag | `v2.2.2` |
+| `provisioner.provisioner.image.tag` | Specifies image tag | `canary` |
 | `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
 | `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` |
-| `provisioner.attacher.image.tag` | Specifies image tag | `v3.2.1` |
+| `provisioner.attacher.image.tag` | Specifies image tag | `v3.4.0` |
 | `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
 | `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
 | `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
 | `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
-| `provisioner.resizer.image.tag` | Specifies image tag | `v1.2.0` |
+| `provisioner.resizer.image.tag` | Specifies image tag | `v1.4.0` |
 | `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
 | `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
 | `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
 | `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
-| `provisioner.snapshotter.image.tag` | Specifies image tag | `v4.1.1` |
+| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.0.1` |
 | `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
 | `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |
 | `provisioner.tolerations` | Specifies the tolerations for provisioner deployment | `{}` |
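The newly documented `provisioner.clustername` value feeds the `--clustername` flag that the templates below add to the provisioner pods. A minimal values sketch; the cluster name itself is an example only:

```yaml
# Records this name in the metadata of RBD images created by the provisioner.
provisioner:
  clustername: "k8s-cluster-1"
```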
@@ -31,4 +31,7 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["list", "get"]
+  - apiGroups: [""]
+    resources: ["serviceaccounts/token"]
+    verbs: ["create"]
 {{- end -}}
@@ -70,5 +70,8 @@ rules:
     resources: ["csinodes"]
     verbs: ["get", "list", "watch"]
 {{- end }}
+  - apiGroups: [""]
+    resources: ["serviceaccounts/token"]
+    verbs: ["create"]

 {{- end -}}
@@ -152,6 +152,9 @@ spec:
 {{- if .Values.provisioner.profiling.enabled }}
           - "--enableprofiling={{ .Values.provisioner.profiling.enabled }}"
 {{- end }}
+{{- if .Values.provisioner.clustername }}
+          - "--clustername={{ .Values.provisioner.clustername }}"
+{{- end }}
           env:
             - name: POD_IP
               valueFrom:
@@ -199,6 +202,9 @@ spec:
           - "--v={{ .Values.logLevel }}"
           - "--drivername=$(DRIVER_NAME)"
           - "--drivernamespace=$(DRIVER_NAMESPACE)"
+{{- if .Values.provisioner.clustername }}
+          - "--clustername={{ .Values.provisioner.clustername }}"
+{{- end }}
           env:
             - name: DRIVER_NAMESPACE
               valueFrom:
@@ -144,6 +144,8 @@ provisioner:
   deployController: true
   # Timeout for waiting for creation or deletion of a volume
   timeout: 60s
+  # cluster name to set on the RBD image
+  # clustername: "k8s-cluster-1"
   # Hard limit for maximum number of nested volume clones that are taken before
   # a flatten occurs
   hardMaxCloneDepth: 8
@@ -229,7 +231,7 @@ provisioner:
   snapshotter:
     image:
       repository: registry.k8s.io/sig-storage/csi-snapshotter
-      tag: v4.2.0
+      tag: v6.0.1
       pullPolicy: IfNotPresent
     resources: {}

@@ -68,6 +68,7 @@ func init() {
 	flag.StringVar(&conf.NodeID, "nodeid", "", "node id")
 	flag.StringVar(&conf.PluginPath, "pluginpath", defaultPluginPath, "plugin path")
 	flag.StringVar(&conf.StagingPath, "stagingpath", defaultStagingPath, "staging path")
+	flag.StringVar(&conf.ClusterName, "clustername", "", "name of the cluster")
 	flag.StringVar(&conf.InstanceID, "instanceid", "", "Unique ID distinguishing this instance of Ceph CSI among other"+
 		" instances, when sharing Ceph clusters across CSI instances for provisioning")
 	flag.IntVar(&conf.PidLimit, "pidlimit", 0, "the PID limit to configure through cgroups")
@@ -249,6 +250,7 @@ func main() {
 		cfg := controller.Config{
 			DriverName:  dname,
 			Namespace:   conf.DriverNamespace,
+			ClusterName: conf.ClusterName,
 		}
 		// initialize all controllers before starting.
 		initControllers()
@@ -77,7 +77,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-snapshotter
-          image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
+          image: registry.k8s.io/sig-storage/csi-snapshotter:v6.0.1
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
@@ -90,20 +90,6 @@ spec:
           volumeMounts:
             - name: socket-dir
               mountPath: /csi
-        - name: csi-cephfsplugin-attacher
-          image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
-          args:
-            - "--v=5"
-            - "--csi-address=$(ADDRESS)"
-            - "--leader-election=true"
-            - "--retry-interval-start=500ms"
-          env:
-            - name: ADDRESS
-              value: /csi/csi-provisioner.sock
-          imagePullPolicy: "IfNotPresent"
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /csi
         - name: csi-cephfsplugin
           # for stable functionality replace canary with latest release version
           image: quay.io/cephcsi/cephcsi:canary
@@ -37,12 +37,6 @@ rules:
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update", "patch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments/status"]
-    verbs: ["patch"]
   - apiGroups: [""]
     resources: ["persistentvolumeclaims/status"]
     verbs: ["update", "patch"]
@@ -6,5 +6,5 @@ kind: CSIDriver
 metadata:
   name: cephfs.csi.ceph.com
 spec:
-  attachRequired: true
+  attachRequired: false
   podInfoOnMount: false
@@ -30,6 +30,9 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["list", "get"]
+  - apiGroups: [""]
+    resources: ["serviceaccounts/token"]
+    verbs: ["create"]
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -63,6 +63,9 @@ rules:
   - apiGroups: [""]
     resources: ["serviceaccounts"]
     verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["serviceaccounts/token"]
+    verbs: ["create"]
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
@@ -71,7 +71,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-snapshotter
-          image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
+          image: registry.k8s.io/sig-storage/csi-snapshotter:v6.0.1
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"
docs/cephfs-snapshot-backed-volumes.md (new file, 61 lines)
@@ -0,0 +1,61 @@
# Provisioning and mounting CephFS snapshot-backed volumes

Snapshot-backed volumes allow CephFS subvolume snapshots to be exposed as
regular read-only PVCs. No data cloning is performed, and provisioning such
volumes is done in constant time.

For more details, please refer to the [Snapshots as shallow read-only volumes](./design/proposals/cephfs-snapshot-shallow-ro-vol.md)
design document.

## Prerequisites

Prerequisites for this feature are the same as for creating PVCs with a
snapshot volume source. See [Create snapshot and Clone Volume](./snap-clone.md)
for more information.

## Usage

### Provisioning a snapshot-backed volume from a volume snapshot

For provisioning new snapshot-backed volumes, the following configuration must
be set for storage classes and their PVCs, respectively (a combined sketch
follows this list):

* StorageClass:
  * Specify the `backingSnapshot: "true"` parameter.
* PersistentVolumeClaim:
  * Set `storageClassName` to point to your storage class with backing
    snapshots enabled.
  * Define `spec.dataSource` for your desired source volume snapshot.
  * Set `spec.accessModes` to `ReadOnlyMany`. This is the only access mode
    supported by this feature.
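A minimal sketch of such a StorageClass/PVC pair. The resource names, `clusterID`, and `fsName` are placeholders, and the usual provisioner/node-stage secret parameters are omitted for brevity:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-snapshot-backed   # placeholder name
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: <cluster-id>            # placeholder
  fsName: myfs                       # placeholder
  backingSnapshot: "true"            # expose snapshots as read-only volumes
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-snapshot-backed-pvc   # placeholder name
spec:
  storageClassName: csi-cephfs-snapshot-backed
  accessModes:
    - ReadOnlyMany                   # the only supported access mode
  dataSource:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: cephfs-pvc-snapshot        # placeholder VolumeSnapshot name
  resources:
    requests:
      storage: 1Gi
```
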
### Mounting snapshots from pre-provisioned volumes

Steps for defining a PersistentVolume and PersistentVolumeClaim for
pre-provisioned CephFS subvolumes are identical to those described in
[Static PVC with ceph-csi](./static-pvc.md), except that one additional
parameter must be specified: `backingSnapshotID`. The CephFS CSI driver will
retrieve the snapshot identified by the given ID from within the specified
subvolume and expose it to workloads in read-only mode. The volume access mode
must be set to `ReadOnlyMany`.

Note that the snapshot retrieval is done by traversing `<rootPath>/.snap` and
searching for a directory that contains the `backingSnapshotID` value in its
name. The specified snapshot ID does not necessarily need to be the complete
directory name inside `<rootPath>/.snap`; however, it must be complete enough
to identify that directory uniquely.

Example:

```
$ ls .snap
_f279df14-6729-4342-b82f-166f45204233_1099511628283
_a364870e-6729-4342-b82f-166f45204233_1099635085072
```

`f279df14-6729-4342-b82f-166f45204233` would be considered a valid value for
the `backingSnapshotID` volume parameter, whereas `6729-4342-b82f-166f45204233`
would not, as it would be ambiguous.

If the given snapshot ID is ambiguous, or no such snapshot is found, mounting
the PVC will fail with the INVALID_ARGUMENT error code.
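For illustration, a pre-provisioned PersistentVolume carrying `backingSnapshotID` might look like the sketch below. The field layout is an assumption modelled on [Static PVC with ceph-csi](./static-pvc.md), every concrete value (names, IDs, paths, secrets) is a placeholder, and the snapshot ID reuses the example from the listing above:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cephfs-snapshot-backed-pv             # placeholder
spec:
  accessModes:
    - ReadOnlyMany                            # required for snapshot-backed volumes
  capacity:
    storage: 1Gi
  csi:
    driver: cephfs.csi.ceph.com
    volumeHandle: cephfs-snapshot-backed-pv   # placeholder
    nodeStageSecretRef:
      name: csi-cephfs-secret                 # placeholder
      namespace: default
    volumeAttributes:
      clusterID: <cluster-id>                 # placeholder
      fsName: myfs                            # placeholder
      staticVolume: "true"
      rootPath: /volumes/group/subvolume      # placeholder subvolume path
      backingSnapshotID: f279df14-6729-4342-b82f-166f45204233
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ""
```
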
@@ -81,6 +81,7 @@ you're running it inside a k8s cluster and find the config itself).
 | `pool` | no | Ceph pool into which volume data shall be stored |
 | `volumeNamePrefix` | no | Prefix to use for naming subvolumes (defaults to `csi-vol-`). |
 | `snapshotNamePrefix` | no | Prefix to use for naming snapshots (defaults to `csi-snap-`) |
+| `backingSnapshot` | no | Boolean value. The PVC shall be backed by the CephFS snapshot specified in its data source. `pool` parameter must not be specified. (defaults to `false`) |
 | `kernelMountOptions` | no | Comma separated string of mount options accepted by cephfs kernel mounter, by default no options are passed. Check man mount.ceph for options. |
 | `fuseMountOptions` | no | Comma separated string of mount options accepted by ceph-fuse mounter, by default no options are passed. |
 | `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | Name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value |
@@ -40,6 +40,7 @@ make image-cephcsi
 | `--enablegrpcmetrics` | `false` | [Deprecated] Enable grpc metrics collection and start prometheus server |
 | `--polltime` | `"60s"` | Time interval in between each poll |
 | `--timeout` | `"3s"` | Probe timeout in seconds |
+| `--clustername` | _empty_ | Cluster name to set on RBD image |
 | `--histogramoption` | `0.5,2,6` | [Deprecated] Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
 | `--domainlabels` | _empty_ | Kubernetes node labels to use as CSI domain labels for topology aware provisioning, should be a comma separated value (ex:= "failure-domain/region,failure-domain/zone") |
 | `--rbdhardmaxclonedepth` | `8` | Hard limit for maximum number of nested volume clones that are taken before a flatten occurs |
@@ -65,6 +66,9 @@ make image-cephcsi
 | `mounter` | no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images |
 | `encrypted` | no | disabled by default, use `"true"` to enable LUKS encryption on PVC and `"false"` to disable it. **Do not change for existing storageclasses** |
 | `encryptionKMSID` | no | required if encryption is enabled and a kms is used to store passphrases |
+| `stripeUnit` | no | stripe unit in bytes |
+| `stripeCount` | no | objects to stripe over before looping |
+| `objectSize` | no | object size in bytes |

 **NOTE:** An accompanying CSI configuration file, needs to be provided to the
 running pods. Refer to [Creating CSI configuration](../examples/README.md#creating-csi-configuration)
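As a sketch, the three striping parameters documented above would sit in an RBD StorageClass like this; `clusterID` and `pool` are placeholders, and the numeric values (which must be quoted, since StorageClass parameters are strings) mirror the e2e defaults used elsewhere in this change:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-striped        # placeholder name
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <cluster-id>      # placeholder
  pool: replicapool            # placeholder
  stripeUnit: "4096"           # stripe unit in bytes
  stripeCount: "8"             # objects to stripe over before looping
  objectSize: "131072"         # object size in bytes
reclaimPolicy: Delete
```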
@@ -111,7 +111,7 @@ are available while running tests:
 After the support for snapshot/clone has been added to ceph-csi,
 you need to follow these steps before running e2e.

-- Install snapshot controller and Beta snapshot CRD
+- Install snapshot controller and snapshot CRD

   ```console
   ./scripts/install-snapshot.sh install
e2e/cephfs.go (147 lines changed)
@@ -22,7 +22,7 @@ import (
 	"strings"
 	"sync"

-	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
+	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	. "github.com/onsi/ginkgo" // nolint
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -298,8 +298,6 @@ var _ = Describe(cephfsType, func() {
 			})
 		}
 		By("verify generic ephemeral volume support", func() {
-			// generic ephemeral volume support is beta since v1.21.
-			if k8sVersionGreaterEquals(f.ClientSet, 1, 21) {
 			err := createCephfsStorageClass(f.ClientSet, f, true, nil)
 			if err != nil {
 				e2elog.Failf("failed to create CephFS storageclass: %v", err)
@@ -327,7 +325,6 @@ var _ = Describe(cephfsType, func() {
 			if err != nil {
 				e2elog.Failf("failed to delete CephFS storageclass: %v", err)
 			}
-			}
 		})

 		By("verify RWOP volume support", func() {
@@ -517,8 +514,8 @@ var _ = Describe(cephfsType, func() {
 				e2elog.Failf("failed to list pods for Deployment: %v", err)
 			}

-			doStat := func(podName string) (stdErr string, err error) {
-				_, stdErr, err = execCommandInContainerByPodName(
+			doStat := func(podName string) (string, error) {
+				_, stdErr, execErr := execCommandInContainerByPodName(
 					f,
 					fmt.Sprintf("stat %s", depl.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath),
 					depl.Namespace,
@@ -526,7 +523,7 @@ var _ = Describe(cephfsType, func() {
 					depl.Spec.Template.Spec.Containers[0].Name,
 				)

-				return stdErr, err
+				return stdErr, execErr
 			}
 			ensureStatSucceeds := func(podName string) error {
 				stdErr, statErr := doStat(podName)
@@ -1212,6 +1209,142 @@ var _ = Describe(cephfsType, func() {
 			validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
 			validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
 			validateOmapCount(f, 0, cephfsType, metadataPool, snapsType)
+
+			err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
+			if err != nil {
+				e2elog.Failf("failed to delete CephFS snapshotclass: %v", err)
+			}
+		})
+
+		By("checking snapshot-backed volume", func() {
+			err := createCephFSSnapshotClass(f)
+			if err != nil {
+				e2elog.Failf("failed to delete CephFS storageclass: %v", err)
+			}
+
+			pvc, err := loadPVC(pvcPath)
+			if err != nil {
+				e2elog.Failf("failed to load PVC: %v", err)
+			}
+			pvc.Namespace = f.UniqueName
+			err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create PVC: %v", err)
+			}
+
+			app, err := loadApp(appPath)
+			if err != nil {
+				e2elog.Failf("failed to load application: %v", err)
+			}
+			app.Namespace = f.UniqueName
+			app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
+			appLabels := map[string]string{
+				appKey: appLabel,
+			}
+			app.Labels = appLabels
+			optApp := metav1.ListOptions{
+				LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabels[appKey]),
+			}
+			err = writeDataInPod(app, &optApp, f)
+			if err != nil {
+				e2elog.Failf("failed to write data: %v", err)
+			}

+			appTestFilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
+
+			snap := getSnapshot(snapshotPath)
+			snap.Namespace = f.UniqueName
+			snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
+			err = createSnapshot(&snap, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create snapshot: %v", err)
+			}
+
+			err = appendToFileInContainer(f, app, appTestFilePath, "hello", &optApp)
+			if err != nil {
+				e2elog.Failf("failed to append data: %v", err)
+			}
+
+			parentFileSum, err := calculateSHA512sum(f, app, appTestFilePath, &optApp)
+			if err != nil {
+				e2elog.Failf("failed to get SHA512 sum for file: %v", err)
+			}
+
+			err = deleteResource(cephFSExamplePath + "storageclass.yaml")
+			if err != nil {
+				e2elog.Failf("failed to delete CephFS storageclass: %v", err)
+			}
+			err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{
+				"backingSnapshot": "true",
+			})
+			if err != nil {
+				e2elog.Failf("failed to create CephFS storageclass: %v", err)
+			}
+
+			pvcClone, err := loadPVC(pvcClonePath)
+			if err != nil {
+				e2elog.Failf("failed to load PVC: %v", err)
+			}
+			// Snapshot-backed volumes support read-only access modes only.
+			pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
+			appClone, err := loadApp(appClonePath)
+			if err != nil {
+				e2elog.Failf("failed to load application: %v", err)
+			}
+			appCloneLabels := map[string]string{
+				appKey: appCloneLabel,
+			}
+			appClone.Labels = appCloneLabels
+			optAppClone := metav1.ListOptions{
+				LabelSelector: fmt.Sprintf("%s=%s", appKey, appCloneLabels[appKey]),
+			}
+			pvcClone.Namespace = f.UniqueName
+			appClone.Namespace = f.UniqueName
+			err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create PVC and app: %v", err)
+			}
+
+			// Snapshot-backed volume shouldn't contribute to total subvolume count.
+			validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
+
+			// Deleting snapshot before deleting pvcClone should succeed. It will be
+			// deleted once all volumes that are backed by this snapshot are gone.
+			err = deleteSnapshot(&snap, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to delete snapshot: %v", err)
+			}
+
+			appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
+
+			snapFileSum, err := calculateSHA512sum(f, appClone, appCloneTestFilePath, &optAppClone)
+			if err != nil {
+				e2elog.Failf("failed to get SHA512 sum for file: %v", err)
+			}
+
+			if parentFileSum == snapFileSum {
+				e2elog.Failf("SHA512 sums of files in parent subvol and snapshot should differ")
+			}
+
+			err = deletePVCAndApp("", f, pvcClone, appClone)
+			if err != nil {
+				e2elog.Failf("failed to delete PVC or application: %v", err)
+			}
+
+			err = deletePVCAndApp("", f, pvc, app)
+			if err != nil {
+				e2elog.Failf("failed to delete PVC or application: %v", err)
+			}
+
+			err = deleteResource(cephFSExamplePath + "storageclass.yaml")
+			if err != nil {
+				e2elog.Failf("failed to delete CephFS storageclass: %v", err)
+			}
+
+			err = createCephfsStorageClass(f.ClientSet, f, false, nil)
+			if err != nil {
+				e2elog.Failf("failed to create CephFS storageclass: %v", err)
+			}
 		})

 		By("create a PVC-PVC clone and bind it to an app", func() {
@@ -24,7 +24,7 @@ import (
 	"strings"
 	"time"

-	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
+	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -60,7 +60,8 @@ func createCephfsStorageClass(
 	c kubernetes.Interface,
 	f *framework.Framework,
 	enablePool bool,
-	params map[string]string) error {
+	params map[string]string,
+) error {
 	scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "storageclass.yaml")
 	sc, err := getStorageClass(scPath)
 	if err != nil {
@@ -253,7 +254,8 @@ func getSnapName(snapNamespace, snapName string) (string, error) {
 func deleteBackingCephFSSubvolumeSnapshot(
 	f *framework.Framework,
 	pvc *v1.PersistentVolumeClaim,
-	snap *snapapi.VolumeSnapshot) error {
+	snap *snapapi.VolumeSnapshot,
+) error {
 	snapshotName, err := getSnapName(snap.Namespace, snap.Name)
 	if err != nil {
 		return err
@@ -30,7 +30,8 @@ func validateBiggerCloneFromPVC(f *framework.Framework,
 	pvcPath,
 	appPath,
 	pvcClonePath,
-	appClonePath string) error {
+	appClonePath string,
+) error {
 	const (
 		size    = "1Gi"
 		newSize = "2Gi"
@@ -95,7 +95,8 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
 func createCustomConfigMap(
 	c kubernetes.Interface,
 	pluginPath string,
-	clusterInfo map[string]map[string]string) error {
+	clusterInfo map[string]map[string]string,
+) error {
 	path := pluginPath + configMap
 	cm := v1.ConfigMap{}
 	err := unmarshal(path, &cm)
@@ -24,6 +24,7 @@ import (
 	"time"

 	appsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	v1 "k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,7 +41,8 @@ func execCommandInPodWithName(
 	cmdString,
 	podName,
 	containerName,
-	nameSpace string) (string, string, error) {
+	nameSpace string,
+) (string, string, error) {
 	cmd := []string{"/bin/sh", "-c", cmdString}
 	podOpt := framework.ExecOptions{
 		Command: cmd,
@@ -296,3 +298,180 @@ func (rnr *rookNFSResource) Do(action kubectlAction) error {

 	return nil
 }
+
+func waitForDeploymentUpdateScale(
+	c kubernetes.Interface,
+	ns,
+	deploymentName string,
+	scale *autoscalingv1.Scale,
+	timeout int,
+) error {
+	t := time.Duration(timeout) * time.Minute
+	start := time.Now()
+	err := wait.PollImmediate(poll, t, func() (bool, error) {
+		scaleResult, upsErr := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(),
+			deploymentName, scale, metav1.UpdateOptions{})
+		if upsErr != nil {
+			if isRetryableAPIError(upsErr) {
+				return false, nil
+			}
+			e2elog.Logf(
+				"Deployment UpdateScale %s/%s has not completed yet (%d seconds elapsed)",
+				ns, deploymentName, int(time.Since(start).Seconds()))
+
+			return false, fmt.Errorf("error update scale deployment %s/%s: %w", ns, deploymentName, upsErr)
+		}
+		if scaleResult.Spec.Replicas != scale.Spec.Replicas {
+			e2elog.Logf("scale result not matching for deployment %s/%s, desired scale %d, got %d",
+				ns, deploymentName, scale.Spec.Replicas, scaleResult.Spec.Replicas)
+
+			return false, fmt.Errorf("error scale not matching in deployment %s/%s: %w", ns, deploymentName, upsErr)
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed update scale deployment %s/%s: %w", ns, deploymentName, err)
+	}
+
+	return nil
+}
+
+func waitForDeploymentUpdate(
+	c kubernetes.Interface,
+	deployment *appsv1.Deployment,
+	timeout int,
+) error {
+	t := time.Duration(timeout) * time.Minute
+	start := time.Now()
+	err := wait.PollImmediate(poll, t, func() (bool, error) {
+		_, upErr := c.AppsV1().Deployments(deployment.Namespace).Update(
+			context.TODO(), deployment, metav1.UpdateOptions{})
+		if upErr != nil {
+			if isRetryableAPIError(upErr) {
+				return false, nil
+			}
+			e2elog.Logf(
+				"Deployment Update %s/%s has not completed yet (%d seconds elapsed)",
+				deployment.Namespace, deployment.Name, int(time.Since(start).Seconds()))
+
+			return false, fmt.Errorf("error updating deployment %s/%s: %w",
+				deployment.Namespace, deployment.Name, upErr)
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed update deployment %s/%s: %w", deployment.Namespace, deployment.Name, err)
+	}
+
+	return nil
+}
+
+// contains check if slice contains string.
+func contains(s []string, e string) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+
+	return false
+}
+
+func waitForContainersArgsUpdate(
+	c kubernetes.Interface,
+	ns,
+	deploymentName,
+	key,
+	value string,
+	containers []string,
+	timeout int,
+) error {
+	e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)
+
+	// Scale down to 0.
+	scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("error get scale deployment %s/%s: %w", ns, deploymentName, err)
+	}
+	count := scale.Spec.Replicas
+	scale.ResourceVersion = "" // indicate the scale update should be unconditional
+	scale.Spec.Replicas = 0
+	err = waitForDeploymentUpdateScale(c, ns, deploymentName, scale, timeout)
+	if err != nil {
+		return err
+	}
+
+	// Update deployment.
+	deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("error get deployment %s/%s: %w", ns, deploymentName, err)
+	}
+	cid := deployment.Spec.Template.Spec.Containers // cid: read as containers in deployment
+	for i := range cid {
+		if contains(containers, cid[i].Name) {
+			match := false
+			for j, ak := range cid[i].Args {
+				if ak == key {
+					// do replacement of value
+					match = true
+					cid[i].Args[j] = fmt.Sprintf("--%s=%s", key, value)

+					break
+				}
+			}
+			if !match {
+				// append a new key value
+				cid[i].Args = append(cid[i].Args, fmt.Sprintf("--%s=%s", key, value))
+			}
+			deployment.Spec.Template.Spec.Containers[i].Args = cid[i].Args
+		}
+	}
+	// clear creationTimestamp, generation, resourceVersion, and uid
+	deployment.CreationTimestamp = metav1.Time{}
+	deployment.Generation = 0
+	deployment.ResourceVersion = "0"
+	deployment.UID = ""
+	err = waitForDeploymentUpdate(c, deployment, timeout)
+	if err != nil {
+		return err
+	}
+
+	// Scale up to count.
+	scale.Spec.Replicas = count
+	err = waitForDeploymentUpdateScale(c, ns, deploymentName, scale, timeout)
+	if err != nil {
+		return err
+	}
+
+	// wait for scale to become count
+	t := time.Duration(timeout) * time.Minute
+	start := time.Now()
+	err = wait.PollImmediate(poll, t, func() (bool, error) {
+		deploy, getErr := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+		if getErr != nil {
+			if isRetryableAPIError(getErr) {
+				return false, nil
+			}
+			e2elog.Logf(
+				"Deployment Get %s/%s has not completed yet (%d seconds elapsed)",
+				ns, deploymentName, int(time.Since(start).Seconds()))
+
+			return false, fmt.Errorf("error getting deployment %s/%s: %w", ns, deploymentName, getErr)
+		}
+		if deploy.Status.Replicas != count {
+			e2elog.Logf("Expected deployment %s/%s replicas %d, got %d", ns, deploymentName, count, deploy.Status.Replicas)
+
+			return false, fmt.Errorf("error expected deployment %s/%s replicas %d, got %d",
+				ns, deploymentName, count, deploy.Status.Replicas)
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed getting deployment %s/%s: %w", ns, deploymentName, err)
+	}
+
+	return nil
+}
@@ -23,7 +23,7 @@ import (
 	"sync"
 	"time"

-	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
+	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	. "github.com/onsi/ginkgo" // nolint
 	v1 "k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -143,7 +143,8 @@ func createNFSStorageClass(
 	c clientset.Interface,
 	f *framework.Framework,
 	enablePool bool,
-	params map[string]string) error {
+	params map[string]string,
+) error {
 	scPath := fmt.Sprintf("%s/%s", nfsExamplePath, "storageclass.yaml")
 	sc, err := getStorageClass(scPath)
 	if err != nil {
e2e/pod.go (31 lines changed)
@@ -136,7 +136,8 @@ func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.
 func getCommandInPodOpts(
 	f *framework.Framework,
 	c, ns, cn string,
-	opt *metav1.ListOptions) (framework.ExecOptions, error) {
+	opt *metav1.ListOptions,
+) (framework.ExecOptions, error) {
 	cmd := []string{"/bin/sh", "-c", c}
 	pName, cName, err := findPodAndContainerName(f, ns, cn, opt)
 	if err != nil {
@@ -161,7 +162,8 @@ func getCommandInPodOpts(
 // stderr is returned as a string, and err will be set on a failure.
 func execCommandInDaemonsetPod(
 	f *framework.Framework,
-	c, daemonsetName, nodeName, containerName, ns string) (string, error) {
+	c, daemonsetName, nodeName, containerName, ns string,
+) (string, error) {
 	selector, err := getDaemonSetLabelSelector(f, ns, daemonsetName)
 	if err != nil {
 		return "", err
@@ -224,7 +226,8 @@ func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOpti
 }

 func execCommandInContainer(
-	f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions) (string, string, error) {
+	f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions,
+) (string, string, error) {
 	podOpt, err := getCommandInPodOpts(f, c, ns, cn, opt)
 	if err != nil {
 		return "", "", err
@@ -425,6 +428,25 @@ func calculateSHA512sum(f *framework.Framework, app *v1.Pod, filePath string, op
 	return checkSum, nil
 }

+func appendToFileInContainer(
+	f *framework.Framework,
+	app *v1.Pod,
+	filePath,
+	toAppend string,
+	opt *metav1.ListOptions,
+) error {
+	cmd := fmt.Sprintf("echo %q >> %s", toAppend, filePath)
+	_, stdErr, err := execCommandInPod(f, cmd, app.Namespace, opt)
+	if err != nil {
+		return fmt.Errorf("could not append to file %s: %w ; stderr: %s", filePath, err, stdErr)
+	}
+	if stdErr != "" {
+		return fmt.Errorf("could not append to file %s: %v", filePath, stdErr)
+	}
+
+	return nil
+}
+
 // getKernelVersionFromDaemonset gets the kernel version from the specified container.
 func getKernelVersionFromDaemonset(f *framework.Framework, ns, dsn, cn string) (string, error) {
 	selector, err := getDaemonSetLabelSelector(f, ns, dsn)
@@ -468,7 +490,8 @@ func validateRWOPPodCreation(
 	f *framework.Framework,
 	pvc *v1.PersistentVolumeClaim,
 	app *v1.Pod,
-	baseAppName string) error {
+	baseAppName string,
+) error {
 	var err error
 	// create one more app with same PVC
 	name := fmt.Sprintf("%s%d", f.UniqueName, deployTimeout)
@@ -239,7 +239,8 @@ func getPersistentVolume(c kubernetes.Interface, name string) (*v1.PersistentVol

 func getPVCAndPV(
 	c kubernetes.Interface,
-	pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, *v1.PersistentVolume, error) {
+	pvcName, pvcNamespace string,
+) (*v1.PersistentVolumeClaim, *v1.PersistentVolume, error) {
 	pvc, err := getPersistentVolumeClaim(c, pvcNamespace, pvcName)
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to get PVC: %w", err)
e2e/rbd.go (192 lines changed)
@@ -211,6 +211,37 @@ func checkGetKeyError(err error, stdErr string) bool {
 	return false
 }

+// checkClusternameInMetadata check for cluster name metadata on RBD image.
+// nolint:nilerr // intentionally returning nil on error in the retry loop.
+func checkClusternameInMetadata(f *framework.Framework, ns, pool, image string) {
+	t := time.Duration(deployTimeout) * time.Minute
+	var (
+		coName  string
+		stdErr  string
+		execErr error
+	)
+	err := wait.PollImmediate(poll, t, func() (bool, error) {
+		coName, stdErr, execErr = execCommandInToolBoxPod(f,
+			fmt.Sprintf("rbd image-meta get %s --image=%s %s", rbdOptions(pool), image, clusterNameKey),
+			ns)
+		if execErr != nil || stdErr != "" {
+			e2elog.Logf("failed to get cluster name %s/%s %s: err=%v stdErr=%q",
+				rbdOptions(pool), image, clusterNameKey, execErr, stdErr)
+
+			return false, nil
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		e2elog.Failf("could not get cluster name %s/%s %s: %v", rbdOptions(pool), image, clusterNameKey, err)
+	}
+	coName = strings.TrimSuffix(coName, "\n")
+	if coName != defaultClusterName {
+		e2elog.Failf("expected coName %q got %q", defaultClusterName, coName)
+	}
+}
+
 var _ = Describe("RBD", func() {
 	f := framework.NewDefaultFramework(rbdType)
 	var c clientset.Interface
@@ -290,6 +321,14 @@ var _ = Describe("RBD", func() {
 		if !util.CheckKernelSupport(kernelRelease, nbdZeroIOtimeoutSupport) {
 			nbdMapOptions = "nbd:debug-rbd=20,io-timeout=330"
 		}
+
+		// wait for cluster name update in deployment
+		containers := []string{"csi-rbdplugin", "csi-rbdplugin-controller"}
+		err = waitForContainersArgsUpdate(c, cephCSINamespace, rbdDeploymentName,
+			"clustername", defaultClusterName, containers, deployTimeout)
+		if err != nil {
+			e2elog.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, rbdDeploymentName, err)
+		}
 	})

 	AfterEach(func() {
@@ -453,6 +492,8 @@ var _ = Describe("RBD", func() {
 			e2elog.Failf("expected pvName %q got %q", pvcObj.Spec.VolumeName, pvName)
 		}

+		checkClusternameInMetadata(f, rookNamespace, defaultRBDPool, imageList[0])
+
 		err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
 		if err != nil {
 			e2elog.Failf("failed to delete pvc: %v", err)
@@ -701,6 +742,7 @@ var _ = Describe("RBD", func() {
 				e2elog.Failf("PV name found on %s/%s %s=%s: err=%v stdErr=%q",
 					rbdOptions(defaultRBDPool), imageList[0], pvNameKey, pvName, err, stdErr)
 			}
+			checkClusternameInMetadata(f, rookNamespace, defaultRBDPool, imageList[0])

 			err = deleteSnapshot(&snap, deployTimeout)
 			if err != nil {
@@ -715,8 +757,6 @@ var _ = Describe("RBD", func() {
 		})

 		By("verify generic ephemeral volume support", func() {
-			// generic ephemeral volume support is supported from 1.21
-			if k8sVersionGreaterEquals(f.ClientSet, 1, 21) {
 			// create application
 			app, err := loadApp(appEphemeralPath)
 			if err != nil {
@@ -742,7 +782,6 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to validate rbd images in pool %s trash: %v", defaultRBDPool, err)
 			}
-			}
 		})

 		By("validate RBD migration PVC", func() {
@@ -4041,6 +4080,153 @@ var _ = Describe("RBD", func() {
 			})
 		})

+		By("validate rbd image stripe", func() {
+			stripeUnit := 4096
+			stripeCount := 8
+			objectSize := 131072
+			err := deleteResource(rbdExamplePath + "storageclass.yaml")
+			if err != nil {
+				e2elog.Failf("failed to delete storageclass: %v", err)
+			}
+
+			err = createRBDStorageClass(
+				f.ClientSet,
+				f,
+				defaultSCName,
+				nil,
+				map[string]string{
+					"stripeUnit":  fmt.Sprintf("%d", stripeUnit),
+					"stripeCount": fmt.Sprintf("%d", stripeCount),
+					"objectSize":  fmt.Sprintf("%d", objectSize),
+				},
+				deletePolicy)
+			if err != nil {
+				e2elog.Failf("failed to create storageclass: %v", err)
+			}
+			defer func() {
+				err = deleteResource(rbdExamplePath + "storageclass.yaml")
+				if err != nil {
+					e2elog.Failf("failed to delete storageclass: %v", err)
+				}
+				err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
+				if err != nil {
+					e2elog.Failf("failed to create storageclass: %v", err)
+				}
+			}()
+
+			err = createRBDSnapshotClass(f)
+			if err != nil {
+				e2elog.Failf("failed to create storageclass: %v", err)
+			}
+			defer func() {
+				err = deleteRBDSnapshotClass()
+				if err != nil {
+					e2elog.Failf("failed to delete VolumeSnapshotClass: %v", err)
+				}
+			}()
+
+			// create PVC and bind it to an app
+			pvc, err := loadPVC(pvcPath)
+			if err != nil {
+				e2elog.Failf("failed to load PVC: %v", err)
+			}
+
+			pvc.Namespace = f.UniqueName
+
+			err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create PVC and application: %v", err)
+			}
+			// validate created backend rbd images
+			validateRBDImageCount(f, 1, defaultRBDPool)
+			validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
+			err = validateStripe(f, pvc, stripeUnit, stripeCount, objectSize)
+			if err != nil {
+				e2elog.Failf("failed to validate stripe: %v", err)
+			}
+
+			snap := getSnapshot(snapshotPath)
+			snap.Namespace = f.UniqueName
+			snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
+
+			err = createSnapshot(&snap, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create snapshot: %v", err)
+			}
+			// validate created backend rbd images
+			// parent PVC + snapshot
+			totalImages := 2
+			validateRBDImageCount(f, totalImages, defaultRBDPool)
+			validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
+			validateOmapCount(f, 1, rbdType, defaultRBDPool, snapsType)
+			pvcClone, err := loadPVC(pvcClonePath)
+			if err != nil {
+				e2elog.Failf("failed to load PVC: %v", err)
+			}
+
+			// create clone PVC as ROX
+			pvcClone.Namespace = f.UniqueName
+			pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
+			err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create PVC: %v", err)
+			}
+			// validate created backend rbd images
+			// parent pvc + snapshot + clone
+			totalImages = 3
+			validateRBDImageCount(f, totalImages, defaultRBDPool)
+			validateOmapCount(f, 2, rbdType, defaultRBDPool, volumesType)
+			validateOmapCount(f, 1, rbdType, defaultRBDPool, snapsType)
+			err = validateStripe(f, pvcClone, stripeUnit, stripeCount, objectSize)
+			if err != nil {
+				e2elog.Failf("failed to validate stripe for clone: %v", err)
+			}
+			// delete snapshot
+			err = deleteSnapshot(&snap, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to delete snapshot: %v", err)
+			}
+			// delete clone pvc
+			err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to delete PVC: %v", err)
+			}
+
+			pvcSmartClone, err := loadPVC(pvcSmartClonePath)
+			if err != nil {
+				e2elog.Failf("failed to load pvcSmartClone: %v", err)
+			}
+			pvcSmartClone.Namespace = f.UniqueName
+
+			err = createPVCAndvalidatePV(f.ClientSet, pvcSmartClone, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create pvc: %v", err)
+			}
+			// validate created backend rbd images
+			// parent pvc + temp clone + clone
+			totalImages = 3
+			validateRBDImageCount(f, totalImages, defaultRBDPool)
+			validateOmapCount(f, 2, rbdType, defaultRBDPool, volumesType)
+			err = validateStripe(f, pvcSmartClone, stripeUnit, stripeCount, objectSize)
+			if err != nil {
+				e2elog.Failf("failed to validate stripe for clone: %v", err)
+			}
+			// delete parent pvc
+			err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to delete PVC: %v", err)
+			}
+
+			// delete clone pvc
+			err = deletePVCAndValidatePV(f.ClientSet, pvcSmartClone, deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to delete PVC: %v", err)
+			}
+			// validate created backend rbd images
+			validateRBDImageCount(f, 0, defaultRBDPool)
+			validateOmapCount(f, 0, rbdType, defaultRBDPool, volumesType)
+		})
+
 		// Make sure this should be last testcase in this file, because
 		// it deletes pool
 		By("Create a PVC and delete PVC when backend pool deleted", func() {
@@ -28,7 +28,7 @@ import (

 	"github.com/ceph/ceph-csi/internal/util"

-	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
+	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	v1 "k8s.io/api/core/v1"
 	scv1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -117,7 +117,8 @@ func createRBDStorageClass(
 	f *framework.Framework,
 	name string,
 	scOptions, parameters map[string]string,
-	policy v1.PersistentVolumeReclaimPolicy) error {
+	policy v1.PersistentVolumeReclaimPolicy,
+) error {
 	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml")
 	sc, err := getStorageClass(scPath)
 	if err != nil {
@@ -281,7 +282,7 @@ func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string,
 		return "", err
 	}
 	if stdErr != "" {
-		return strings.TrimSpace(stdOut), fmt.Errorf(stdErr)
+		return strings.TrimSpace(stdOut), fmt.Errorf("%s", stdErr)
 	}

 	return strings.TrimSpace(stdOut), nil
@@ -757,7 +758,8 @@ func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim,
 func checkPVCDataPoolForImageInPool(
 	f *framework.Framework,
 	pvc *v1.PersistentVolumeClaim,
-	pool, dataPool string) error {
+	pool, dataPool string,
+) error {
 	stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
 	if err != nil {
 		return err
@@ -940,3 +942,69 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)

 	return err
 }
+
+// imageInfo strongly typed JSON spec for image info.
+type imageInfo struct {
+	Name        string `json:"name"`
+	StripeUnit  int    `json:"stripe_unit"`
+	StripeCount int    `json:"stripe_count"`
+	ObjectSize  int    `json:"object_size"`
+}
+
+// getImageInfo queries rbd about the given image and returns its metadata, and returns
+// error if provided image is not found.
+func getImageInfo(f *framework.Framework, imageName, poolName string) (imageInfo, error) {
+	// rbd --format=json info [image-spec | snap-spec]
+	var imgInfo imageInfo
+
+	stdOut, stdErr, err := execCommandInToolBoxPod(
+		f,
+		fmt.Sprintf("rbd info %s %s --format json", rbdOptions(poolName), imageName),
+		rookNamespace)
+	if err != nil {
+		return imgInfo, fmt.Errorf("failed to get rbd info: %w", err)
+	}
+	if stdErr != "" {
+		return imgInfo, fmt.Errorf("failed to get rbd info: %v", stdErr)
+	}
+	err = json.Unmarshal([]byte(stdOut), &imgInfo)
+	if err != nil {
+		return imgInfo, fmt.Errorf("unmarshal failed: %w. raw buffer response: %s",
+			err, stdOut)
+	}
+
+	return imgInfo, nil
+}
+
+// validateStripe validate the stripe count, stripe unit and object size of the
+// image.
+func validateStripe(f *framework.Framework,
+	pvc *v1.PersistentVolumeClaim,
+	stripeUnit,
+	stripeCount,
+	objectSize int,
+) error {
+	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+	if err != nil {
+		return err
+	}
+
+	imgInfo, err := getImageInfo(f, imageData.imageName, defaultRBDPool)
+	if err != nil {
+		return err
+	}
+
+	if imgInfo.ObjectSize != objectSize {
+		return fmt.Errorf("objectSize %d does not match expected %d", imgInfo.ObjectSize, objectSize)
+	}
+
+	if imgInfo.StripeUnit != stripeUnit {
+		return fmt.Errorf("stripeUnit %d does not match expected %d", imgInfo.StripeUnit, stripeUnit)
+	}
+
+	if imgInfo.StripeCount != stripeCount {
+		return fmt.Errorf("stripeCount %d does not match expected %d", imgInfo.StripeCount, stripeCount)
+	}
+
+	return nil
+}
@ -21,8 +21,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
|
||||
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1"
|
||||
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
|
||||
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1"
|
||||
. "github.com/onsi/gomega" // nolint
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -300,7 +300,8 @@ func validateBiggerPVCFromSnapshot(f *framework.Framework,
|
||||
appPath,
|
||||
snapPath,
|
||||
pvcClonePath,
|
||||
appClonePath string) error {
|
||||
appClonePath string,
|
||||
) error {
|
||||
const (
|
||||
size = "1Gi"
|
||||
newSize = "2Gi"
|
||||
|
@ -41,7 +41,8 @@ const (
|
||||
func getStaticPV(
|
||||
name, volName, size, secretName, secretNS, sc, driverName string,
|
||||
blockPV bool,
|
||||
options, annotations map[string]string, policy v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
|
||||
options, annotations map[string]string, policy v1.PersistentVolumeReclaimPolicy,
|
||||
) *v1.PersistentVolume {
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -491,7 +492,8 @@ func validateRBDStaticResize(
|
||||
app *v1.Pod,
|
||||
appOpt *metav1.ListOptions,
|
||||
pvc *v1.PersistentVolumeClaim,
|
||||
rbdImageName string) error {
|
||||
rbdImageName string,
|
||||
) error {
|
||||
// resize rbd image
|
||||
size := staticPVNewSize
|
||||
cmd := fmt.Sprintf(
|
||||
|
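Note on the new getImageInfo helper above: it shells out to `rbd info <image> --format json` inside the Rook toolbox pod and decodes the reply into the imageInfo struct. A minimal, self-contained sketch of that decoding step follows; the JSON document is hypothetical (a real reply carries many more fields, which json.Unmarshal simply ignores because they have no matching struct tags):

package main

import (
	"encoding/json"
	"fmt"
)

// imageInfo matches the struct introduced above; the json tags are what tie
// it to the `rbd info --format json` output.
type imageInfo struct {
	Name        string `json:"name"`
	StripeUnit  int    `json:"stripe_unit"`
	StripeCount int    `json:"stripe_count"`
	ObjectSize  int    `json:"object_size"`
}

func main() {
	// Hypothetical reply for an image created with stripeUnit=4096,
	// stripeCount=8 and objectSize=4194304; the values are made up.
	raw := `{"name":"csi-vol-0001","stripe_unit":4096,"stripe_count":8,"object_size":4194304,"size":1073741824}`

	var info imageInfo
	if err := json.Unmarshal([]byte(raw), &info); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", info)
}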
30
e2e/utils.go
30
e2e/utils.go
@ -30,7 +30,7 @@ import (
"sync"
"time"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
appsv1 "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
@ -64,12 +64,17 @@ const (
// Default key and label for ListOptions.
appKey = "app"
appLabel = "write-data-in-pod"
appCloneLabel = "app-clone"

noError = ""
// labels/selector used to list/delete rbd pods.
rbdPodLabels = "app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)"

exitOneErr = "command terminated with exit code 1"

// cluster name, set by user.
clusterNameKey = "csi.ceph.com/cluster/name"
defaultClusterName = "k8s-cluster-1"
)

var (
@ -341,7 +346,8 @@ func createPVCAndApp(
f *framework.Framework,
pvc *v1.PersistentVolumeClaim,
app *v1.Pod,
pvcTimeout int) error {
pvcTimeout int,
) error {
if name != "" {
pvc.Name = name
app.Name = name
@ -361,7 +367,8 @@ func createPVCAndDeploymentApp(
f *framework.Framework,
pvc *v1.PersistentVolumeClaim,
app *appsv1.Deployment,
pvcTimeout int) error {
pvcTimeout int,
) error {
err := createPVCAndvalidatePV(f.ClientSet, pvc, pvcTimeout)
if err != nil {
return err
@ -414,7 +421,8 @@ func validatePVCAndDeploymentAppBinding(
func deletePVCAndDeploymentApp(
f *framework.Framework,
pvc *v1.PersistentVolumeClaim,
app *appsv1.Deployment) error {
app *appsv1.Deployment,
) error {
err := deleteDeploymentApp(f.ClientSet, app.Name, app.Namespace, deployTimeout)
if err != nil {
return err
@ -445,7 +453,8 @@ func deletePVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolu
func createPVCAndAppBinding(
pvcPath, appPath string,
f *framework.Framework,
pvcTimeout int) (*v1.PersistentVolumeClaim, *v1.Pod, error) {
pvcTimeout int,
) (*v1.PersistentVolumeClaim, *v1.Pod, error) {
pvc, err := loadPVC(pvcPath)
if err != nil {
return nil, nil, err
@ -486,7 +495,7 @@ func getMountType(selector, mountPath string, f *framework.Framework) (string, e
return "", err
}
if stdErr != "" {
return strings.TrimSpace(stdOut), fmt.Errorf(stdErr)
return strings.TrimSpace(stdOut), fmt.Errorf("%s", stdErr)
}

return strings.TrimSpace(stdOut), nil
@ -802,7 +811,8 @@ func validatePVCClone(
dataPool string,
kms kmsConfig,
validatePVC validateFunc,
f *framework.Framework) {
f *framework.Framework,
) {
var wg sync.WaitGroup
wgErrs := make([]error, totalCount)
chErrs := make([]error, totalCount)
@ -1013,7 +1023,8 @@ func validatePVCSnapshot(
totalCount int,
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string,
kms, restoreKMS kmsConfig, restoreSCName,
dataPool string, f *framework.Framework) {
dataPool string, f *framework.Framework,
) {
var wg sync.WaitGroup
wgErrs := make([]error, totalCount)
chErrs := make([]error, totalCount)
@ -1358,7 +1369,8 @@ func validatePVCSnapshot(
func validateController(
f *framework.Framework,
pvcPath, appPath, scPath string,
scOptions, scParams map[string]string) error {
scOptions, scParams map[string]string,
) error {
size := "1Gi"
poolName := defaultRBDPool
expandSize := "10Gi"
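Most of the e2e hunks above are a mechanical reflow of multi-line function signatures: the closing parenthesis moves onto its own line. That is the layout gofumpt enforces, which is presumably what drove the change (an assumption; the commit itself does not say). A before/after sketch:

package main

import "fmt"

// oldStyle shows the pre-change layout: the closing parenthesis shares a
// line with the final parameter.
func oldStyle(name string,
	timeout int) string {
	return fmt.Sprintf("%s/%d", name, timeout)
}

// newStyle shows the reflowed layout: the closing parenthesis sits alone
// before the opening brace.
func newStyle(
	name string,
	timeout int,
) string {
	return fmt.Sprintf("%s/%d", name, timeout)
}

func main() {
	fmt.Println(oldStyle("pvc", 1), newStyle("pvc", 2))
}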
@ -47,6 +47,11 @@ parameters:
# If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-"

# (optional) Boolean value. The PVC shall be backed by the CephFS snapshot
# specified in its data source. The `pool` parameter must not be specified.
# (defaults to `false`)
# backingSnapshot: "true"

reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
@ -134,6 +134,14 @@ parameters:
# {"domainLabel":"zone","value":"zone1"}]}
# ]

# Image striping. Refer to https://docs.ceph.com/en/latest/man/8/rbd/#striping
# for more details.
# (optional) stripe unit in bytes.
# stripeUnit: <>
# (optional) objects to stripe over before looping.
# stripeCount: <>
# (optional) The object size in bytes.
# objectSize: <>
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
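The three striping parameters above are plain byte counts passed through to RBD image creation. As a rough illustration only (not the driver's actual validation), RBD striping is generally documented as requiring the stripe unit to divide the object size evenly; a hypothetical sanity check under that assumption:

package main

import "fmt"

// checkStriping is a hypothetical helper, not part of the driver: it only
// illustrates the assumed divisibility relationship between the parameters.
func checkStriping(stripeUnit, stripeCount, objectSize int) error {
	if stripeUnit <= 0 || stripeCount <= 0 || objectSize <= 0 {
		return fmt.Errorf("all striping values must be positive")
	}
	if objectSize%stripeUnit != 0 {
		return fmt.Errorf("stripeUnit %d must evenly divide objectSize %d", stripeUnit, objectSize)
	}

	return nil
}

func main() {
	// 4 KiB stripe unit, striped over 8 objects of 4 MiB each.
	fmt.Println(checkStriping(4096, 8, 4194304)) // <nil>
	fmt.Println(checkStriping(4097, 8, 4194304)) // error
}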
104
go.mod
104
go.mod
@ -4,40 +4,40 @@ go 1.17

require (
github.com/IBM/keyprotect-go-client v0.7.0
github.com/aws/aws-sdk-go v1.44.20
github.com/aws/aws-sdk-go-v2/service/sts v1.16.5
github.com/aws/aws-sdk-go v1.44.28
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
// TODO: API for managing NFS-exports requires `ceph_ci_untested` build-tag
github.com/ceph/go-ceph v0.15.0
// TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
github.com/ceph/go-ceph v0.16.0
github.com/container-storage-interface/spec v1.6.0
github.com/csi-addons/replication-lib-utils v0.2.0
github.com/csi-addons/spec v0.1.2-0.20211220115741-32fa508dadbe
github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.5.0
github.com/hashicorp/vault/api v1.6.0
github.com/kubernetes-csi/csi-lib-utils v0.11.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.19.0
github.com/pborman/uuid v1.2.1
github.com/prometheus/client_golang v1.12.2
github.com/stretchr/testify v1.7.1
github.com/stretchr/testify v1.7.2
golang.org/x/crypto v0.0.0-20220214200702-86341886e292
golang.org/x/sys v0.0.0-20220209214540-3681064d5158
google.golang.org/grpc v1.46.2
google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.28.0
k8s.io/api v0.24.0
k8s.io/apimachinery v0.24.0
k8s.io/api v0.24.1
k8s.io/apimachinery v0.24.1
k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.24.0
k8s.io/cloud-provider v0.24.1
k8s.io/klog/v2 v2.60.1
//
// when updating k8s.io/kubernetes, make sure to update the replace section too
//
k8s.io/kubernetes v1.24.0
k8s.io/mount-utils v0.24.0
k8s.io/kubernetes v1.24.1
k8s.io/mount-utils v0.24.1
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2
)
@ -47,11 +47,11 @@ require (
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/armon/go-metrics v0.3.9 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect
github.com/aws/smithy-go v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 // indirect
github.com/aws/smithy-go v1.11.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v3 v3.0.0 // indirect
@ -73,7 +73,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
@ -86,15 +86,15 @@ require (
github.com/hashicorp/go-retryablehttp v0.6.6 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/go-version v1.2.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/vault v1.4.2 // indirect
github.com/hashicorp/vault/sdk v0.4.1 // indirect
github.com/hashicorp/vault/sdk v0.5.0 // indirect
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
@ -108,7 +108,7 @@ require (
github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@ -151,11 +151,11 @@ require (
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.23.0 // indirect
k8s.io/apiserver v0.24.0 // indirect
k8s.io/component-base v0.24.0 // indirect
k8s.io/component-helpers v0.24.0 // indirect
k8s.io/apiserver v0.24.1 // indirect
k8s.io/component-base v0.24.1 // indirect
k8s.io/component-helpers v0.24.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.0.0 // indirect
@ -175,31 +175,31 @@ replace (
//
// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
//
k8s.io/api => k8s.io/api v0.24.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.0
k8s.io/apimachinery => k8s.io/apimachinery v0.24.0
k8s.io/apiserver => k8s.io/apiserver v0.24.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.0
k8s.io/client-go => k8s.io/client-go v0.24.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.0
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.0
k8s.io/code-generator => k8s.io/code-generator v0.24.0
k8s.io/component-base => k8s.io/component-base v0.24.0
k8s.io/component-helpers => k8s.io/component-helpers v0.24.0
k8s.io/controller-manager => k8s.io/controller-manager v0.24.0
k8s.io/cri-api => k8s.io/cri-api v0.24.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.0
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.0
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.0
k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.0
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.0
k8s.io/kubectl => k8s.io/kubectl v0.24.0
k8s.io/kubelet => k8s.io/kubelet v0.24.0
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.0
k8s.io/metrics => k8s.io/metrics v0.24.0
k8s.io/mount-utils => k8s.io/mount-utils v0.24.0
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.0
k8s.io/api => k8s.io/api v0.24.1
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.1
k8s.io/apimachinery => k8s.io/apimachinery v0.24.1
k8s.io/apiserver => k8s.io/apiserver v0.24.1
k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.1
k8s.io/client-go => k8s.io/client-go v0.24.1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.1
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.1
k8s.io/code-generator => k8s.io/code-generator v0.24.1
k8s.io/component-base => k8s.io/component-base v0.24.1
k8s.io/component-helpers => k8s.io/component-helpers v0.24.1
k8s.io/controller-manager => k8s.io/controller-manager v0.24.1
k8s.io/cri-api => k8s.io/cri-api v0.24.1
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.1
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.1
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.1
k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.1
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.1
k8s.io/kubectl => k8s.io/kubectl v0.24.1
k8s.io/kubelet => k8s.io/kubelet v0.24.1
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.1
k8s.io/metrics => k8s.io/metrics v0.24.1
k8s.io/mount-utils => k8s.io/mount-utils v0.24.1
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.1
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.1
// layeh.com seems to be misbehaving
layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
)
158
go.sum
158
go.sum
@ -141,20 +141,20 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.20 h1:nllTRN24EfhDSeKsNbIc6HoC8Ogd2NCJTRB8l84kDlM=
github.com/aws/aws-sdk-go v1.44.20/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.16.3 h1:0W1TSJ7O6OzwuEvIXAtJGvOeQ0SGAhcpxPN2/NK5EhM=
github.com/aws/aws-sdk-go-v2 v1.16.3/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 h1:uFWgo6mGJI1n17nbcvSc6fxVuR3xLNqvXt12JCnEcT8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10/go.mod h1:F+EZtuIwjlv35kRJPyBGcsA4f7bnSoz15zOQ2lJq1Z4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 h1:cnsvEKSoHN4oAN7spMMr0zhEW2MHnhAVpmqQg8E6UcM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4/go.mod h1:8glyUqVIM4AmeenIsPo0oVh3+NUwnsQml2OFupfQW+0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 h1:b16QW0XWl0jWjLABFc1A+uh145Oqv+xDcObNk0iQgUk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4/go.mod h1:uKkN7qmSIsNJVyMtxNQoCEYMvFEXbOg9fwCJPdfp2u8=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.5 h1:aEMP+STBsvCY1U/yL4pAqUrU4oFli46AdbCXvVbTiHI=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.5/go.mod h1:lfSYenAXtavyX2A1LsViglqlG9eEFYxNryTZS5rn3QE=
github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/aws/aws-sdk-go v1.44.28 h1:h/OAqEqY18wq//v6h4GNPMmCkxuzSDrWuGyrvSiRqf4=
github.com/aws/aws-sdk-go v1.44.28/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.16.5 h1:Ah9h1TZD9E2S1LzHpViBO3Jz9FPL5+rmflmb8hXirtI=
github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 h1:Zt7DDk5V7SyQULUUwIKzsROtVzp/kVvcz15uQx/Tkow=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 h1:eeXdGVtXEe+2Jc49+/vAzna3FAQnUD4AagAw8tzbmfc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 h1:0ZxYAZ1cn7Swi/US55VKciCE6RhRHIwCKIWaMLdT6pg=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 h1:HLzjwQM9975FQWSF3uENDGHT1gFQm/q3QXu2BYIcI08=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
github.com/aws/smithy-go v1.11.3 h1:DQixirEFM9IaKxX1olZ3ke3nvxRS2xMDteKIDWxozW8=
github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -182,8 +182,8 @@ github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
github.com/ceph/go-ceph v0.15.0 h1:ILB3NaLWOtt4u/2d8I8HZTC4Ycm1PsOYVar3IFU1xlo=
github.com/ceph/go-ceph v0.15.0/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY=
github.com/ceph/go-ceph v0.16.0 h1:hEhVfEFsLoGJF+i3r7Wwh4QlMN+MnWqNxfic9v6GV04=
github.com/ceph/go-ceph v0.16.0/go.mod h1:SzhpLdyU+ixxJ68bbqoEa481P5N5d5lv5jVMxcRMLfU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
@ -404,9 +404,9 @@ github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhD
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@ -465,8 +465,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@ -592,12 +592,14 @@ github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 h1:6KMBnfEv0/kLAz0O76sliN5mX
github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc=
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5 h1:MBgwAFPUbfuI0+tmDU/aeM1MARvdbqWmiieXIalKqDE=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1 h1:Yc026VyMyIpq1UWRnakHRG01U8fJm+nEfEmjoAb00n8=
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@ -662,8 +664,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28=
github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM=
github.com/hashicorp/vault/api v1.6.0 h1:B8UUYod1y1OoiGHq9GtpiqSnGOUEWHaA26AY8RQEDY4=
github.com/hashicorp/vault/api v1.6.0/go.mod h1:h1K70EO2DgnBaTz5IsL6D5ERsNt5Pce93ueVS2+t0Xc=
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
@ -673,8 +675,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W
github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10=
github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo=
github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0=
github.com/hashicorp/vault/sdk v0.5.0 h1:EED7p0OCU3OY5SAqJwSANofY1YKMytm+jDHDQ2EzGVQ=
github.com/hashicorp/vault/sdk v0.5.0/go.mod h1:UJZHlfwj7qUJG8g22CuxUgkdJouFrBNvBHCyx8XAPdo=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
@ -760,8 +762,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.11.0 h1:FHWOBtAZBA/hVk7v/qaXgG9Sxv0/n06DebPFuDwumqg=
github.com/kubernetes-csi/csi-lib-utils v0.11.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.0 h1:05QzRsnbMQ1Ymg/7iJj1k6RVw+rgelB60Ud5j4GgmGM=
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.0/go.mod h1:tnHiLn3P10N95fjn7O40QH5ovN0EFGAxqdTpUMrX6bU=
github.com/layeh/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@ -825,8 +827,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
@ -1080,8 +1082,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@ -1110,6 +1112,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@ -1303,6 +1306,7 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@ -1394,7 +1398,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1426,6 +1429,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1529,11 +1533,11 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
@ -1665,8 +1669,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
@ -1723,8 +1727,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
@ -1737,28 +1742,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.0 h1:J0hann2hfxWr1hinZIDefw7Q96wmCBx6SSB8IY0MdDg=
k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I=
k8s.io/apiextensions-apiserver v0.24.0 h1:JfgFqbA8gKJ/uDT++feAqk9jBIwNnL9YGdQvaI9DLtY=
k8s.io/apiextensions-apiserver v0.24.0/go.mod h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM=
k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ=
k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.24.0 h1:GR7kGsjOMfilRvlG3Stxv/3uz/ryvJ/aZXc5pqdsNV0=
k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA=
k8s.io/cli-runtime v0.24.0/go.mod h1:9XxoZDsEkRFUThnwqNviqzljtT/LdHtNWvcNFrAXl0A=
k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U=
k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw=
k8s.io/cloud-provider v0.24.0 h1:kQ6zB2oy0VDl+6vdRAKEbtwDM1MmuhNCyA/v+Fk2g30=
k8s.io/cloud-provider v0.24.0/go.mod h1:cqkEWJWzToaqtS5ti8KQJQcL2IWssWGXHzicxZyaC6s=
k8s.io/cluster-bootstrap v0.24.0/go.mod h1:xw+IfoaUweMCAoi+VYhmqkcjii2G7gNg59dmGn7hi0g=
k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.0 h1:h5jieHZQoHrY/lHG+HyrSbJeyfuitheBvqvKwKHVC0g=
k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA=
k8s.io/component-helpers v0.24.0 h1:hZIHGfdd55thhqd9oxjDTw68OAPauDMJ+8hC69aNw1I=
k8s.io/component-helpers v0.24.0/go.mod h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c=
k8s.io/controller-manager v0.24.0/go.mod h1:ageMNQZc7cNH0FF1oarm7wZs6XyJj/V82nNVmgPaeDU=
k8s.io/cri-api v0.24.0/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig=
k8s.io/csi-translation-lib v0.24.0/go.mod h1:jJaC3a1tI3IShByiAQmOOCl5PKpiZ51Vh70c9Eg2msM=
k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY=
k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ=
k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0=
k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q=
k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I=
k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.24.1 h1:LAA5UpPOeaREEtFAQRUQOI3eE5So/j5J3zeQJjeLdz4=
k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0=
k8s.io/cli-runtime v0.24.1/go.mod h1:14aVvCTqkA7dNXY51N/6hRY3GUjchyWDOwW84qmR3bs=
k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E=
k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8=
k8s.io/cloud-provider v0.24.1 h1:SaQNq2Ax+epdY9wFngwN9GWpOVnM72hUqr2qy20cOvg=
k8s.io/cloud-provider v0.24.1/go.mod h1:h5m/KIiwiQ76hpUBsgrwm/rxteIfJG9kJQ/+/w1as2M=
k8s.io/cluster-bootstrap v0.24.1/go.mod h1:uq2PiYfKh8ZLb6DBU/3/2Z1DkMqXkTOHLemalC4tOgE=
k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts=
k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38=
k8s.io/component-helpers v0.24.1 h1:pk68RSRhkGX75nhtAkilguKbq/0MyXbQqmrZoQu4nbs=
k8s.io/component-helpers v0.24.1/go.mod h1:q5Z1pWV/QfX9ThuNeywxasiwkLw9KsR4Q9TAOdb/Y3s=
k8s.io/controller-manager v0.24.1/go.mod h1:g105ENexD6A2holEq7Bl6ae+69LJHiLnoEEm7wkE6sc=
k8s.io/cri-api v0.24.1/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig=
k8s.io/csi-translation-lib v0.24.1/go.mod h1:16nY6xx3XR4+TASMfTtake2ouK1IPz0t/baNmngzR4I=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@ -1772,28 +1777,28 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-aggregator v0.24.0/go.mod h1:ftfs6Fi46z3cKzeF2kvNBPLbMlSKuqZbesJGNp/cQnw=
k8s.io/kube-controller-manager v0.24.0/go.mod h1:s0pbwI8UuBEDdXQbTUpQdNIyU4rQ7jOxaXAcRBoWpJQ=
k8s.io/kube-aggregator v0.24.1/go.mod h1:vZvRALCO32hrIuREhkYwLq5Crc0zh6SxzJDAKrQM1+k=
k8s.io/kube-controller-manager v0.24.1/go.mod h1:IlXY8FozezzIBNcfA6TV1//fjz9gNy3LGbigDnX7Q3A=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kube-proxy v0.24.0/go.mod h1:OZ1k9jSwW94Rmj5hepCFea7qlGvvU+bfcosc6+dcFKA=
k8s.io/kube-scheduler v0.24.0/go.mod h1:DUq+fXaC51N1kl2YnT2EZSxOph6JOmIJe/pQe5keZPc=
k8s.io/kubectl v0.24.0 h1:nA+WtMLVdXUs4wLogGd1mPTAesnLdBpCVgCmz3I7dXo=
k8s.io/kubectl v0.24.0/go.mod h1:pdXkmCyHiRTqjYfyUJiXtbVNURhv0/Q1TyRhy2d5ic0=
k8s.io/kubelet v0.24.0 h1:fH+D6mSr4DGIeHp/O2+mCEJhkVq3Gpgv9BVOHI+GrWY=
k8s.io/kubelet v0.24.0/go.mod h1:p3BBacmHTCMpUf+nluhlyzuGHmONKAspqCvpu9oPAyA=
k8s.io/kubernetes v1.24.0 h1:9qRjlCuMjooyFTXLxduMBT+MZSdROWa3idI1AXZirVs=
k8s.io/kubernetes v1.24.0/go.mod h1:8e8maMiZzBR2/8Po5Uulx+MXZUYJuN3vtKwD4Ct1Xi0=
k8s.io/legacy-cloud-providers v0.24.0/go.mod h1:j2gujMUYBEtbYfJaL8JUOgInzERm9fxJwEaOkZcnEUk=
k8s.io/metrics v0.24.0/go.mod h1:jrLlFGdKl3X+szubOXPG0Lf2aVxuV3QJcbsgVRAM6fI=
k8s.io/mount-utils v0.24.0 h1:1SCkAY99QUchRa00HkLcm0HXajy8xlWHvue4wYdvBVU=
k8s.io/mount-utils v0.24.0/go.mod h1:XrSqB3a2e8sq+aU+rlbcBtQ3EgcuDk5RP9ZsGxjoDrI=
k8s.io/pod-security-admission v0.24.0 h1:nTZtZPdJ5ZusFyuxGZxfGxQ5piuhJyxuG5YmVUWG/Gs=
k8s.io/pod-security-admission v0.24.0/go.mod h1:YBS4mAdoba2qMvLPE3S7eMIxGlqUf4amHH26jUUqXX4=
k8s.io/sample-apiserver v0.24.0/go.mod h1:6YGSatoHMHIac/2dTtARwYH8PVWY5qq1L9ZYbxZ9lHY=
k8s.io/kube-proxy v0.24.1/go.mod h1:Q19uL+muS7Q0rxIXlddcanbGcogbDcX5I86GROhrwOM=
k8s.io/kube-scheduler v0.24.1/go.mod h1:mxSsC5sg710qdrN9oY+OSkHRSgYOv6qA2vEEt1t6Ax4=
k8s.io/kubectl v0.24.1 h1:gxcjHrnwntV1c+G/BHWVv4Mtk8CQJ0WTraElLBG+ddk=
k8s.io/kubectl v0.24.1/go.mod h1:NzFqQ50B004fHYWOfhHTrAm4TY6oGF5FAAL13LEaeUI=
k8s.io/kubelet v0.24.1 h1:CLgXZ9kKDQoNQFSwKk6vUE5gXNaX1/s8VM8Oq/P5S+o=
k8s.io/kubelet v0.24.1/go.mod h1:LShXfjNO1or7ktsorODSOu8+Kd5dHzWF3DtVLXeP3JE=
k8s.io/kubernetes v1.24.1 h1:cfRZCNrJN9hR49SBSGLHhn+IdAcfx6OVXadGvWuvYaM=
k8s.io/kubernetes v1.24.1/go.mod h1:8e8maMiZzBR2/8Po5Uulx+MXZUYJuN3vtKwD4Ct1Xi0=
k8s.io/legacy-cloud-providers v0.24.1/go.mod h1:OeDg+OJ5uzmJQyh6vpCkwGY8tVegaiokWErGr7YlSaI=
k8s.io/metrics v0.24.1/go.mod h1:vMs5xpcOyY9D+/XVwlaw8oUHYCo6JTGBCZfyXOOkAhE=
k8s.io/mount-utils v0.24.1 h1:juKCvkiP4sWklb72OIk/qW7UhDns41ldcR/EHu/T1uA=
k8s.io/mount-utils v0.24.1/go.mod h1:XrSqB3a2e8sq+aU+rlbcBtQ3EgcuDk5RP9ZsGxjoDrI=
k8s.io/pod-security-admission v0.24.1 h1:CNcUKc06PgejhdvK1rqBgo5xcpirsl3O574cfKt4hxk=
k8s.io/pod-security-admission v0.24.1/go.mod h1:ZH6e17BuFFdiYHFxn9X6d7iaPj3JyuqBOw/MRytVWp8=
k8s.io/sample-apiserver v0.24.1/go.mod h1:5L12FaHPjpJzr0s/ClAx61Ig5uBjDCvthtmTIORu7F8=
k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@ -1823,6 +1828,7 @@ sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9W
sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg=
sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
@ -29,6 +29,7 @@ import (
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"
rterrors "github.com/ceph/ceph-csi/internal/util/reftracker/errors"

"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/protobuf/ptypes/timestamp"
@ -59,48 +60,19 @@ func (cs *ControllerServer) createBackingVolume(
volOptions,
parentVolOpt *store.VolumeOptions,
pvID *store.VolumeIdentifier,
sID *store.SnapshotIdentifier) error {
sID *store.SnapshotIdentifier,
) error {
var err error
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

if sID != nil {
if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
log.ErrorLog(ctx, err.Error())

return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)
snap := core.Snapshot{
SnapshotID: sID.FsSnapshotName,
SubVolume: &parentVolOpt.SubVolume,
return cs.createBackingVolumeFromSnapshotSource(ctx, volOptions, parentVolOpt, volClient, sID)
}

err = volClient.CreateCloneFromSnapshot(ctx, snap)
if err != nil {
log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)

return err
}

return err
}
if parentVolOpt != nil {
if err = cs.OperationLocks.GetCloneLock(pvID.VolumeID); err != nil {
log.ErrorLog(ctx, err.Error())

return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)
err = volClient.CreateCloneFromSubvolume(
ctx, &parentVolOpt.SubVolume)
if err != nil {
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err)

return err
return cs.createBackingVolumeFromVolumeSource(ctx, parentVolOpt, volClient, pvID)
}

return nil
}
if err = volClient.CreateVolume(ctx); err != nil {
log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)

@ -110,10 +82,71 @@ func (cs *ControllerServer) createBackingVolume(
return nil
}

func (cs *ControllerServer) createBackingVolumeFromSnapshotSource(
ctx context.Context,
volOptions *store.VolumeOptions,
parentVolOpt *store.VolumeOptions,
volClient core.SubVolumeClient,
sID *store.SnapshotIdentifier,
) error {
if err := cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
log.ErrorLog(ctx, err.Error())

return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)

if volOptions.BackingSnapshot {
if err := store.AddSnapshotBackedVolumeRef(ctx, volOptions); err != nil {
log.ErrorLog(ctx, "failed to create snapshot-backed volume from snapshot %s: %v",
sID.FsSnapshotName, err)

return err
}

return nil
}

err := volClient.CreateCloneFromSnapshot(ctx, core.Snapshot{
SnapshotID: sID.FsSnapshotName,
SubVolume: &parentVolOpt.SubVolume,
})
if err != nil {
log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)

return err
}

return nil
}

func (cs *ControllerServer) createBackingVolumeFromVolumeSource(
ctx context.Context,
parentVolOpt *store.VolumeOptions,
volClient core.SubVolumeClient,
pvID *store.VolumeIdentifier,
) error {
if err := cs.OperationLocks.GetCloneLock(pvID.VolumeID); err != nil {
log.ErrorLog(ctx, err.Error())

return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)

if err := volClient.CreateCloneFromSubvolume(ctx, &parentVolOpt.SubVolume); err != nil {
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err)

return err
}

return nil
}
func checkContentSource(
ctx context.Context,
req *csi.CreateVolumeRequest,
cr *util.Credentials) (*store.VolumeOptions, *store.VolumeIdentifier, *store.SnapshotIdentifier, error) {
cr *util.Credentials,
) (*store.VolumeOptions, *store.VolumeIdentifier, *store.SnapshotIdentifier, error) {
if req.VolumeContentSource == nil {
return nil, nil, nil, nil
}
@ -155,7 +188,9 @@ func checkValidCreateVolumeRequest(
vol,
parentVol *store.VolumeOptions,
pvID *store.VolumeIdentifier,
sID *store.SnapshotIdentifier) error {
sID *store.SnapshotIdentifier,
req *csi.CreateVolumeRequest,
) error {
switch {
case pvID != nil:
if vol.Size < parentVol.Size {
@ -165,6 +200,10 @@ func checkValidCreateVolumeRequest(
parentVol.Size,
vol.Size)
}

if vol.BackingSnapshot {
return errors.New("cloning snapshot-backed volumes is currently not supported")
}
case sID != nil:
if vol.Size < parentVol.Size {
return fmt.Errorf(
@ -173,6 +212,25 @@ func checkValidCreateVolumeRequest(
parentVol.Size,
vol.Size)
}

if vol.BackingSnapshot {
if vol.Size != parentVol.Size {
return fmt.Errorf(
"cannot create snapshot-backed volume of different size: expected %d bytes, got %d bytes",
parentVol.Size,
vol.Size,
)
}

volCaps := req.GetVolumeCapabilities()
for _, volCap := range volCaps {
mode := volCap.AccessMode.Mode
if mode != csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY &&
mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
return errors.New("backingSnapshot may be used only with read-only access modes")
}
}
}
}

return nil
@ -182,7 +240,8 @@ func checkValidCreateVolumeRequest(
// nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
func (cs *ControllerServer) CreateVolume(
ctx context.Context,
req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
req *csi.CreateVolumeRequest,
) (*csi.CreateVolumeResponse, error) {
if err := cs.validateCreateVolumeRequest(req); err != nil {
log.ErrorLog(ctx, "CreateVolumeRequest validation failed: %v", err)

@ -229,7 +288,7 @@ func (cs *ControllerServer) CreateVolume(
defer parentVol.Destroy()
}

err = checkValidCreateVolumeRequest(volOptions, parentVol, pvID, sID)
err = checkValidCreateVolumeRequest(volOptions, parentVol, pvID, sID, req)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
@ -245,7 +304,7 @@ func (cs *ControllerServer) CreateVolume(
// TODO return error message if requested vol size greater than found volume return error

if vID != nil {
if sID != nil || pvID != nil {
if sID != nil || pvID != nil && !volOptions.BackingSnapshot {
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
err = volClient.ExpandVolume(ctx, volOptions.Size)
if err != nil {
@ -279,8 +338,7 @@ func (cs *ControllerServer) CreateVolume(
VolumeContext: volumeContext,
}
if volOptions.Topology != nil {
volume.AccessibleTopology =
[]*csi.Topology{
volume.AccessibleTopology = []*csi.Topology{
{
Segments: volOptions.Topology,
},
@ -318,6 +376,11 @@ func (cs *ControllerServer) CreateVolume(
return nil, err
}

if !volOptions.BackingSnapshot {
// Get root path for the created subvolume.
// Note that root path for snapshot-backed volumes has been already set when
// building VolumeOptions.

volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
volOptions.RootPath, err = volClient.GetVolumeRootPathCeph(ctx)
if err != nil {
@ -339,6 +402,7 @@ func (cs *ControllerServer) CreateVolume(

return nil, status.Error(codes.Internal, err.Error())
}
}

log.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
vID.FsSubvolName, requestName)
@ -353,8 +417,7 @@ func (cs *ControllerServer) CreateVolume(
VolumeContext: volumeContext,
}
if volOptions.Topology != nil {
volume.AccessibleTopology =
[]*csi.Topology{
volume.AccessibleTopology = []*csi.Topology{
{
Segments: volOptions.Topology,
},
@ -367,7 +430,8 @@ func (cs *ControllerServer) CreateVolume(
// DeleteVolume deletes the volume in backend and its reservation.
func (cs *ControllerServer) DeleteVolume(
ctx context.Context,
req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
req *csi.DeleteVolumeRequest,
) (*csi.DeleteVolumeResponse, error) {
if err := cs.validateDeleteVolumeRequest(); err != nil {
log.ErrorLog(ctx, "DeleteVolumeRequest validation failed: %v", err)

@ -449,16 +513,8 @@ func (cs *ControllerServer) DeleteVolume(
}
defer cr.DeleteCredentials()

volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if err = volClient.PurgeVolume(ctx, false); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}

if !errors.Is(err, cerrors.ErrVolumeNotFound) {
return nil, status.Error(codes.Internal, err.Error())
}
if err := cleanUpBackingVolume(ctx, volOptions, vID, cr); err != nil {
return nil, err
}

if err := store.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
@ -470,11 +526,90 @@ func (cs *ControllerServer) DeleteVolume(
return &csi.DeleteVolumeResponse{}, nil
}

func cleanUpBackingVolume(
ctx context.Context,
volOptions *store.VolumeOptions,
volID *store.VolumeIdentifier,
cr *util.Credentials,
) error {
if !volOptions.BackingSnapshot {
// Regular volumes need to be purged.

volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if err := volClient.PurgeVolume(ctx, false); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
return status.Error(codes.FailedPrecondition, err.Error())
}

if !errors.Is(err, cerrors.ErrVolumeNotFound) {
return status.Error(codes.Internal, err.Error())
}
}

return nil
}

// Snapshot-backed volumes need to un-reference the backing snapshot, and
// the snapshot itself may need to be deleted if its reftracker doesn't
// hold any references anymore.

backingSnapNeedsDelete, err := store.UnrefSnapshotBackedVolume(ctx, volOptions)
if err != nil {
if errors.Is(err, rterrors.ErrObjectOutOfDate) {
return status.Error(codes.Aborted, err.Error())
}

return status.Error(codes.Internal, err.Error())
}

if !backingSnapNeedsDelete {
return nil
}

snapParentVolOptions, _, snapID, err := store.NewSnapshotOptionsFromID(ctx, volOptions.BackingSnapshotID, cr)
if err != nil {
absorbErrs := []error{
util.ErrPoolNotFound,
util.ErrKeyNotFound,
cerrors.ErrSnapNotFound,
cerrors.ErrVolumeNotFound,
}

fatalErr := true
for i := range absorbErrs {
if errors.Is(err, absorbErrs[i]) {
fatalErr = false

break
}
}

if fatalErr {
return status.Error(codes.Internal, err.Error())
}
} else {
snapClient := core.NewSnapshot(
snapParentVolOptions.GetConnection(),
snapID.FsSnapshotName,
&snapParentVolOptions.SubVolume,
)

err = deleteSnapshotAndUndoReservation(ctx, snapClient, snapParentVolOptions, snapID, cr)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
}

return nil
}

// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(
ctx context.Context,
req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
req *csi.ValidateVolumeCapabilitiesRequest,
) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// Cephfs doesn't support Block volume
for _, capability := range req.VolumeCapabilities {
if capability.GetBlock() != nil {
@ -492,7 +627,8 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
// ControllerExpandVolume expands CephFS Volumes on demand based on resizer request.
func (cs *ControllerServer) ControllerExpandVolume(
ctx context.Context,
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
req *csi.ControllerExpandVolumeRequest,
) (*csi.ControllerExpandVolumeResponse, error) {
if err := cs.validateExpandVolumeRequest(req); err != nil {
log.ErrorLog(ctx, "ControllerExpandVolumeRequest validation failed: %v", err)

@ -532,6 +668,10 @@ func (cs *ControllerServer) ControllerExpandVolume(
}
defer volOptions.Destroy()

if volOptions.BackingSnapshot {
return nil, status.Error(codes.InvalidArgument, "cannot expand snapshot-backed volume")
}

RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if err = volClient.ResizeVolume(ctx, RoundOffSize); err != nil {
@ -551,7 +691,8 @@ func (cs *ControllerServer) ControllerExpandVolume(
// nolint:gocyclo,cyclop // golangci-lint did not catch this earlier, needs to get fixed late
func (cs *ControllerServer) CreateSnapshot(
ctx context.Context,
req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
req *csi.CreateSnapshotRequest,
) (*csi.CreateSnapshotResponse, error) {
if err := cs.validateSnapshotReq(ctx, req); err != nil {
return nil, err
}
@ -609,6 +750,10 @@ func (cs *ControllerServer) CreateSnapshot(
parentVolOptions.ClusterID)
}

if parentVolOptions.BackingSnapshot {
return nil, status.Error(codes.InvalidArgument, "cannot snapshot a snapshot-backed volume")
}

cephfsSnap, genSnapErr := store.GenSnapFromOptions(ctx, req)
if genSnapErr != nil {
return nil, status.Error(codes.Internal, genSnapErr.Error())
@ -714,7 +859,8 @@ func (cs *ControllerServer) CreateSnapshot(
func doSnapshot(
ctx context.Context,
volOpt *store.VolumeOptions,
snapshotName string) (core.SnapshotInfo, error) {
snapshotName string,
) (core.SnapshotInfo, error) {
snapID := fsutil.VolumeID(snapshotName)
snap := core.SnapshotInfo{}
snapClient := core.NewSnapshot(volOpt.GetConnection(), snapshotName, &volOpt.SubVolume)
@ -773,9 +919,11 @@ func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.Cr

// DeleteSnapshot deletes the snapshot in backend and removes the
// snapshot metadata from store.
// nolint:gocyclo,cyclop // TODO: reduce complexity
func (cs *ControllerServer) DeleteSnapshot(
ctx context.Context,
req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
req *csi.DeleteSnapshotRequest,
) (*csi.DeleteSnapshotResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
@ -870,17 +1018,51 @@ func (cs *ControllerServer) DeleteSnapshot(
return nil, status.Error(codes.Internal, err.Error())
}
}
err = snapClient.DeleteSnapshot(ctx)

needsDelete, err := store.UnrefSelfInSnapshotBackedVolumes(ctx, volOpt, sid.SnapshotID)
if err != nil {
if errors.Is(err, rterrors.ErrObjectOutOfDate) {
return nil, status.Error(codes.Aborted, err.Error())
}

return nil, status.Error(codes.Internal, err.Error())
}

if needsDelete {
err = deleteSnapshotAndUndoReservation(
ctx,
snapClient,
volOpt,
sid,
cr,
)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err)

return nil, status.Error(codes.Internal, err.Error())
}

return &csi.DeleteSnapshotResponse{}, nil
}

func deleteSnapshotAndUndoReservation(
ctx context.Context,
snapClient core.SnapshotClient,
parentVolOptions *store.VolumeOptions,
snapID *store.SnapshotIdentifier,
cr *util.Credentials,
) error {
err := snapClient.DeleteSnapshot(ctx)
if err != nil {
return err
}

err = store.UndoSnapReservation(ctx, parentVolOptions, *snapID, snapID.RequestName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
snapID.RequestName, snapID.RequestName, err)

return err
}

return nil
}

@ -64,7 +64,8 @@ func (cs cephFSCloneState) toError() error {
// CreateCloneFromSubvolume creates a clone from a subvolume.
func (s *subVolumeClient) CreateCloneFromSubvolume(
ctx context.Context,
parentvolOpt *SubVolume) error {
parentvolOpt *SubVolume,
) error {
snapshotID := s.VolID
snapClient := NewSnapshot(s.conn, snapshotID, parentvolOpt)
err := snapClient.CreateSnapshot(ctx)
@ -159,7 +160,8 @@ func (s *subVolumeClient) CreateCloneFromSubvolume(

// CleanupSnapshotFromSubvolume removes the snapshot from the subvolume.
func (s *subVolumeClient) CleanupSnapshotFromSubvolume(
ctx context.Context, parentVol *SubVolume) error {
ctx context.Context, parentVol *SubVolume,
) error {
// snapshot name is same as clone name as we need a name which can be
// identified during PVC-PVC cloning.
snapShotID := s.VolID
@ -193,7 +195,8 @@ func (s *subVolumeClient) CleanupSnapshotFromSubvolume(

// CreateSnapshotFromSubvolume creates a clone from subvolume snapshot.
func (s *subVolumeClient) CreateCloneFromSnapshot(
ctx context.Context, snap Snapshot) error {
ctx context.Context, snap Snapshot,
) error {
snapID := snap.SnapshotID
snapClient := NewSnapshot(s.conn, snapID, snap.SubVolume)
err := snapClient.CloneSnapshot(ctx, s.SubVolume)

@ -192,7 +192,7 @@ func (ns *NodeServer) tryRestoreFuseMountsInNodePublish(
// Unmount and mount the volume.

if stagingTargetMs != msMounted {
if err := mounter.UnmountVolume(ctx, stagingTargetPath); err != nil {
if err := mounter.UnmountAll(ctx, stagingTargetPath); err != nil {
return err
}

@ -269,5 +269,5 @@ func (ns *NodeServer) tryRestoreFuseMountInNodeStage(

// Restoration here means only unmounting the volume.
// NodeStageVolume should take care of the rest.
return mounter.UnmountVolume(ctx, stagingTargetPath)
return mounter.UnmountAll(ctx, stagingTargetPath)
}

@ -33,7 +33,8 @@ type IdentityServer struct {
// GetPluginCapabilities returns available capabilities of the ceph driver.
func (is *IdentityServer) GetPluginCapabilities(
ctx context.Context,
req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
req *csi.GetPluginCapabilitiesRequest,
) (*csi.GetPluginCapabilitiesResponse, error) {
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{

@ -107,7 +107,8 @@ func (m *FuseMounter) Mount(
ctx context.Context,
mountPoint string,
cr *util.Credentials,
volOptions *store.VolumeOptions) error {
volOptions *store.VolumeOptions,
) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}
@ -117,8 +118,8 @@ func (m *FuseMounter) Mount(

func (m *FuseMounter) Name() string { return "Ceph FUSE driver" }

func UnmountVolume(ctx context.Context, mountPoint string) error {
if _, stderr, err := util.ExecCommand(ctx, "umount", mountPoint); err != nil {
func UnmountVolume(ctx context.Context, mountPoint string, opts ...string) error {
if _, stderr, err := util.ExecCommand(ctx, "umount", append([]string{mountPoint}, opts...)...); err != nil {
err = fmt.Errorf("%w stderr: %s", err, stderr)
if strings.Contains(err.Error(), fmt.Sprintf("umount: %s: not mounted", mountPoint)) ||
strings.Contains(err.Error(), "No such file or directory") {
@ -148,3 +149,7 @@ func UnmountVolume(ctx context.Context, mountPoint string) error {

return nil
}

func UnmountAll(ctx context.Context, mountPoint string) error {
return UnmountVolume(ctx, mountPoint, "--all-targets")
}
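
The new UnmountAll wrapper matters for snapshot-backed volumes: staging stacks a bind mount on top of the subvolume mount at the same path, so a plain `umount` would only remove the topmost mount. The util-linux `--all-targets` flag unmounts every mount of the given mountpoint in one call. A minimal standalone sketch of the equivalent invocation, assuming a PATH-reachable umount binary and an illustrative staging path:

package main

import (
	"context"
	"fmt"
	"os/exec"
)

// unmountAll mirrors UnmountAll above: "umount --all-targets <path>" tears
// down both the bind mount and the underlying CephFS mount stacked on the
// same staging path, where a plain "umount <path>" would leave the lower
// mount behind.
func unmountAll(ctx context.Context, mountPoint string) error {
	out, err := exec.CommandContext(ctx, "umount", "--all-targets", mountPoint).CombinedOutput()
	if err != nil {
		return fmt.Errorf("umount failed: %w, output: %s", err, out)
	}

	return nil
}

func main() {
	// Hypothetical staging path, for illustration only.
	if err := unmountAll(context.Background(), "/var/lib/kubelet/plugins/staging/vol-1"); err != nil {
		fmt.Println(err)
	}
}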

@ -72,7 +72,8 @@ func (m *KernelMounter) Mount(
ctx context.Context,
mountPoint string,
cr *util.Credentials,
volOptions *store.VolumeOptions) error {
volOptions *store.VolumeOptions,
) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"os"
"path"
"strings"

cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
@ -47,7 +48,8 @@ type NodeServer struct {

func getCredentialsForVolume(
volOptions *store.VolumeOptions,
secrets map[string]string) (*util.Credentials, error) {
secrets map[string]string,
) (*util.Credentials, error) {
var (
err error
cr *util.Credentials
@ -100,10 +102,25 @@ func (ns *NodeServer) getVolumeOptions(
return volOptions, nil
}

func validateSnapshotBackedVolCapability(volCap *csi.VolumeCapability) error {
// Snapshot-backed volumes may be used with read-only volume access modes only.

mode := volCap.AccessMode.Mode

if mode != csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY &&
mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
return status.Error(codes.InvalidArgument,
"snapshot-backed volume supports only read-only access mode")
}

return nil
}
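
To illustrate what this validator accepts, here is a minimal sketch using the same CSI spec package; the helper name and the example capability are illustrative, not part of the commit:

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// readOnlyMode reports whether the access mode is one of the two read-only
// modes accepted for snapshot-backed volumes, mirroring the check above.
func readOnlyMode(volCap *csi.VolumeCapability) bool {
	mode := volCap.GetAccessMode().GetMode()

	return mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
		mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
}

func main() {
	rw := &csi.VolumeCapability{
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		},
	}
	fmt.Println(readOnlyMode(rw)) // false: writable modes are rejected
}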

// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
req *csi.NodeStageVolumeRequest,
) (*csi.NodeStageVolumeResponse, error) {
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
@ -126,12 +143,24 @@ func (ns *NodeServer) NodeStageVolume(
}
defer volOptions.Destroy()

// Skip extracting NetNamespaceFilePath if the clusterID is empty.
// In case of pre-provisioned volume the clusterID is not set in the
// volume context.
if volOptions.ClusterID != "" {
volOptions.NetNamespaceFilePath, err = util.GetCephFSNetNamespaceFilePath(
util.CsiConfigFile,
volOptions.ClusterID)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}

if volOptions.BackingSnapshot {
if err = validateSnapshotBackedVolCapability(req.GetVolumeCapability()); err != nil {
return nil, err
}
}

mnt, err := mounter.New(volOptions)
if err != nil {
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
@ -184,7 +213,7 @@ func (ns *NodeServer) NodeStageVolume(
log.ErrorLog(ctx, "cephfs: failed to write NodeStageMountinfo for volume %s: %v", volID, err)

// Try to clean node stage mount.
if unmountErr := mounter.UnmountVolume(ctx, stagingTargetPath); unmountErr != nil {
if unmountErr := mounter.UnmountAll(ctx, stagingTargetPath); unmountErr != nil {
log.ErrorLog(ctx, "cephfs: failed to unmount %s in WriteNodeStageMountinfo clean up: %v",
stagingTargetPath, unmountErr)
}
@ -215,7 +244,7 @@ func (*NodeServer) mount(

log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name())

readOnly := "ro"
const readOnly = "ro"

if volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
@ -240,14 +269,118 @@ func (*NodeServer) mount(
return status.Error(codes.Internal, err.Error())
}

defer func() {
if err == nil {
return
}

unmountErr := mounter.UnmountAll(ctx, stagingTargetPath)
if unmountErr != nil {
log.ErrorLog(ctx, "failed to clean up mounts in rollback procedure: %v", unmountErr)
}
}()

if volOptions.BackingSnapshot {
snapshotRoot, err := getBackingSnapshotRoot(ctx, volOptions, stagingTargetPath)
if err != nil {
return err
}

absoluteSnapshotRoot := path.Join(stagingTargetPath, snapshotRoot)
err = mounter.BindMount(
ctx,
absoluteSnapshotRoot,
stagingTargetPath,
true,
[]string{"bind", "_netdev"},
)

if err != nil {
log.ErrorLog(ctx,
"failed to bind mount snapshot root %s: %v", absoluteSnapshotRoot, err)

return status.Error(codes.Internal, err.Error())
}
}

return nil
}
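
The bind-mount step above is what makes a snapshot-backed volume look like an ordinary volume to consumers: the subvolume is first mounted at the staging path, then the snapshot directory inside it is bind-mounted over that same path, hiding everything except the snapshot contents. A minimal sketch of the underlying syscalls, assuming Linux and illustrative paths (the driver itself goes through its mounter abstraction, not these calls directly):

package main

import (
	"path"

	"golang.org/x/sys/unix"
)

// bindSnapshotRoot assumes the subvolume is already mounted at
// stagingTargetPath and overlays its snapshot directory on the same path.
func bindSnapshotRoot(stagingTargetPath, snapshotRoot string) error {
	src := path.Join(stagingTargetPath, snapshotRoot) // e.g. <staging>/.snap/<snap>/<uuid>

	if err := unix.Mount(src, stagingTargetPath, "", unix.MS_BIND, ""); err != nil {
		return err
	}

	// The kernel ignores MS_RDONLY on the initial bind; enforcing read-only
	// requires a bind remount.
	return unix.Mount("", stagingTargetPath, "", unix.MS_REMOUNT|unix.MS_BIND|unix.MS_RDONLY, "")
}

func main() {
	// Hypothetical paths, requires root to actually run.
	_ = bindSnapshotRoot("/var/lib/kubelet/plugins/staging/vol-1", ".snap/csi-snap-0001/5a3c-uuid")
}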

func getBackingSnapshotRoot(
ctx context.Context,
volOptions *store.VolumeOptions,
stagingTargetPath string,
) (string, error) {
if volOptions.ProvisionVolume {
// Provisioned snapshot-backed volumes should have their BackingSnapshotRoot
// already populated.
return volOptions.BackingSnapshotRoot, nil
}

// Pre-provisioned snapshot-backed volumes are more involved:
//
// Snapshots created with `ceph fs subvolume snapshot create` have following
// snap directory name format inside <root path>/.snap:
//
// _<snapshot>_<snapshot inode number>
//
// We don't know what <snapshot inode number> is, and so <root path>/.snap
// needs to be traversed in order to determine the full snapshot directory name.

snapshotsBase := path.Join(stagingTargetPath, ".snap")

dir, err := os.Open(snapshotsBase)
if err != nil {
log.ErrorLog(ctx, "failed to open %s when searching for snapshot root: %v", snapshotsBase, err)

return "", status.Errorf(codes.Internal, err.Error())
}

// Read the contents of <root path>/.snap directory into a string slice.

contents, err := dir.Readdirnames(0)
if err != nil {
log.ErrorLog(ctx, "failed to read %s when searching for snapshot root: %v", snapshotsBase, err)

return "", status.Errorf(codes.Internal, err.Error())
}

var (
found bool
snapshotDirName string
)

// Look through the directory's contents and try to find the correct snapshot
// dir name. The search must be exhaustive to catch possible ambiguous results.

for i := range contents {
if !strings.Contains(contents[i], volOptions.BackingSnapshotID) {
continue
}

if !found {
found = true
snapshotDirName = contents[i]
} else {
return "", status.Errorf(codes.InvalidArgument, "ambiguous backingSnapshotID %s in %s",
volOptions.BackingSnapshotID, snapshotsBase)
}
}

if !found {
return "", status.Errorf(codes.InvalidArgument, "no snapshot with backingSnapshotID %s found in %s",
volOptions.BackingSnapshotID, snapshotsBase)
}

return path.Join(".snap", snapshotDirName), nil
}
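
The exhaustive search matters because the snapshot inode number in the `_<snapshot>_<inode>` directory name is unknown, so matching is by substring and must reject ambiguity. A small self-contained sketch of that matching logic, with an illustrative directory entry:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// findSnapshotDir mirrors the search above over the entries of <root>/.snap:
// exactly one entry must contain the backing snapshot ID, otherwise the
// result would be ambiguous or missing.
func findSnapshotDir(entries []string, backingSnapshotID string) (string, error) {
	var match string
	for _, e := range entries {
		if !strings.Contains(e, backingSnapshotID) {
			continue
		}
		if match != "" {
			return "", errors.New("ambiguous backingSnapshotID")
		}
		match = e
	}
	if match == "" {
		return "", errors.New("no snapshot found")
	}

	return match, nil
}

func main() {
	entries := []string{"_csi-snap-123_1099511627776"} // illustrative "_<snapshot>_<inode>" entry
	fmt.Println(findSnapshotDir(entries, "csi-snap-123"))
}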

// NodePublishVolume mounts the volume mounted to the staging path to the target
// path.
func (ns *NodeServer) NodePublishVolume(
ctx context.Context,
req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
req *csi.NodePublishVolumeRequest,
) (*csi.NodePublishVolumeResponse, error) {
mountOptions := []string{"bind", "_netdev"}
if err := util.ValidateNodePublishVolumeRequest(req); err != nil {
return nil, err
@ -330,7 +463,8 @@ func (ns *NodeServer) NodePublishVolume(
// NodeUnpublishVolume unmounts the volume from the target path.
func (ns *NodeServer) NodeUnpublishVolume(
ctx context.Context,
req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
req *csi.NodeUnpublishVolumeRequest,
) (*csi.NodeUnpublishVolumeResponse, error) {
var err error
if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil {
return nil, err
@ -385,7 +519,8 @@ func (ns *NodeServer) NodeUnpublishVolume(
// NodeUnstageVolume unstages the volume from the staging path.
func (ns *NodeServer) NodeUnstageVolume(
ctx context.Context,
req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
req *csi.NodeUnstageVolumeRequest,
) (*csi.NodeUnstageVolumeResponse, error) {
var err error
if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
return nil, err
@ -433,7 +568,7 @@ func (ns *NodeServer) NodeUnstageVolume(
return &csi.NodeUnstageVolumeResponse{}, nil
}
// Unmount the volume
if err = mounter.UnmountVolume(ctx, stagingTargetPath); err != nil {
if err = mounter.UnmountAll(ctx, stagingTargetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

@ -445,7 +580,8 @@ func (ns *NodeServer) NodeUnstageVolume(
// NodeGetCapabilities returns the supported capabilities of the node server.
func (ns *NodeServer) NodeGetCapabilities(
ctx context.Context,
req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
req *csi.NodeGetCapabilitiesRequest,
) (*csi.NodeGetCapabilitiesResponse, error) {
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{
{
@ -476,7 +612,8 @@ func (ns *NodeServer) NodeGetCapabilities(
// NodeGetVolumeStats returns volume stats.
func (ns *NodeServer) NodeGetVolumeStats(
ctx context.Context,
req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
req *csi.NodeGetVolumeStatsRequest,
) (*csi.NodeGetVolumeStatsResponse, error) {
var err error
targetPath := req.GetVolumePath()
if targetPath == "" {

168
internal/cephfs/store/backingsnapshot.go
Normal file
@ -0,0 +1,168 @@
/*
Copyright 2022 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package store

import (
"context"
"fmt"

fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/ceph-csi/internal/util/reftracker"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
"github.com/ceph/ceph-csi/internal/util/reftracker/reftype"
)

func fmtBackingSnapshotReftrackerName(backingSnapID string) string {
return fmt.Sprintf("rt-backingsnapshot-%s", backingSnapID)
}

func AddSnapshotBackedVolumeRef(
ctx context.Context,
volOptions *VolumeOptions,
) error {
ioctx, err := volOptions.conn.GetIoctx(volOptions.MetadataPool)
if err != nil {
log.ErrorLog(ctx, "failed to create RADOS ioctx: %s", err)

return err
}
defer ioctx.Destroy()

ioctx.SetNamespace(fsutil.RadosNamespace)

var (
backingSnapID = volOptions.BackingSnapshotID
ioctxW = radoswrapper.NewIOContext(ioctx)
)

created, err := reftracker.Add(
ioctxW,
fmtBackingSnapshotReftrackerName(backingSnapID),
map[string]struct{}{
backingSnapID: {},
volOptions.VolID: {},
},
)
if err != nil {
log.ErrorLog(ctx, "failed to add refs for backing snapshot %s: %v",
backingSnapID, err)

return err
}

defer func() {
if err == nil {
return
}

// Clean up after failure.

var deleted bool
deleted, err = reftracker.Remove(
ioctxW,
fmtBackingSnapshotReftrackerName(backingSnapID),
map[string]reftype.RefType{
backingSnapID: reftype.Normal,
volOptions.VolID: reftype.Normal,
},
)
if err != nil {
log.ErrorLog(ctx, "failed to remove refs in cleanup procedure for backing snapshot %s: %v",
backingSnapID, err)
}

if created && !deleted {
log.ErrorLog(ctx, "orphaned reftracker object %s (pool %s, namespace %s)",
backingSnapID, volOptions.MetadataPool, fsutil.RadosNamespace)
}
}()

// There may have been a race between adding a ref to the reftracker and
// deleting the backing snapshot. Make sure the snapshot still exists by
// trying to retrieve it again.
_, _, _, err = NewSnapshotOptionsFromID(ctx, volOptions.BackingSnapshotID, volOptions.conn.Creds)
if err != nil {
log.ErrorLog(ctx, "failed to get backing snapshot %s: %v", volOptions.BackingSnapshotID, err)
}

return err
}

func UnrefSnapshotBackedVolume(
ctx context.Context,
volOptions *VolumeOptions,
) (bool, error) {
ioctx, err := volOptions.conn.GetIoctx(volOptions.MetadataPool)
if err != nil {
log.ErrorLog(ctx, "failed to create RADOS ioctx: %s", err)

return false, err
}
defer ioctx.Destroy()

ioctx.SetNamespace(fsutil.RadosNamespace)

var (
backingSnapID = volOptions.BackingSnapshotID
ioctxW = radoswrapper.NewIOContext(ioctx)
)

deleted, err := reftracker.Remove(
ioctxW,
fmtBackingSnapshotReftrackerName(backingSnapID),
map[string]reftype.RefType{
volOptions.VolID: reftype.Normal,
},
)
if err != nil {
log.ErrorLog(ctx, "failed to remove refs for backing snapshot %s: %v",
backingSnapID, err)

return false, err
}

return deleted, err
}

// UnrefSelfInSnapshotBackedVolumes removes (masks) snapshot ID in the
// reftracker for volumes backed by this snapshot. The returned boolean
// value signals whether the snapshot is not referenced by any such volumes
// and needs to be removed.
func UnrefSelfInSnapshotBackedVolumes(
ctx context.Context,
snapParentVolOptions *VolumeOptions,
snapshotID string,
) (bool, error) {
ioctx, err := snapParentVolOptions.conn.GetIoctx(snapParentVolOptions.MetadataPool)
if err != nil {
log.ErrorLog(ctx, "failed to create RADOS ioctx: %s", err)

return false, err
}
defer ioctx.Destroy()

ioctx.SetNamespace(fsutil.RadosNamespace)

return reftracker.Remove(
radoswrapper.NewIOContext(ioctx),
fmtBackingSnapshotReftrackerName(snapshotID),
map[string]reftype.RefType{
snapshotID: reftype.Mask,
},
)
}

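A toy, in-memory stand-in may help illustrate the reference semantics this file relies on (the real reftracker is RADOS-backed and atomic; the names and the mask mechanics below are a conceptual model only, not the actual implementation):

package main

import "fmt"

// refTracker models a backing snapshot tracked with normal references
// (itself plus every snapshot-backed volume). DeleteSnapshot masks the
// snapshot's own reference; the snapshot is really deleted once no unmasked
// references remain.
type refTracker struct {
	refs map[string]bool // reference name -> masked?
}

func newRefTracker(refs ...string) *refTracker {
	rt := &refTracker{refs: make(map[string]bool)}
	for _, r := range refs {
		rt.refs[r] = false
	}

	return rt
}

// unreferenced reports whether only masked references remain.
func (rt *refTracker) unreferenced() bool {
	for _, masked := range rt.refs {
		if !masked {
			return false
		}
	}

	return true
}

// mask hides a reference, as when DeleteSnapshot runs while volumes still
// depend on the snapshot.
func (rt *refTracker) mask(ref string) bool {
	rt.refs[ref] = true

	return rt.unreferenced()
}

// remove drops a reference, as when a snapshot-backed volume is deleted.
func (rt *refTracker) remove(ref string) bool {
	delete(rt.refs, ref)

	return rt.unreferenced()
}

func main() {
	rt := newRefTracker("snap-1", "vol-a")

	fmt.Println(rt.mask("snap-1"))  // false: vol-a still needs the snapshot
	fmt.Println(rt.remove("vol-a")) // true: last real reference gone, delete it
}
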
@ -77,7 +77,8 @@ func CheckVolExists(ctx context.Context,

pvID *VolumeIdentifier,
sID *SnapshotIdentifier,
cr *util.Credentials) (*VolumeIdentifier, error) {
cr *util.Credentials,
) (*VolumeIdentifier, error) {
var vid VolumeIdentifier
// Connect to cephfs' default radosNamespace (csi)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
@ -99,7 +100,7 @@ func CheckVolExists(ctx context.Context,
volOptions.VolID = vid.FsSubvolName

vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
if sID != nil || pvID != nil {
if (sID != nil || pvID != nil) && imageData.ImageAttributes.BackingSnapshotID == "" {
cloneState, cloneStateErr := vol.GetCloneState(ctx)
if cloneStateErr != nil {
if errors.Is(cloneStateErr, cerrors.ErrVolumeNotFound) {
@ -152,6 +153,8 @@ func CheckVolExists(ctx context.Context,
return nil, fmt.Errorf("clone is not in complete state for %s", vid.FsSubvolName)
}
}

if imageData.ImageAttributes.BackingSnapshotID == "" {
volOptions.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
if err != nil {
if errors.Is(err, cerrors.ErrVolumeNotFound) {
@ -172,6 +175,7 @@ func CheckVolExists(ctx context.Context,

return nil, err
}
}

// check if topology constraints match what is found
// TODO: we need an API to fetch subvolume attributes (size/datapool and others), based
@ -205,7 +209,8 @@ func UndoVolReservation(
ctx context.Context,
volOptions *VolumeOptions,
vid VolumeIdentifier,
secret map[string]string) error {
secret map[string]string,
) error {
cr, err := util.NewAdminCredentials(secret)
if err != nil {
return err
@ -269,7 +274,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin
imageUUID, vid.FsSubvolName, err = j.ReserveName(
ctx, volOptions.MetadataPool, util.InvalidPoolID,
volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName,
volOptions.NamePrefix, "", "", volOptions.ReservedID, "")
volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID)
if err != nil {
return nil, err
}
@ -294,7 +299,8 @@ func ReserveSnap(
volOptions *VolumeOptions,
parentSubVolName string,
snap *SnapshotOption,
cr *util.Credentials) (*SnapshotIdentifier, error) {
cr *util.Credentials,
) (*SnapshotIdentifier, error) {
var (
vid SnapshotIdentifier
imageUUID string
@ -311,7 +317,7 @@ func ReserveSnap(
imageUUID, vid.FsSnapshotName, err = j.ReserveName(
ctx, volOptions.MetadataPool, util.InvalidPoolID,
volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName,
snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "")
snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "")
if err != nil {
return nil, err
}
@ -335,7 +341,8 @@ func UndoSnapReservation(
volOptions *VolumeOptions,
vid SnapshotIdentifier,
snapName string,
cr *util.Credentials) error {
cr *util.Credentials,
) error {
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
@ -367,7 +374,8 @@ func CheckSnapExists(
ctx context.Context,
volOptions *VolumeOptions,
snap *SnapshotOption,
cr *util.Credentials) (*SnapshotIdentifier, *core.SnapshotInfo, error) {
cr *util.Credentials,
) (*SnapshotIdentifier, *core.SnapshotInfo, error) {
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {

@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"path"
"strconv"
"strings"

@ -34,27 +35,31 @@ import (

type VolumeOptions struct {
core.SubVolume
TopologyPools *[]util.TopologyConstrainedPool
TopologyRequirement *csi.TopologyRequirement
Topology map[string]string

RequestName string
NamePrefix string
ClusterID string
FscID int64
MetadataPool string
// ReservedID represents the ID reserved for a subvolume
ReservedID string
Monitors string `json:"monitors"`
RootPath string `json:"rootPath"`
Mounter string `json:"mounter"`
ProvisionVolume bool `json:"provisionVolume"`
BackingSnapshotRoot string // Snapshot root relative to RootPath.
BackingSnapshotID string
KernelMountOptions string `json:"kernelMountOptions"`
FuseMountOptions string `json:"fuseMountOptions"`
// Network namespace file path to execute nsenter command
NetNamespaceFilePath string
TopologyPools *[]util.TopologyConstrainedPool
TopologyRequirement *csi.TopologyRequirement
Topology map[string]string
FscID int64

// conn is a connection to the Ceph cluster obtained from a ConnPool
conn *util.ClusterConnection

ProvisionVolume bool `json:"provisionVolume"`
BackingSnapshot bool `json:"backingSnapshot"`
}

// Connect a CephFS volume to the Ceph cluster.
@ -184,12 +189,19 @@ func (vo *VolumeOptions) GetConnection() *util.ClusterConnection {
return vo.conn
}

func fmtBackingSnapshotOptionMismatch(optName, expected, actual string) error {
return fmt.Errorf("%s option mismatch with backing snapshot: got %s, expected %s",
optName, actual, expected)
}

// NewVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters.
func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
cr *util.Credentials) (*VolumeOptions, error) {
cr *util.Credentials,
) (*VolumeOptions, error) {
var (
opts VolumeOptions
backingSnapshotBool string
err error
)

@ -227,6 +239,16 @@ func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
return nil, err
}

if err = extractOptionalOption(&backingSnapshotBool, "backingSnapshot", volOptions); err != nil {
return nil, err
}

if backingSnapshotBool != "" {
if opts.BackingSnapshot, err = strconv.ParseBool(backingSnapshotBool); err != nil {
return nil, fmt.Errorf("failed to parse backingSnapshot: %w", err)
}
}

opts.RequestName = requestName

err = opts.Connect(cr)
@ -260,6 +282,19 @@ func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo

opts.ProvisionVolume = true

if opts.BackingSnapshot {
if req.GetVolumeContentSource() == nil || req.GetVolumeContentSource().GetSnapshot() == nil {
return nil, errors.New("backingSnapshot option requires snapshot volume source")
}

opts.BackingSnapshotID = req.GetVolumeContentSource().GetSnapshot().GetSnapshotId()

err = opts.populateVolumeOptionsFromBackingSnapshot(ctx, cr)
if err != nil {
return nil, err
}
}

return &opts, nil
}

@ -268,7 +303,8 @@ func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
func NewVolumeOptionsFromVolID(
ctx context.Context,
volID string,
volOpt, secrets map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
volOpt, secrets map[string]string,
) (*VolumeOptions, *VolumeIdentifier, error) {
var (
vi util.CSIIdentifier
volOptions VolumeOptions
@ -362,28 +398,115 func NewVolumeOptionsFromVolID(
}
}

volOptions.ProvisionVolume = true
volOptions.SubVolume.VolID = vid.FsSubvolName
vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
info, err := vol.GetSubVolumeInfo(ctx)
if err == nil {
volOptions.RootPath = info.Path
volOptions.Features = info.Features
volOptions.Size = info.BytesQuota
if imageAttributes.BackingSnapshotID != "" || volOptions.BackingSnapshotID != "" {
volOptions.BackingSnapshot = true
volOptions.BackingSnapshotID = imageAttributes.BackingSnapshotID
}

if errors.Is(err, cerrors.ErrInvalidCommand) {
volOptions.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
volOptions.ProvisionVolume = true
volOptions.SubVolume.VolID = vid.FsSubvolName

if volOptions.BackingSnapshot {
err = volOptions.populateVolumeOptionsFromBackingSnapshot(ctx, cr)
} else {
err = volOptions.populateVolumeOptionsFromSubvolume(ctx)
}

return &volOptions, &vid, err
}

func (vo *VolumeOptions) populateVolumeOptionsFromSubvolume(ctx context.Context) error {
vol := core.NewSubVolume(vo.conn, &vo.SubVolume, vo.ClusterID)

var info *core.Subvolume
info, err := vol.GetSubVolumeInfo(ctx)
if err == nil {
vo.RootPath = info.Path
vo.Features = info.Features
vo.Size = info.BytesQuota
}

if errors.Is(err, cerrors.ErrInvalidCommand) {
vo.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
}

return err
}

func (vo *VolumeOptions) populateVolumeOptionsFromBackingSnapshot(
ctx context.Context,
cr *util.Credentials,
) error {
// As of CephFS snapshot v2 API, snapshots may be found in two locations:
//
// (a) /volumes/<volume group>/<subvolume>/.snap/<snapshot>/<UUID>
// (b) /volumes/<volume group>/<subvolume>/<UUID>/.snap/_<snapshot>_<snapshot inode number>

if !vo.ProvisionVolume {
// Case (b)
//
// If the volume is not provisioned by us, we assume that we have access only
// to snapshot's parent volume root. In this case, o.RootPath is expected to
// be already set in the volume context.

// BackingSnapshotRoot cannot be determined at this stage, because the
// full directory name is not known (see snapshot path format for case
// (b) above). RootPath/.snap must be traversed in order to find out
// the snapshot directory name.

return nil
}

parentBackingSnapVolOpts, _, snapID, err := NewSnapshotOptionsFromID(ctx, vo.BackingSnapshotID, cr)
if err != nil {
return fmt.Errorf("failed to retrieve backing snapshot %s: %w", vo.BackingSnapshotID, err)
}

// Ensure that backing snapshot parent's volume options match the context.
// Snapshot-backed volume inherits all its parent's (parent of the snapshot) options.

if vo.ClusterID != parentBackingSnapVolOpts.ClusterID {
return fmtBackingSnapshotOptionMismatch("clusterID", vo.ClusterID, parentBackingSnapVolOpts.ClusterID)
}

if vo.Pool != "" {
return errors.New("cannot set pool for snapshot-backed volume")
}

if vo.MetadataPool != parentBackingSnapVolOpts.MetadataPool {
return fmtBackingSnapshotOptionMismatch("MetadataPool", vo.MetadataPool, parentBackingSnapVolOpts.MetadataPool)
}

if vo.FsName != parentBackingSnapVolOpts.FsName {
return fmtBackingSnapshotOptionMismatch("fsName", vo.FsName, parentBackingSnapVolOpts.FsName)
}

if vo.SubvolumeGroup != parentBackingSnapVolOpts.SubvolumeGroup {
return fmtBackingSnapshotOptionMismatch("SubvolumeGroup", vo.SubvolumeGroup, parentBackingSnapVolOpts.SubvolumeGroup)
}

vo.Features = parentBackingSnapVolOpts.Features
vo.Size = parentBackingSnapVolOpts.Size

// For case (a) (o.ProvisionVolume==true is assumed), snapshot root path
// can be built out of subvolume root path, which is in following format:
//
// /volumes/<volume group>/<subvolume>/<subvolume UUID>

subvolRoot, subvolUUID := path.Split(parentBackingSnapVolOpts.RootPath)

vo.RootPath = subvolRoot
vo.BackingSnapshotRoot = path.Join(".snap", snapID.FsSnapshotName, subvolUUID)

return nil
}
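
The case (a) path arithmetic can be worked through with a tiny runnable example; all names below are illustrative:

package main

import (
	"fmt"
	"path"
)

// Derive the snapshot root of a provisioned snapshot-backed volume from the
// parent subvolume's root path, exactly as done above with path.Split and
// path.Join.
func main() {
	parentRootPath := "/volumes/csi/csi-vol-0001/5a3c-uuid" // hypothetical subvolume root
	snapshotName := "csi-snap-0001"                         // hypothetical snapshot name

	subvolRoot, subvolUUID := path.Split(parentRootPath)
	backingSnapshotRoot := path.Join(".snap", snapshotName, subvolUUID)

	fmt.Println(subvolRoot)          // /volumes/csi/csi-vol-0001/
	fmt.Println(backingSnapshotRoot) // .snap/csi-snap-0001/5a3c-uuid
}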
|
||||
// NewVolumeOptionsFromMonitorList generates a new instance of VolumeOptions and
|
||||
// VolumeIdentifier from the provided CSI volume context.
|
||||
func NewVolumeOptionsFromMonitorList(
|
||||
volID string,
|
||||
options, secrets map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
|
||||
options, secrets map[string]string,
|
||||
) (*VolumeOptions, *VolumeIdentifier, error) {
|
||||
var (
|
||||
opts VolumeOptions
|
||||
vid VolumeIdentifier
|
||||
@ -435,9 +558,17 @@ func NewVolumeOptionsFromMonitorList(
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err = extractOptionalOption(&opts.BackingSnapshotID, "backingSnapshotID", options); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
vid.FsSubvolName = volID
|
||||
vid.VolumeID = volID
|
||||
|
||||
if opts.BackingSnapshotID != "" {
|
||||
opts.BackingSnapshot = true
|
||||
}
|
||||
|
||||
return &opts, &vid, nil
|
||||
}
|
||||
|
||||
@ -446,7 +577,8 @@ func NewVolumeOptionsFromMonitorList(
|
||||
// detected to be a statically provisioned volume.
|
||||
func NewVolumeOptionsFromStaticVolume(
|
||||
volID string,
|
||||
options map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
|
||||
options map[string]string,
|
||||
) (*VolumeOptions, *VolumeIdentifier, error) {
|
||||
var (
|
||||
opts VolumeOptions
|
||||
vid VolumeIdentifier
|
||||
@ -507,6 +639,10 @@ func NewVolumeOptionsFromStaticVolume(
|
||||
vid.FsSubvolName = opts.RootPath
|
||||
vid.VolumeID = volID
|
||||
|
||||
if opts.BackingSnapshotID != "" {
|
||||
opts.BackingSnapshot = true
|
||||
}
|
||||
|
||||
return &opts, &vid, nil
|
||||
}
|
||||
|
||||
@ -515,7 +651,8 @@ func NewVolumeOptionsFromStaticVolume(
|
||||
func NewSnapshotOptionsFromID(
|
||||
ctx context.Context,
|
||||
snapID string,
|
||||
cr *util.Credentials) (*VolumeOptions, *core.SnapshotInfo, *SnapshotIdentifier, error) {
|
||||
cr *util.Credentials,
|
||||
) (*VolumeOptions, *core.SnapshotInfo, *SnapshotIdentifier, error) {
|
||||
var (
|
||||
vi util.CSIIdentifier
|
||||
volOptions VolumeOptions
|
||||
@ -594,6 +731,7 @@ func NewSnapshotOptionsFromID(
|
||||
}
|
||||
volOptions.Features = subvolInfo.Features
|
||||
volOptions.Size = subvolInfo.BytesQuota
|
||||
volOptions.RootPath = subvolInfo.Path
|
||||
snap := core.NewSnapshot(volOptions.conn, sid.FsSnapshotName, &volOptions.SubVolume)
|
||||
info, err := snap.GetSnapshotInfo(ctx)
|
||||
if err != nil {
|
||||
|
@ -37,6 +37,7 @@ type Manager interface {
|
||||
type Config struct {
|
||||
DriverName string
|
||||
Namespace string
|
||||
ClusterName string
|
||||
}
|
||||
|
||||
// ControllerList holds the list of managers need to be started.
|
||||
|
@ -93,7 +93,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
func (r *ReconcilePersistentVolume) getCredentials(
|
||||
ctx context.Context,
|
||||
name,
|
||||
namespace string) (*util.Credentials, error) {
|
||||
namespace string,
|
||||
) (*util.Credentials, error) {
|
||||
var cr *util.Credentials
|
||||
|
||||
if name == "" || namespace == "" {
|
||||
@ -183,6 +184,7 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
|
||||
volumeHandler,
|
||||
requestName,
|
||||
pvcNamespace,
|
||||
r.config.ClusterName,
|
||||
cr)
|
||||
if err != nil {
|
||||
log.ErrorLogMsg("failed to regenerate journal %s", err)
|
||||
@ -199,7 +201,8 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
|
||||
// Reconcile reconciles the PersistentVolume object and creates a new omap entries
|
||||
// for the volume.
|
||||
func (r *ReconcilePersistentVolume) Reconcile(ctx context.Context,
|
||||
request reconcile.Request) (reconcile.Result, error) {
|
||||
request reconcile.Request,
|
||||
) (reconcile.Result, error) {
|
||||
pv := &corev1.PersistentVolume{}
|
||||
err := r.client.Get(ctx, request.NamespacedName, pv)
|
||||
if err != nil {
|
||||
|
@ -39,7 +39,8 @@ func NewNetworkFence(
|
||||
ctx context.Context,
|
||||
cr *util.Credentials,
|
||||
cidrs []*fence.CIDR,
|
||||
fenceOptions map[string]string) (*NetworkFence, error) {
|
||||
fenceOptions map[string]string,
|
||||
) (*NetworkFence, error) {
|
||||
var err error
|
||||
nwFence := &NetworkFence{}
|
||||
|
||||
|
@ -49,7 +49,8 @@ func (is *IdentityServer) RegisterService(server grpc.ServiceRegistrar) {
|
||||
// GetIdentity returns available capabilities of the rbd driver.
|
||||
func (is *IdentityServer) GetIdentity(
|
||||
ctx context.Context,
|
||||
req *identity.GetIdentityRequest) (*identity.GetIdentityResponse, error) {
|
||||
req *identity.GetIdentityRequest,
|
||||
) (*identity.GetIdentityResponse, error) {
|
||||
// only include Name and VendorVersion, Manifest is optional
|
||||
res := &identity.GetIdentityResponse{
|
||||
Name: is.config.DriverName,
|
||||
@ -62,7 +63,8 @@ func (is *IdentityServer) GetIdentity(
|
||||
// GetCapabilities returns available capabilities of the rbd driver.
|
||||
func (is *IdentityServer) GetCapabilities(
|
||||
ctx context.Context,
|
||||
req *identity.GetCapabilitiesRequest) (*identity.GetCapabilitiesResponse, error) {
|
||||
req *identity.GetCapabilitiesRequest,
|
||||
) (*identity.GetCapabilitiesResponse, error) {
|
||||
// build the list of capabilities, depending on the config
|
||||
caps := make([]*identity.Capability, 0)
|
||||
|
||||
@ -121,7 +123,8 @@ func (is *IdentityServer) GetCapabilities(
|
||||
// still healthy.
|
||||
func (is *IdentityServer) Probe(
|
||||
ctx context.Context,
|
||||
req *identity.ProbeRequest) (*identity.ProbeResponse, error) {
|
||||
req *identity.ProbeRequest,
|
||||
) (*identity.ProbeResponse, error) {
|
||||
// there is nothing that would cause a delay in getting ready
|
||||
res := &identity.ProbeResponse{
|
||||
Ready: &wrapperspb.BoolValue{Value: true},
|
||||
|
@ -60,7 +60,8 @@ func validateNetworkFenceReq(fenceClients []*fence.CIDR, options map[string]stri
|
||||
// to the malicious clients to prevent data corruption.
|
||||
func (fcs *FenceControllerServer) FenceClusterNetwork(
|
||||
ctx context.Context,
|
||||
req *fence.FenceClusterNetworkRequest) (*fence.FenceClusterNetworkResponse, error) {
|
||||
req *fence.FenceClusterNetworkRequest,
|
||||
) (*fence.FenceClusterNetworkResponse, error) {
|
||||
err := validateNetworkFenceReq(req.GetCidrs(), req.Parameters)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
@ -88,7 +89,8 @@ func (fcs *FenceControllerServer) FenceClusterNetwork(
|
||||
// UnfenceClusterNetwork unblocks the access to a CIDR block by removing the network fence.
|
||||
func (fcs *FenceControllerServer) UnfenceClusterNetwork(
|
||||
ctx context.Context,
|
||||
req *fence.UnfenceClusterNetworkRequest) (*fence.UnfenceClusterNetworkResponse, error) {
|
||||
req *fence.UnfenceClusterNetworkRequest,
|
||||
) (*fence.UnfenceClusterNetworkResponse, error) {
|
||||
err := validateNetworkFenceReq(req.GetCidrs(), req.Parameters)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
|
@ -49,7 +49,8 @@ func (rscs *ReclaimSpaceControllerServer) RegisterService(server grpc.ServiceReg
|
||||
|
||||
func (rscs *ReclaimSpaceControllerServer) ControllerReclaimSpace(
|
||||
ctx context.Context,
|
||||
req *rs.ControllerReclaimSpaceRequest) (*rs.ControllerReclaimSpaceResponse, error) {
|
||||
req *rs.ControllerReclaimSpaceRequest,
|
||||
) (*rs.ControllerReclaimSpaceResponse, error) {
|
||||
volumeID := req.GetVolumeId()
|
||||
if volumeID == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
|
||||
@ -97,10 +98,10 @@ func (rsns *ReclaimSpaceNodeServer) RegisterService(server grpc.ServiceRegistrar
|
||||
// an error is returned to prevent potential data corruption.
|
||||
func (rsns *ReclaimSpaceNodeServer) NodeReclaimSpace(
|
||||
ctx context.Context,
|
||||
req *rs.NodeReclaimSpaceRequest) (*rs.NodeReclaimSpaceResponse, error) {
|
||||
req *rs.NodeReclaimSpaceRequest,
|
||||
) (*rs.NodeReclaimSpaceResponse, error) {
|
||||
// volumeID is a required attribute, it is part of the path to run the
|
||||
// space reducing command on
|
||||
// nolint:ifshort // volumeID is incorrectly assumed to be used only once
|
||||
volumeID := req.GetVolumeId()
|
||||
if volumeID == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
|
||||
|
@ -34,28 +34,32 @@ type DefaultControllerServer struct {
|
||||
// ControllerPublishVolume publish volume on node.
|
||||
func (cs *DefaultControllerServer) ControllerPublishVolume(
|
||||
ctx context.Context,
|
||||
req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
|
||||
req *csi.ControllerPublishVolumeRequest,
|
||||
) (*csi.ControllerPublishVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
// ControllerUnpublishVolume unpublish on node.
|
||||
func (cs *DefaultControllerServer) ControllerUnpublishVolume(
|
||||
ctx context.Context,
|
||||
req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
|
||||
req *csi.ControllerUnpublishVolumeRequest,
|
||||
) (*csi.ControllerUnpublishVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
// ListVolumes lists volumes.
|
||||
func (cs *DefaultControllerServer) ListVolumes(
|
||||
ctx context.Context,
|
||||
req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
|
||||
req *csi.ListVolumesRequest,
|
||||
) (*csi.ListVolumesResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
// GetCapacity get volume capacity.
|
||||
func (cs *DefaultControllerServer) GetCapacity(
|
||||
ctx context.Context,
|
||||
req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
|
||||
req *csi.GetCapacityRequest,
|
||||
) (*csi.GetCapacityResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
@ -63,7 +67,8 @@ func (cs *DefaultControllerServer) GetCapacity(
|
||||
// Default supports all capabilities.
|
||||
func (cs *DefaultControllerServer) ControllerGetCapabilities(
|
||||
ctx context.Context,
|
||||
req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
|
||||
req *csi.ControllerGetCapabilitiesRequest,
|
||||
) (*csi.ControllerGetCapabilitiesResponse, error) {
|
||||
log.TraceLog(ctx, "Using default ControllerGetCapabilities")
|
||||
if cs.Driver == nil {
|
||||
return nil, status.Error(codes.Unimplemented, "Controller server is not enabled")
|
||||
@ -77,13 +82,15 @@ func (cs *DefaultControllerServer) ControllerGetCapabilities(
|
||||
// ListSnapshots lists snapshots.
|
||||
func (cs *DefaultControllerServer) ListSnapshots(
|
||||
ctx context.Context,
|
||||
req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
|
||||
req *csi.ListSnapshotsRequest,
|
||||
) (*csi.ListSnapshotsResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
// ControllerGetVolume fetch volume information.
|
||||
func (cs *DefaultControllerServer) ControllerGetVolume(
|
||||
ctx context.Context,
|
||||
req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
|
||||
req *csi.ControllerGetVolumeRequest,
|
||||
) (*csi.ControllerGetVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
@ -100,7 +100,8 @@ func (d *CSIDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceC

// AddVolumeCapabilityAccessModes stores volume access modes.
func (d *CSIDriver) AddVolumeCapabilityAccessModes(
    vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
    vc []csi.VolumeCapability_AccessMode_Mode,
) []*csi.VolumeCapability_AccessMode {
    vca := make([]*csi.VolumeCapability_AccessMode, 0, len(vc))
    for _, c := range vc {
        log.DefaultLog("Enabling volume access mode: %v", c.String())

@ -34,7 +34,8 @@ type DefaultIdentityServer struct {
// GetPluginInfo returns plugin information.
func (ids *DefaultIdentityServer) GetPluginInfo(
    ctx context.Context,
    req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
    req *csi.GetPluginInfoRequest,
) (*csi.GetPluginInfoResponse, error) {
    log.TraceLog(ctx, "Using default GetPluginInfo")

    if ids.Driver.name == "" {

@ -59,7 +60,8 @@ func (ids *DefaultIdentityServer) Probe(ctx context.Context, req *csi.ProbeReque
// GetPluginCapabilities returns plugin capabilities.
func (ids *DefaultIdentityServer) GetPluginCapabilities(
    ctx context.Context,
    req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
    req *csi.GetPluginCapabilitiesRequest,
) (*csi.GetPluginCapabilitiesResponse, error) {
    log.TraceLog(ctx, "Using default capabilities")

    return &csi.GetPluginCapabilitiesResponse{

@ -35,14 +35,16 @@ type DefaultNodeServer struct {
// NodeExpandVolume returns unimplemented response.
func (ns *DefaultNodeServer) NodeExpandVolume(
    ctx context.Context,
    req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
    req *csi.NodeExpandVolumeRequest,
) (*csi.NodeExpandVolumeResponse, error) {
    return nil, status.Error(codes.Unimplemented, "")
}

// NodeGetInfo returns node ID.
func (ns *DefaultNodeServer) NodeGetInfo(
    ctx context.Context,
    req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
    req *csi.NodeGetInfoRequest,
) (*csi.NodeGetInfoResponse, error) {
    log.TraceLog(ctx, "Using default NodeGetInfo")

    csiTopology := &csi.Topology{

@ -58,7 +60,8 @@ func (ns *DefaultNodeServer) NodeGetInfo(
// NodeGetCapabilities returns RPC unknown capability.
func (ns *DefaultNodeServer) NodeGetCapabilities(
    ctx context.Context,
    req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
    req *csi.NodeGetCapabilitiesRequest,
) (*csi.NodeGetCapabilitiesResponse, error) {
    log.TraceLog(ctx, "Using default NodeGetCapabilities")

    return &csi.NodeGetCapabilitiesResponse{

@ -173,7 +173,8 @@ func contextIDInjector(
    ctx context.Context,
    req interface{},
    info *grpc.UnaryServerInfo,
    handler grpc.UnaryHandler) (resp interface{}, err error) {
    handler grpc.UnaryHandler,
) (interface{}, error) {
    atomic.AddUint64(&id, 1)
    ctx = context.WithValue(ctx, log.CtxKey, id)
    if reqID := getReqID(req); reqID != "" {

@ -187,7 +188,8 @@ func logGRPC(
    ctx context.Context,
    req interface{},
    info *grpc.UnaryServerInfo,
    handler grpc.UnaryHandler) (interface{}, error) {
    handler grpc.UnaryHandler,
) (interface{}, error) {
    log.ExtendedLog(ctx, "GRPC call: %s", info.FullMethod)
    if isReplicationRequest(req) {
        log.TraceLog(ctx, "GRPC request: %s", rp.StripReplicationSecrets(req))

@ -205,11 +207,13 @@ func logGRPC(
    return resp, err
}

//nolint:nonamedreturns // named return used to send recovered panic error.
func panicHandler(
    ctx context.Context,
    req interface{},
    info *grpc.UnaryServerInfo,
    handler grpc.UnaryHandler) (resp interface{}, err error) {
    handler grpc.UnaryHandler,
) (resp interface{}, err error) {
    defer func() {
        if r := recover(); r != nil {
            klog.Errorf("panic occurred: %v", r)
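contextIDInjector, logGRPC and panicHandler are gRPC unary server interceptors. A rough sketch of how such interceptors are typically wired together with grpc.ChainUnaryInterceptor; the two functions below are simplified stand-ins for the ones in the diff, not the actual ceph-csi wiring:

package main

import (
    "context"
    "fmt"
    "log"

    "google.golang.org/grpc"
)

// recoverer mirrors the shape of panicHandler above: a deferred recover()
// converts a panic in the handler into a returned error, which is why the
// named err return is kept.
func recoverer(
    ctx context.Context,
    req interface{},
    info *grpc.UnaryServerInfo,
    handler grpc.UnaryHandler,
) (resp interface{}, err error) {
    defer func() {
        if r := recover(); r != nil {
            err = fmt.Errorf("panic in %s: %v", info.FullMethod, r)
        }
    }()

    return handler(ctx, req)
}

// logger logs every call before passing it on, like logGRPC above.
func logger(
    ctx context.Context,
    req interface{},
    info *grpc.UnaryServerInfo,
    handler grpc.UnaryHandler,
) (interface{}, error) {
    log.Printf("GRPC call: %s", info.FullMethod)

    return handler(ctx, req)
}

func main() {
    // Interceptors run in the order listed; the real handler runs last.
    _ = grpc.NewServer(grpc.ChainUnaryInterceptor(logger, recoverer))
}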
@ -34,7 +34,8 @@ const chunkSize int64 = 512
func getOMapValues(
    ctx context.Context,
    conn *Connection,
    poolName, namespace, oid, prefix string, keys []string) (map[string]string, error) {
    poolName, namespace, oid, prefix string, keys []string,
) (map[string]string, error) {
    // fetch and configure the rados ioctx
    ioctx, err := conn.conn.GetIoctx(poolName)
    if err != nil {

@ -93,7 +94,8 @@ func getOMapValues(
func removeMapKeys(
    ctx context.Context,
    conn *Connection,
    poolName, namespace, oid string, keys []string) error {
    poolName, namespace, oid string, keys []string,
) error {
    // fetch and configure the rados ioctx
    ioctx, err := conn.conn.GetIoctx(poolName)
    if err != nil {

@ -129,7 +131,8 @@ func removeMapKeys(
func setOMapKeys(
    ctx context.Context,
    conn *Connection,
    poolName, namespace, oid string, pairs map[string]string) error {
    poolName, namespace, oid string, pairs map[string]string,
) error {
    // fetch and configure the rados ioctx
    ioctx, err := conn.conn.GetIoctx(poolName)
    if err != nil {
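getOMapValues reads at most chunkSize key/value pairs per call and loops until everything has been fetched. A minimal sketch of that pagination pattern against the plain go-ceph rados API, assuming GetOmapValues behaves as documented; object and prefix names are made up, and the real function additionally handles namespaces and missing-object errors:

package omapsketch

import (
    "fmt"

    "github.com/ceph/go-ceph/rados"
)

// fetchOMap pages through the omap of one object, chunkSize entries at a
// time, mirroring the loop in getOMapValues above.
func fetchOMap(ioctx *rados.IOContext, oid, prefix string) (map[string]string, error) {
    const chunkSize = 512
    results := map[string]string{}
    startAfter := ""

    for {
        kvs, err := ioctx.GetOmapValues(oid, startAfter, prefix, chunkSize)
        if err != nil {
            return nil, fmt.Errorf("omap read on %q failed: %w", oid, err)
        }

        for k, v := range kvs {
            results[k] = string(v)
            // remember the lexically greatest key as the cursor for the
            // next page (map iteration order is random)
            if k > startAfter {
                startAfter = k
            }
        }

        if len(kvs) < chunkSize {
            break // short page: nothing left to fetch
        }
    }

    return results, nil
}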
@ -152,6 +152,9 @@ type Config struct {
    // ownerKey is used to identify the owner of the volume, can be used with some KMS configurations
    ownerKey string

    // backingSnapshotIDKey ID of the snapshot on which the CephFS snapshot-backed volume is based
    backingSnapshotIDKey string

    // commonPrefix is the prefix common to all omap keys for this Config
    commonPrefix string
}

@ -170,6 +173,7 @@ func NewCSIVolumeJournal(suffix string) *Config {
        csiImageIDKey:        "csi.imageid",
        encryptKMSKey:        "csi.volume.encryptKMS",
        ownerKey:             "csi.volume.owner",
        backingSnapshotIDKey: "csi.volume.backingsnapshotid",
        commonPrefix:         "csi.",
    }
}

@ -275,7 +279,8 @@ Return values:
    - error: non-nil in case of any errors
*/
func (conn *Connection) CheckReservation(ctx context.Context,
    journalPool, reqName, namePrefix, snapParentName, kmsConfig string) (*ImageData, error) {
    journalPool, reqName, namePrefix, snapParentName, kmsConfig string,
) (*ImageData, error) {
    var (
        snapSource bool
        objUUID    string

@ -415,7 +420,8 @@ Input arguments:
    different if image is created in a topology constrained pool)
*/
func (conn *Connection) UndoReservation(ctx context.Context,
    csiJournalPool, volJournalPool, volName, reqName string) error {
    csiJournalPool, volJournalPool, volName, reqName string,
) error {
    // delete volume UUID omap (first, inverse of create order)

    cj := conn.config

@ -467,7 +473,8 @@ func reserveOMapName(
    ctx context.Context,
    monitors string,
    cr *util.Credentials,
    pool, namespace, oMapNamePrefix, volUUID string) (string, error) {
    pool, namespace, oMapNamePrefix, volUUID string,
) (string, error) {
    var iterUUID string

    maxAttempts := 5

@ -525,6 +532,7 @@ Input arguments:
    - kmsConf: Name of the key management service used to encrypt the image (optional)
    - volUUID: UUID need to be reserved instead of auto-generating one (this is useful for mirroring and metro-DR)
    - owner: the owner of the volume (optional)
    - backingSnapshotID: ID of the snapshot on which the CephFS snapshot-backed volume is based (optional)

Return values:
    - string: Contains the UUID that was reserved for the passed in reqName

@ -534,7 +542,9 @@ Return values:
func (conn *Connection) ReserveName(ctx context.Context,
    journalPool string, journalPoolID int64,
    imagePool string, imagePoolID int64,
    reqName, namePrefix, parentName, kmsConf, volUUID, owner string) (string, string, error) {
    reqName, namePrefix, parentName, kmsConf, volUUID, owner,
    backingSnapshotID string,
) (string, string, error) {
    // TODO: Take in-arg as ImageAttributes?
    var (
        snapSource bool

@ -635,6 +645,11 @@ func (conn *Connection) ReserveName(ctx context.Context,
        omapValues[cj.cephSnapSourceKey] = parentName
    }

    // Update backing snapshot ID for snapshot-backed CephFS volume
    if backingSnapshotID != "" {
        omapValues[cj.backingSnapshotIDKey] = backingSnapshotID
    }

    err = setOMapKeys(ctx, conn, journalPool, cj.namespace, oid, omapValues)
    if err != nil {
        return "", "", err

@ -652,13 +667,15 @@ type ImageAttributes struct {
    Owner             string // Contains the owner to be used in combination with KmsID (for some KMS)
    ImageID           string // Contains the image id
    JournalPoolID     int64  // Pool ID of the CSI journal pool, stored in big endian format (on-disk data)
    BackingSnapshotID string // ID of the snapshot on which the CephFS snapshot-backed volume is based
}

// GetImageAttributes fetches all keys and their values, from a UUID directory, returning ImageAttributes structure.
func (conn *Connection) GetImageAttributes(
    ctx context.Context,
    pool, objectUUID string,
    snapSource bool) (*ImageAttributes, error) {
    snapSource bool,
) (*ImageAttributes, error) {
    var (
        err             error
        imageAttributes = &ImageAttributes{}

@ -679,6 +696,7 @@ func (conn *Connection) GetImageAttributes(
        cj.cephSnapSourceKey,
        cj.csiImageIDKey,
        cj.ownerKey,
        cj.backingSnapshotIDKey,
    }
    values, err := getOMapValues(
        ctx, conn, pool, cj.namespace, cj.cephUUIDDirectoryPrefix+objectUUID,

@ -695,6 +713,7 @@ func (conn *Connection) GetImageAttributes(
    imageAttributes.KmsID = values[cj.encryptKMSKey]
    imageAttributes.Owner = values[cj.ownerKey]
    imageAttributes.ImageID = values[cj.csiImageIDKey]
    imageAttributes.BackingSnapshotID = values[cj.backingSnapshotIDKey]

    // image key was added at a later point, so not all volumes will have this
    // key set when ceph-csi was upgraded

@ -782,7 +801,8 @@ func (conn *Connection) Destroy() {
// CheckNewUUIDMapping checks is there any UUID mapping between old
// volumeHandle and the newly generated volumeHandle.
func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
    journalPool, volumeHandle string) (string, error) {
    journalPool, volumeHandle string,
) (string, error) {
    cj := conn.config

    // check if request name is already part of the directory omap

@ -812,7 +832,8 @@ func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
// secondary cluster cephcsi will generate the new mapping and keep it for
// internal reference.
func (conn *Connection) ReserveNewUUIDMapping(ctx context.Context,
    journalPool, oldVolumeHandle, newVolumeHandle string) error {
    journalPool, oldVolumeHandle, newVolumeHandle string,
) error {
    cj := conn.config

    setKeys := map[string]string{
@ -199,8 +199,7 @@ func (kms *awsMetadataKMS) EncryptDEK(volumeID, plainDEK string) (string, error)

    // base64 encode the encrypted DEK, so that storing it should not have
    // issues
    encryptedDEK :=
        base64.StdEncoding.EncodeToString(result.CiphertextBlob)
    encryptedDEK := base64.StdEncoding.EncodeToString(result.CiphertextBlob)

    return encryptedDEK, nil
}

@ -269,7 +269,8 @@ func RegisterProvider(provider Provider) bool {
func (kf *kmsProviderList) buildKMS(
    tenant string,
    config map[string]interface{},
    secrets map[string]string) (EncryptionKMS, error) {
    secrets map[string]string,
) (EncryptionKMS, error) {
    providerName, err := getProvider(config)
    if err != nil {
        return nil, err

@ -138,7 +138,8 @@ func initSecretsMetadataKMS(args ProviderInitArgs) (EncryptionKMS, error) {
// fetchEncryptionPassphrase fetches encryptionPassphrase from user provided secret.
func (kms secretsMetadataKMS) fetchEncryptionPassphrase(
    config map[string]interface{},
    defaultNamespace string) (string, error) {
    defaultNamespace string,
) (string, error) {
    var (
        secretName      string
        secretNamespace string

@ -302,6 +302,7 @@ func (vc *vaultConnection) Destroy() {
    tmpFile, ok := vc.vaultConfig[api.EnvVaultCACert]
    if ok {
        // ignore error on failure to remove tmpfile (gosec complains)
        //nolint:forcetypeassert // ignore error on failure to remove tmpfile
        _ = os.Remove(tmpFile.(string))
    }
}

@ -23,9 +23,13 @@ import (
    "io/ioutil"
    "os"

    "github.com/ceph/ceph-csi/internal/util/k8s"

    "github.com/libopenstorage/secrets/vault"
    authenticationv1 "k8s.io/api/authentication/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

const (

@ -35,6 +39,9 @@ const (
    // should be available in the Tenants namespace. This ServiceAccount
    // will be used to connect to Hashicorp Vault.
    vaultTenantSAName = "ceph-csi-vault-sa"
    // Kubernetes version which requires ServiceAccount token creation.
    // Kubernetes 1.24 => 1 * 1000 + 24.
    kubeMinVersionForCreateToken = 1024
)

/*

@ -292,9 +299,9 @@ func (kms *vaultTenantSA) getToken() (string, error) {
    }

    for _, secretRef := range sa.Secrets {
        secret, err := c.CoreV1().Secrets(kms.Tenant).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
        if err != nil {
            return "", fmt.Errorf("failed to get Secret %s/%s: %w", kms.Tenant, secretRef.Name, err)
        secret, sErr := c.CoreV1().Secrets(kms.Tenant).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
        if sErr != nil {
            return "", fmt.Errorf("failed to get Secret %s/%s: %w", kms.Tenant, secretRef.Name, sErr)
        }

        token, ok := secret.Data["token"]

@ -303,7 +310,7 @@ func (kms *vaultTenantSA) getToken() (string, error) {
        }
    }

    return "", fmt.Errorf("failed to find token in ServiceAccount %s/%s", kms.Tenant, kms.tenantSAName)
    return kms.createToken(sa, c)
}

// getTokenPath creates a temporary directory structure that contains the token

@ -327,3 +334,33 @@ func (kms *vaultTenantSA) getTokenPath() (string, error) {

    return dir + "/token", nil
}

// createToken creates required service account token for kubernetes 1.24+,
// else returns error.
// From kubernetes v1.24+, secret for service account tokens are not
// automatically created. Hence, use the create token api to fetch it.
// refer: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md \
// #no-really-you-must-read-this-before-you-upgrade-1 .
func (kms *vaultTenantSA) createToken(sa *corev1.ServiceAccount, client *kubernetes.Clientset) (string, error) {
    major, minor, err := k8s.GetServerVersion(client)
    if err != nil {
        return "", fmt.Errorf("failed to get server version: %w", err)
    }

    if (major*1000 + minor) >= kubeMinVersionForCreateToken {
        tokenRequest := &authenticationv1.TokenRequest{}
        token, err := client.CoreV1().ServiceAccounts(kms.Tenant).CreateToken(
            context.TODO(),
            sa.Name,
            tokenRequest,
            metav1.CreateOptions{},
        )
        if err != nil {
            return "", fmt.Errorf("failed to create token for service account %s/%s: %w", kms.Tenant, sa.Name, err)
        }

        return token.Status.Token, nil
    }

    return "", fmt.Errorf("failed to find token in ServiceAccount %s/%s", kms.Tenant, kms.tenantSAName)
}
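The version gate in createToken packs the Kubernetes major and minor versions into a single integer, so v1.24 compares as 1024. A small self-contained check of that arithmetic (the function name is illustrative):

package main

import "fmt"

// needsTokenRequest reproduces the gate used by createToken above:
// major*1000 + minor, so Kubernetes 1.24 becomes 1024.
func needsTokenRequest(major, minor int) bool {
    const kubeMinVersionForCreateToken = 1024

    return major*1000+minor >= kubeMinVersionForCreateToken
}

func main() {
    fmt.Println(needsTokenRequest(1, 23)) // false: token Secret still auto-created
    fmt.Println(needsTokenRequest(1, 24)) // true: fetch via the TokenRequest API
    fmt.Println(needsTokenRequest(2, 0))  // true: any future major version
}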
@ -120,6 +120,7 @@ func TestInitVaultTokensKMS(t *testing.T) {
    // add tenant "bob"
    bob := make(map[string]interface{})
    bob["vaultAddress"] = "https://vault.bob.example.org"
    //nolint:forcetypeassert // as its a test we dont need to check assertion here.
    args.Config["tenants"].(map[string]interface{})["bob"] = bob

    _, err = initVaultTokensKMS(args)

@ -57,7 +57,8 @@ func NewControllerServer(d *csicommon.CSIDriver) *Server {
// capabilities that were set in the Driver.Run() function.
func (cs *Server) ControllerGetCapabilities(
    ctx context.Context,
    req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
    req *csi.ControllerGetCapabilitiesRequest,
) (*csi.ControllerGetCapabilitiesResponse, error) {
    return cs.backendServer.ControllerGetCapabilities(ctx, req)
}

@ -65,7 +66,8 @@ func (cs *Server) ControllerGetCapabilities(
// are supported.
func (cs *Server) ValidateVolumeCapabilities(
    ctx context.Context,
    req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
    req *csi.ValidateVolumeCapabilitiesRequest,
) (*csi.ValidateVolumeCapabilitiesResponse, error) {
    return cs.backendServer.ValidateVolumeCapabilities(ctx, req)
}

@ -73,7 +75,8 @@ func (cs *Server) ValidateVolumeCapabilities(
// created entities.
func (cs *Server) CreateVolume(
    ctx context.Context,
    req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
    req *csi.CreateVolumeRequest,
) (*csi.CreateVolumeResponse, error) {
    res, err := cs.backendServer.CreateVolume(ctx, req)
    if err != nil {
        return nil, err

@ -120,7 +123,8 @@ func (cs *Server) CreateVolume(
// DeleteVolume deletes the volume in backend and its reservation.
func (cs *Server) DeleteVolume(
    ctx context.Context,
    req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
    req *csi.DeleteVolumeRequest,
) (*csi.DeleteVolumeResponse, error) {
    secret := req.GetSecrets()
    cr, err := util.NewAdminCredentials(secret)
    if err != nil {

@ -157,7 +161,8 @@ func (cs *Server) DeleteVolume(
// new size.
func (cs *Server) ControllerExpandVolume(
    ctx context.Context,
    req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
    req *csi.ControllerExpandVolumeRequest,
) (*csi.ControllerExpandVolumeResponse, error) {
    return cs.backendServer.ControllerExpandVolume(ctx, req)
}

@ -165,7 +170,8 @@ func (cs *Server) ControllerExpandVolume(
// There is no interaction with the NFS-server needed for snapshot creation.
func (cs *Server) CreateSnapshot(
    ctx context.Context,
    req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
    req *csi.CreateSnapshotRequest,
) (*csi.CreateSnapshotResponse, error) {
    return cs.backendServer.CreateSnapshot(ctx, req)
}

@ -173,6 +179,7 @@ func (cs *Server) CreateSnapshot(
// There is no interaction with the NFS-server needed for snapshot creation.
func (cs *Server) DeleteSnapshot(
    ctx context.Context,
    req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
    req *csi.DeleteSnapshotRequest,
) (*csi.DeleteSnapshotResponse, error) {
    return cs.backendServer.DeleteSnapshot(ctx, req)
}
@ -40,7 +40,8 @@ func NewIdentityServer(d *csicommon.CSIDriver) *Server {
// GetPluginCapabilities returns available capabilities of the ceph driver.
func (is *Server) GetPluginCapabilities(
    ctx context.Context,
    req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
    req *csi.GetPluginCapabilitiesRequest,
) (*csi.GetPluginCapabilitiesResponse, error) {
    return &csi.GetPluginCapabilitiesResponse{
        Capabilities: []*csi.PluginCapability{
            {

@ -20,6 +20,7 @@ import (
    "context"
    "errors"
    "fmt"
    "strconv"

    csicommon "github.com/ceph/ceph-csi/internal/csi-common"
    "github.com/ceph/ceph-csi/internal/util"

@ -51,6 +52,9 @@ type ControllerServer struct {

    // A map storing all volumes/snapshots with ongoing operations.
    OperationLocks *util.OperationLock

    // Cluster name
    ClusterName string
}

func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.CreateVolumeRequest) error {

@ -91,6 +95,43 @@ func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.Crea
        return err
    }

    err = validateStriping(req.Parameters)
    if err != nil {
        return status.Error(codes.InvalidArgument, err.Error())
    }

    return nil
}

func validateStriping(parameters map[string]string) error {
    stripeUnit := parameters["stripeUnit"]
    stripeCount := parameters["stripeCount"]
    if stripeUnit != "" && stripeCount == "" {
        return errors.New("stripeCount must be specified when stripeUnit is specified")
    }

    if stripeUnit == "" && stripeCount != "" {
        return errors.New("stripeUnit must be specified when stripeCount is specified")
    }

    objectSize := parameters["objectSize"]
    if objectSize != "" {
        objSize, err := strconv.ParseUint(objectSize, 10, 64)
        if err != nil {
            return fmt.Errorf("failed to parse objectSize %s: %w", objectSize, err)
        }
        // check objectSize is power of 2
        /*
            Take 2^3=8 for example.
            x & (x-1)
            8 & 7
            1000 & 0111 = 0000
        */
        if objSize == 0 || (objSize&(objSize-1)) != 0 {
            return fmt.Errorf("objectSize %s is not power of 2", objectSize)
        }
    }

    return nil
}
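The bit trick in validateStriping generalizes: for any x > 0, x is a power of two exactly when x & (x-1) == 0, because subtracting one flips the lowest set bit and every bit below it. A quick standalone demonstration:

package main

import "fmt"

// isPowerOfTwo applies the same check validateStriping uses on objectSize.
func isPowerOfTwo(x uint64) bool {
    return x != 0 && x&(x-1) == 0
}

func main() {
    for _, v := range []uint64{0, 3, 8, 4096, 131072} {
        fmt.Printf("%6d -> %v\n", v, isPowerOfTwo(v))
    }
    // 0 and 3 fail; 8, 4096 and 131072 pass.
}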
@ -98,7 +139,8 @@ func (cs *ControllerServer) validateVolumeReq(ctx context.Context, req *csi.Crea
// request arguments for subsequent calls.
func (cs *ControllerServer) parseVolCreateRequest(
    ctx context.Context,
    req *csi.CreateVolumeRequest) (*rbdVolume, error) {
    req *csi.CreateVolumeRequest,
) (*rbdVolume, error) {
    // TODO (sbezverk) Last check for not exceeding total storage capacity

    // below capability check indicates that we support both {SINGLE_NODE or MULTI_NODE} WRITERs and the `isMultiWriter`

@ -131,6 +173,8 @@ func (cs *ControllerServer) parseVolCreateRequest(
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }

    rbdVol.ClusterName = cs.ClusterName

    // if the KMS is of type VaultToken, additional metadata is needed
    // depending on the tenant, the KMS can be configured with other
    // options

@ -195,8 +239,7 @@ func buildCreateVolumeResponse(req *csi.CreateVolumeRequest, rbdVol *rbdVolume)
        ContentSource: req.GetVolumeContentSource(),
    }
    if rbdVol.Topology != nil {
        volume.AccessibleTopology =
            []*csi.Topology{
        volume.AccessibleTopology = []*csi.Topology{
            {
                Segments: rbdVol.Topology,
            },

@ -252,7 +295,8 @@ func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSna
// CreateVolume creates the volume in backend.
func (cs *ControllerServer) CreateVolume(
    ctx context.Context,
    req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
    req *csi.CreateVolumeRequest,
) (*csi.CreateVolumeResponse, error) {
    err := cs.validateVolumeReq(ctx, req)
    if err != nil {
        return nil, err

@ -349,7 +393,8 @@ func flattenParentImage(
    ctx context.Context,
    rbdVol *rbdVolume,
    rbdSnap *rbdSnapshot,
    cr *util.Credentials) error {
    cr *util.Credentials,
) error {
    // flatten the image's parent before the reservation to avoid
    // stale entries in post creation if we return ABORT error and the
    // DeleteVolume RPC is not called.

@ -417,7 +462,8 @@ func flattenParentImage(
// that the state is corrected to what was requested. It is needed to call this
// when the process of creating a volume was interrupted.
func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.CreateVolumeRequest,
    cr *util.Credentials, rbdVol *rbdVolume, rbdSnap *rbdSnapshot) (*csi.CreateVolumeResponse, error) {
    cr *util.Credentials, rbdVol *rbdVolume, rbdSnap *rbdSnapshot,
) (*csi.CreateVolumeResponse, error) {
    vcs := req.GetVolumeContentSource()

    switch {

@ -558,7 +604,8 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
    cr *util.Credentials,
    secrets map[string]string,
    rbdVol *rbdVolume,
    snapshotID string) error {
    snapshotID string,
) error {
    rbdSnap := &rbdSnapshot{}
    if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
        log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)

@ -622,7 +669,8 @@ func (cs *ControllerServer) createBackingImage(
    cr *util.Credentials,
    secrets map[string]string,
    rbdVol, parentVol *rbdVolume,
    rbdSnap *rbdSnapshot) error {
    rbdSnap *rbdSnapshot,
) error {
    var err error

    j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)

@ -682,7 +730,8 @@ func (cs *ControllerServer) createBackingImage(
func checkContentSource(
    ctx context.Context,
    req *csi.CreateVolumeRequest,
    cr *util.Credentials) (*rbdVolume, *rbdSnapshot, error) {
    cr *util.Credentials,
) (*rbdVolume, *rbdSnapshot, error) {
    if req.VolumeContentSource == nil {
        return nil, nil, nil
    }

@ -743,7 +792,8 @@ func (cs *ControllerServer) checkErrAndUndoReserve(
    ctx context.Context,
    err error,
    volumeID string,
    rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) {
    rbdVol *rbdVolume, cr *util.Credentials,
) (*csi.DeleteVolumeResponse, error) {
    if errors.Is(err, util.ErrPoolNotFound) {
        log.WarningLog(ctx, "failed to get backend volume for %s: %v", volumeID, err)

@ -790,7 +840,8 @@ func (cs *ControllerServer) checkErrAndUndoReserve(
// from store.
func (cs *ControllerServer) DeleteVolume(
    ctx context.Context,
    req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
    req *csi.DeleteVolumeRequest,
) (*csi.DeleteVolumeResponse, error) {
    var err error
    if err = cs.Driver.ValidateControllerServiceRequest(
        csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {

@ -860,7 +911,8 @@ func (cs *ControllerServer) DeleteVolume(

// cleanupRBDImage removes the rbd image and OMAP metadata associated with it.
func cleanupRBDImage(ctx context.Context,
    rbdVol *rbdVolume, cr *util.Credentials) (*csi.DeleteVolumeResponse, error) {
    rbdVol *rbdVolume, cr *util.Credentials,
) (*csi.DeleteVolumeResponse, error) {
    mirroringInfo, err := rbdVol.getImageMirroringInfo()
    if err != nil {
        log.ErrorLog(ctx, err.Error())

@ -954,7 +1006,8 @@ func cleanupRBDImage(ctx context.Context,
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(
    ctx context.Context,
    req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
    req *csi.ValidateVolumeCapabilitiesRequest,
) (*csi.ValidateVolumeCapabilitiesResponse, error) {
    if req.GetVolumeId() == "" {
        return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
    }

@ -980,7 +1033,8 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(
// nolint:gocyclo,cyclop // TODO: reduce complexity.
func (cs *ControllerServer) CreateSnapshot(
    ctx context.Context,
    req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
    req *csi.CreateSnapshotRequest,
) (*csi.CreateSnapshotResponse, error) {
    if err := cs.validateSnapshotReq(ctx, req); err != nil {
        return nil, err
    }

@ -1079,6 +1133,7 @@ func (cs *ControllerServer) CreateSnapshot(

    // Update the metadata on snapshot not on the original image
    rbdVol.RbdImageName = rbdSnap.RbdSnapName
    rbdVol.ClusterName = cs.ClusterName

    err = rbdVol.unsetAllMetadata(k8s.GetVolumeMetadataKeys())
    if err != nil {

@ -1110,7 +1165,8 @@ func cloneFromSnapshot(
    rbdVol *rbdVolume,
    rbdSnap *rbdSnapshot,
    cr *util.Credentials,
    parameters map[string]string) (*csi.CreateSnapshotResponse, error) {
    parameters map[string]string,
) (*csi.CreateSnapshotResponse, error) {
    vol := generateVolFromSnap(rbdSnap)
    err := vol.Connect(cr)
    if err != nil {

@ -1193,7 +1249,8 @@ func (cs *ControllerServer) doSnapshotClone(
    ctx context.Context,
    parentVol *rbdVolume,
    rbdSnap *rbdSnapshot,
    cr *util.Credentials) (*rbdVolume, error) {
    cr *util.Credentials,
) (*rbdVolume, error) {
    // generate cloned volume details from snapshot
    cloneRbd := generateVolFromSnap(rbdSnap)
    defer cloneRbd.Destroy()

@ -1276,7 +1333,8 @@ func (cs *ControllerServer) doSnapshotClone(
// snapshot metadata from store.
func (cs *ControllerServer) DeleteSnapshot(
    ctx context.Context,
    req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
    req *csi.DeleteSnapshotRequest,
) (*csi.DeleteSnapshotResponse, error) {
    if err := cs.Driver.ValidateControllerServiceRequest(
        csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
        log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))

@ -1417,7 +1475,8 @@ func cleanUpImageAndSnapReservation(ctx context.Context, rbdSnap *rbdSnapshot, c
// ControllerExpandVolume expand RBD Volumes on demand based on resizer request.
func (cs *ControllerServer) ControllerExpandVolume(
    ctx context.Context,
    req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
    req *csi.ControllerExpandVolumeRequest,
) (*csi.ControllerExpandVolumeResponse, error) {
    err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME)
    if err != nil {
        log.ErrorLog(ctx, "invalid expand volume req: %v", protosanitizer.StripSecrets(req))
88
internal/rbd/controllerserver_test.go
Normal file
@ -0,0 +1,88 @@
/*
Copyright 2022 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import "testing"

func TestValidateStriping(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name       string
        parameters map[string]string
        wantErr    bool
    }{
        {
            name: "when stripeUnit is not specified",
            parameters: map[string]string{
                "stripeUnit":  "",
                "stripeCount": "10",
                "objectSize":  "2",
            },
            wantErr: true,
        },
        {
            name: "when stripeCount is not specified",
            parameters: map[string]string{
                "stripeUnit":  "4096",
                "stripeCount": "",
                "objectSize":  "2",
            },
            wantErr: true,
        },
        {
            name: "when objectSize is not power of 2",
            parameters: map[string]string{
                "stripeUnit":  "4096",
                "stripeCount": "8",
                "objectSize":  "3",
            },
            wantErr: true,
        },
        {
            name: "when objectSize is 0",
            parameters: map[string]string{
                "stripeUnit":  "4096",
                "stripeCount": "8",
                "objectSize":  "0",
            },
            wantErr: true,
        },
        {
            name: "when valid stripe parameters are specified",
            parameters: map[string]string{
                "stripeUnit":  "4096",
                "stripeCount": "8",
                "objectSize":  "131072",
            },
            wantErr: false,
        },
        {
            name:       "when no stripe parameters are specified",
            parameters: map[string]string{},
            wantErr:    false,
        },
    }
    for _, tt := range tests {
        ts := tt
        t.Run(ts.name, func(t *testing.T) {
            t.Parallel()
            if err := validateStriping(ts.parameters); (err != nil) != ts.wantErr {
                t.Errorf("validateStriping() error = %v, wantErr %v", err, ts.wantErr)
            }
        })
    }
}
@ -161,6 +161,7 @@ func (r *Driver) Run(conf *util.Config) {

    if conf.IsControllerServer {
        r.cs = NewControllerServer(r.cd)
        r.cs.ClusterName = conf.ClusterName
        r.rs = NewReplicationServer(r.cs)
    }
    if !conf.IsControllerServer && !conf.IsNodeServer {

@ -273,7 +273,8 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str
// ParseEncryptionOpts returns kmsID and sets Owner attribute.
func (ri *rbdImage) ParseEncryptionOpts(
    ctx context.Context,
    volOptions map[string]string) (string, error) {
    volOptions map[string]string,
) (string, error) {
    var (
        err error
        ok  bool

@ -33,7 +33,8 @@ type IdentityServer struct {
// GetPluginCapabilities returns available capabilities of the rbd driver.
func (is *IdentityServer) GetPluginCapabilities(
    ctx context.Context,
    req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
    req *csi.GetPluginCapabilitiesRequest,
) (*csi.GetPluginCapabilitiesResponse, error) {
    return &csi.GetPluginCapabilitiesResponse{
        Capabilities: []*csi.PluginCapability{
            {

@ -148,7 +148,8 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
func populateRbdVol(
    ctx context.Context,
    req *csi.NodeStageVolumeRequest,
    cr *util.Credentials) (*rbdVolume, error) {
    cr *util.Credentials,
) (*rbdVolume, error) {
    var err error
    var j *journal.Connection
    volID := req.GetVolumeId()

@ -295,7 +296,8 @@ func populateRbdVol(
// - Stage the device (mount the device mapped for image)
func (ns *NodeServer) NodeStageVolume(
    ctx context.Context,
    req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
    req *csi.NodeStageVolumeRequest,
) (*csi.NodeStageVolumeResponse, error) {
    var err error
    if err = util.ValidateNodeStageVolumeRequest(req); err != nil {
        return nil, err

@ -384,7 +386,8 @@ func (ns *NodeServer) stageTransaction(
    req *csi.NodeStageVolumeRequest,
    cr *util.Credentials,
    volOptions *rbdVolume,
    staticVol bool) (*stageTransaction, error) {
    staticVol bool,
) (*stageTransaction, error) {
    transaction := &stageTransaction{}

    var err error

@ -466,7 +469,8 @@ func resizeNodeStagePath(ctx context.Context,
    isBlock bool,
    transaction *stageTransaction,
    volID,
    stagingTargetPath string) error {
    stagingTargetPath string,
) error {
    var err error
    devicePath := transaction.devicePath
    var ok bool

@ -543,7 +547,8 @@ func resizeEncryptedDevice(ctx context.Context, volID, stagingTargetPath, device

func flattenImageBeforeMapping(
    ctx context.Context,
    volOptions *rbdVolume) error {
    volOptions *rbdVolume,
) error {
    var err error
    var feature bool
    var depth uint

@ -579,7 +584,8 @@ func (ns *NodeServer) undoStagingTransaction(
    ctx context.Context,
    req *csi.NodeStageVolumeRequest,
    transaction *stageTransaction,
    volOptions *rbdVolume) {
    volOptions *rbdVolume,
) {
    var err error

    stagingTargetPath := getStagingTargetPath(req)

@ -661,7 +667,8 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
// path.
func (ns *NodeServer) NodePublishVolume(
    ctx context.Context,
    req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
    req *csi.NodePublishVolumeRequest,
) (*csi.NodePublishVolumeResponse, error) {
    err := util.ValidateNodePublishVolumeRequest(req)
    if err != nil {
        return nil, err

@ -700,7 +707,8 @@ func (ns *NodeServer) mountVolumeToStagePath(
    ctx context.Context,
    req *csi.NodeStageVolumeRequest,
    staticVol bool,
    stagingPath, devicePath string) error {
    stagingPath, devicePath string,
) error {
    readOnly := false
    fsType := req.GetVolumeCapability().GetMount().GetFsType()
    diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}

@ -841,7 +849,8 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
// NodeUnpublishVolume unmounts the volume from the target path.
func (ns *NodeServer) NodeUnpublishVolume(
    ctx context.Context,
    req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
    req *csi.NodeUnpublishVolumeRequest,
) (*csi.NodeUnpublishVolumeResponse, error) {
    err := util.ValidateNodeUnpublishVolumeRequest(req)
    if err != nil {
        return nil, err

@ -898,7 +907,8 @@ func getStagingTargetPath(req interface{}) string {
// NodeUnstageVolume unstages the volume from the staging path.
func (ns *NodeServer) NodeUnstageVolume(
    ctx context.Context,
    req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
    req *csi.NodeUnstageVolumeRequest,
) (*csi.NodeUnstageVolumeResponse, error) {
    var err error
    if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
        return nil, err

@ -1004,7 +1014,8 @@ func (ns *NodeServer) NodeUnstageVolume(
// NodeExpandVolume resizes rbd volumes.
func (ns *NodeServer) NodeExpandVolume(
    ctx context.Context,
    req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
    req *csi.NodeExpandVolumeRequest,
) (*csi.NodeExpandVolumeResponse, error) {
    volumeID := req.GetVolumeId()
    if volumeID == "" {
        return nil, status.Error(codes.InvalidArgument, "volume ID must be provided")

@ -1078,7 +1089,8 @@ func (ns *NodeServer) NodeExpandVolume(
// NodeGetCapabilities returns the supported capabilities of the node server.
func (ns *NodeServer) NodeGetCapabilities(
    ctx context.Context,
    req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
    req *csi.NodeGetCapabilitiesRequest,
) (*csi.NodeGetCapabilitiesResponse, error) {
    return &csi.NodeGetCapabilitiesResponse{
        Capabilities: []*csi.NodeServiceCapability{
            {

@ -1116,7 +1128,8 @@ func (ns *NodeServer) NodeGetCapabilities(
func (ns *NodeServer) processEncryptedDevice(
    ctx context.Context,
    volOptions *rbdVolume,
    devicePath string) (string, error) {
    devicePath string,
) (string, error) {
    imageSpec := volOptions.String()
    encrypted, err := volOptions.checkRbdImageEncrypted(ctx)
    if err != nil {

@ -1212,7 +1225,8 @@ func (ns *NodeServer) xfsSupportsReflink() bool {
// NodeGetVolumeStats returns volume stats.
func (ns *NodeServer) NodeGetVolumeStats(
    ctx context.Context,
    req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
    req *csi.NodeGetVolumeStatsRequest,
) (*csi.NodeGetVolumeStatsResponse, error) {
    var err error
    targetPath := req.GetVolumePath()
    if targetPath == "" {
@ -538,7 +538,8 @@ func detachRBDDevice(ctx context.Context, devicePath, volumeID, unmapOptions str
// when imageSpec is used to decide if image is already unmapped.
func detachRBDImageOrDeviceSpec(
    ctx context.Context,
    dArgs *detachRBDImageArgs) error {
    dArgs *detachRBDImageArgs,
) error {
    if dArgs.encrypted {
        mapperFile, mapperPath := util.VolumeMapper(dArgs.volumeID)
        mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath)

@ -116,7 +116,8 @@ func checkSnapCloneExists(
    ctx context.Context,
    parentVol *rbdVolume,
    rbdSnap *rbdSnapshot,
    cr *util.Credentials) (bool, error) {
    cr *util.Credentials,
) (bool, error) {
    err := validateRbdSnap(rbdSnap)
    if err != nil {
        return false, err

@ -392,7 +393,7 @@ func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, c

    rbdSnap.ReservedID, rbdSnap.RbdSnapName, err = j.ReserveName(
        ctx, rbdSnap.JournalPool, journalPoolID, rbdSnap.Pool, imagePoolID,
        rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner)
        rbdSnap.RequestName, rbdSnap.NamePrefix, rbdVol.RbdImageName, kmsID, rbdSnap.ReservedID, rbdVol.Owner, "")
    if err != nil {
        return err
    }

@ -472,7 +473,7 @@ func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr

    rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
        ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
        rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner)
        rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, rbdVol.ReservedID, rbdVol.Owner, "")
    if err != nil {
        return err
    }

@ -540,8 +541,10 @@ func RegenerateJournal(
    claimName,
    volumeID,
    requestName,
    owner string,
    cr *util.Credentials) (string, error) {
    owner,
    clusterName string,
    cr *util.Credentials,
) (string, error) {
    ctx := context.Background()
    var (
        vi util.CSIIdentifier

@ -553,6 +556,7 @@ func RegenerateJournal(

    rbdVol = &rbdVolume{}
    rbdVol.VolID = volumeID
    rbdVol.ClusterName = clusterName

    err = vi.DecomposeCSIID(rbdVol.VolID)
    if err != nil {

@ -633,7 +637,7 @@ func RegenerateJournal(

    rbdVol.ReservedID, rbdVol.RbdImageName, err = j.ReserveName(
        ctx, rbdVol.JournalPool, journalPoolID, rbdVol.Pool, imagePoolID,
        rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner)
        rbdVol.RequestName, rbdVol.NamePrefix, "", kmsID, vi.ObjectUUID, rbdVol.Owner, "")
    if err != nil {
        return "", err
    }
@ -21,6 +21,7 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "math"
    "os"
    "path/filepath"
    "strconv"

@ -78,6 +79,9 @@ const (

    // krbd attribute file to check supported features.
    krbdSupportedFeaturesFile = "/sys/bus/rbd/supported_features"

    // clusterNameKey cluster Key, set on RBD image.
    clusterNameKey = "csi.ceph.com/cluster/name"
)

// rbdImage contains common attributes and methods for the rbdVolume and

@ -96,6 +100,11 @@ type rbdImage struct {
    // VolSize is the size of the RBD image backing this rbdImage.
    VolSize int64

    // image striping configurations.
    StripeCount uint64
    StripeUnit  uint64
    ObjectSize  uint64

    Monitors string
    // JournalPool is the ceph pool in which the CSI Journal/CSI snapshot Journal is
    // stored

@ -121,6 +130,9 @@ type rbdImage struct {
    // Primary represent if the image is primary or not.
    Primary bool

    // Cluster name
    ClusterName string

    // encryption provides access to optional VolumeEncryption functions
    encryption *util.VolumeEncryption
    // Owner is the creator (tenant, Kubernetes Namespace) of the volume

@ -402,27 +414,19 @@ func (rs *rbdSnapshot) String() string {
// createImage creates a new ceph image with provision and volume options.
func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
    volSzMiB := fmt.Sprintf("%dM", util.RoundOffVolSize(pOpts.VolSize))
    options := librbd.NewRbdImageOptions()

    logMsg := "rbd: create %s size %s (features: %s) using mon %s"
    if pOpts.DataPool != "" {
        logMsg += fmt.Sprintf(", data pool %s", pOpts.DataPool)
        err := options.SetString(librbd.RbdImageOptionDataPool, pOpts.DataPool)
        if err != nil {
            return fmt.Errorf("failed to set data pool: %w", err)
        }
    }
    log.DebugLog(ctx, logMsg,
    log.DebugLog(ctx, "rbd: create %s size %s (features: %s) using mon %s",
        pOpts, volSzMiB, pOpts.ImageFeatureSet.Names(), pOpts.Monitors)

    if pOpts.ImageFeatureSet != 0 {
        err := options.SetUint64(librbd.RbdImageOptionFeatures, uint64(pOpts.ImageFeatureSet))
    options := librbd.NewRbdImageOptions()
    defer options.Destroy()

    err := pOpts.setImageOptions(ctx, options)
    if err != nil {
        return fmt.Errorf("failed to set image features: %w", err)
    }
        return err
    }

    err := pOpts.Connect(cr)
    err = pOpts.Connect(cr)
    if err != nil {
        return err
    }

@ -722,7 +726,8 @@ func flattenClonedRbdImages(
    ctx context.Context,
    snaps []librbd.SnapInfo,
    pool, monitors, rbdImageName string,
    cr *util.Credentials) error {
    cr *util.Credentials,
) error {
    rv := &rbdVolume{}
    rv.Monitors = monitors
    rv.Pool = pool

@ -769,7 +774,8 @@ func flattenClonedRbdImages(
func (ri *rbdImage) flattenRbdImage(
    ctx context.Context,
    forceFlatten bool,
    hardlimit, softlimit uint) error {
    hardlimit, softlimit uint,
) error {
    var depth uint
    var err error

@ -926,7 +932,8 @@ func genSnapFromSnapID(
    rbdSnap *rbdSnapshot,
    snapshotID string,
    cr *util.Credentials,
    secrets map[string]string) error {
    secrets map[string]string,
) error {
    var vi util.CSIIdentifier

    rbdSnap.VolID = snapshotID

@ -1036,7 +1043,8 @@ func generateVolumeFromVolumeID(
    volumeID string,
    vi util.CSIIdentifier,
    cr *util.Credentials,
    secrets map[string]string) (*rbdVolume, error) {
    secrets map[string]string,
) (*rbdVolume, error) {
    var (
        rbdVol *rbdVolume
        err    error

@ -1123,7 +1131,8 @@ func GenVolFromVolID(
    ctx context.Context,
    volumeID string,
    cr *util.Credentials,
    secrets map[string]string) (*rbdVolume, error) {
    secrets map[string]string,
) (*rbdVolume, error) {
    var (
        vi  util.CSIIdentifier
        vol *rbdVolume

@ -1165,7 +1174,8 @@ func generateVolumeFromMapping(
    volumeID string,
    vi util.CSIIdentifier,
    cr *util.Credentials,
    secrets map[string]string) (*rbdVolume, error) {
    secrets map[string]string,
) (*rbdVolume, error) {
    nvi := vi
    vol := &rbdVolume{}
    // extract clusterID mapping

@ -1215,7 +1225,8 @@ func generateVolumeFromMapping(
func genVolFromVolumeOptions(
    ctx context.Context,
    volOptions map[string]string,
    disableInUseChecks, checkClusterIDMapping bool) (*rbdVolume, error) {
    disableInUseChecks, checkClusterIDMapping bool,
) (*rbdVolume, error) {
    var (
        ok  bool
        err error

@ -1267,9 +1278,40 @@ func genVolFromVolumeOptions(
        rbdVol.Mounter)
    rbdVol.DisableInUseChecks = disableInUseChecks

    err = rbdVol.setStripeConfiguration(volOptions)
    if err != nil {
        return nil, err
    }

    return rbdVol, nil
}

func (ri *rbdImage) setStripeConfiguration(options map[string]string) error {
    var err error
    if val, ok := options["stripeUnit"]; ok {
        ri.StripeUnit, err = strconv.ParseUint(val, 10, 64)
        if err != nil {
            return fmt.Errorf("failed to parse stripeUnit %s: %w", val, err)
        }
    }

    if val, ok := options["stripeCount"]; ok {
        ri.StripeCount, err = strconv.ParseUint(val, 10, 64)
        if err != nil {
            return fmt.Errorf("failed to parse stripeCount %s: %w", val, err)
        }
    }

    if val, ok := options["objectSize"]; ok {
        ri.ObjectSize, err = strconv.ParseUint(val, 10, 64)
        if err != nil {
            return fmt.Errorf("failed to parse objectSize %s: %w", val, err)
        }
    }

    return nil
}
func (rv *rbdVolume) validateImageFeatures(imageFeatures string) error {
    // It is possible for image features to be an empty string which
    // the Go split function would return a single item array with

@ -1368,9 +1410,11 @@ func (ri *rbdImage) deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot) erro
func (rv *rbdVolume) cloneRbdImageFromSnapshot(
    ctx context.Context,
    pSnapOpts *rbdSnapshot,
    parentVol *rbdVolume) error {
    parentVol *rbdVolume,
) error {
    var err error
    logMsg := "rbd: clone %s %s (features: %s) using mon %s"
    log.DebugLog(ctx, "rbd: clone %s %s (features: %s) using mon %s",
        pSnapOpts, rv, rv.ImageFeatureSet.Names(), rv.Monitors)

    err = parentVol.openIoctx()
    if err != nil {

@ -1383,30 +1427,15 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(

    options := librbd.NewRbdImageOptions()
    defer options.Destroy()

    if rv.DataPool != "" {
        logMsg += fmt.Sprintf(", data pool %s", rv.DataPool)
        err = options.SetString(librbd.RbdImageOptionDataPool, rv.DataPool)
    err = rv.setImageOptions(ctx, options)
    if err != nil {
        return fmt.Errorf("failed to set data pool: %w", err)
    }
    }

    log.DebugLog(ctx, logMsg,
        pSnapOpts, rv, rv.ImageFeatureSet.Names(), rv.Monitors)

    if rv.ImageFeatureSet != 0 {
        err = options.SetUint64(librbd.RbdImageOptionFeatures, uint64(rv.ImageFeatureSet))
        if err != nil {
            return fmt.Errorf("failed to set image features: %w", err)
        }
        return err
    }

    err = options.SetUint64(librbd.ImageOptionCloneFormat, 2)
    if err != nil {
        return fmt.Errorf("failed to set image features: %w", err)
        return err
    }

    // As the clone is yet to be created, open the Ioctx.
    err = rv.openIoctx()
    if err != nil {

@ -1447,6 +1476,52 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
    return nil
}

// setImageOptions sets the image options.
func (rv *rbdVolume) setImageOptions(ctx context.Context, options *librbd.ImageOptions) error {
    var err error

    logMsg := fmt.Sprintf("setting image options on %s", rv)
    if rv.DataPool != "" {
        logMsg += fmt.Sprintf(", data pool %s", rv.DataPool)
        err = options.SetString(librbd.RbdImageOptionDataPool, rv.DataPool)
        if err != nil {
            return fmt.Errorf("failed to set data pool: %w", err)
        }
    }

    if rv.ImageFeatureSet != 0 {
        err = options.SetUint64(librbd.RbdImageOptionFeatures, uint64(rv.ImageFeatureSet))
        if err != nil {
            return fmt.Errorf("failed to set image features: %w", err)
        }
    }

    if rv.StripeCount != 0 {
        logMsg += fmt.Sprintf(", stripe count %d, stripe unit %d", rv.StripeCount, rv.StripeUnit)
        err = options.SetUint64(librbd.RbdImageOptionStripeCount, rv.StripeCount)
        if err != nil {
            return fmt.Errorf("failed to set stripe count: %w", err)
        }
        err = options.SetUint64(librbd.RbdImageOptionStripeUnit, rv.StripeUnit)
        if err != nil {
            return fmt.Errorf("failed to set stripe unit: %w", err)
        }
    }

    if rv.ObjectSize != 0 {
        order := uint64(math.Log2(float64(rv.ObjectSize)))
        logMsg += fmt.Sprintf(", object size %d, order %d", rv.ObjectSize, order)
        err = options.SetUint64(librbd.RbdImageOptionOrder, order)
        if err != nil {
            return fmt.Errorf("failed to set object size: %w", err)
        }
    }

    log.DebugLog(ctx, logMsg)

    return nil
}
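setImageOptions converts objectSize into RBD's "order", where object size = 2^order bytes; validateStriping's power-of-two check guarantees Log2 yields an exact integer. A small illustration of the conversion:

package main

import (
    "fmt"
    "math"
)

// objectSizeToOrder mirrors the conversion in setImageOptions above.
func objectSizeToOrder(objectSize uint64) uint64 {
    return uint64(math.Log2(float64(objectSize)))
}

func main() {
    fmt.Println(objectSizeToOrder(4096))    // 12 (4 KiB objects)
    fmt.Println(objectSizeToOrder(131072))  // 17 (128 KiB, as in the test above)
    fmt.Println(objectSizeToOrder(4194304)) // 22 (4 MiB, the RBD default)
}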
||||
// getImageInfo queries rbd about the given image and returns its metadata, and returns
|
||||
// ErrImageNotFound if provided image is not found.
|
||||
func (ri *rbdImage) getImageInfo() error {
|
||||
@ -1904,7 +1979,8 @@ func (ri *rbdImage) isCompabitableClone(dst *rbdImage) error {
|
||||
|
||||
func (ri *rbdImage) addSnapshotScheduling(
|
||||
interval admin.Interval,
|
||||
startTime admin.StartTime) error {
|
||||
startTime admin.StartTime,
|
||||
) error {
|
||||
ls := admin.NewLevelSpec(ri.Pool, ri.RadosNamespace, ri.RbdImageName)
|
||||
ra, err := ri.conn.GetRBDAdmin()
|
||||
if err != nil {
|
||||
@ -1965,7 +2041,8 @@ func strategicActionOnLogFile(ctx context.Context, logStrategy, logFile string)
|
||||
|
||||
// genVolFromVolIDWithMigration populate a rbdVol structure based on the volID format.
|
||||
func genVolFromVolIDWithMigration(
|
||||
ctx context.Context, volID string, cr *util.Credentials, secrets map[string]string) (*rbdVolume, error) {
|
||||
ctx context.Context, volID string, cr *util.Credentials, secrets map[string]string,
|
||||
) (*rbdVolume, error) {
|
||||
if isMigrationVolID(volID) {
|
||||
pmVolID, pErr := parseMigrationVolID(volID)
|
||||
if pErr != nil {
|
||||
@ -1991,6 +2068,14 @@ func (rv *rbdVolume) setAllMetadata(parameters map[string]string) error {
|
||||
}
|
||||
}
|
||||
|
||||
if rv.ClusterName != "" {
|
||||
err := rv.SetMetadata(clusterNameKey, rv.ClusterName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set metadata key %q, value %q on image: %w",
|
||||
clusterNameKey, rv.ClusterName, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -2004,5 +2089,11 @@ func (rv *rbdVolume) unsetAllMetadata(keys []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
err := rv.RemoveMetadata(clusterNameKey)
|
||||
// TODO: replace string comparison with errno.
|
||||
if err != nil && !strings.Contains(err.Error(), "No such file or directory") {
|
||||
return fmt.Errorf("failed to unset metadata key %q on %q: %w", clusterNameKey, rv, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -457,7 +457,8 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
|
||||
|
||||
func disableVolumeReplication(rbdVol *rbdVolume,
|
||||
mirroringInfo *librbd.MirrorImageInfo,
|
||||
force bool) (*replication.DisableVolumeReplicationResponse, error) {
|
||||
force bool,
|
||||
) (*replication.DisableVolumeReplicationResponse, error) {
|
||||
if !mirroringInfo.Primary {
|
||||
// Return success if the below condition is met
|
||||
// Local image is secondary
|
||||
@ -913,9 +914,8 @@ func resyncRequired(localStatus librbd.SiteMirrorImageStatus) bool {
	// In some corner cases like `re-player shutdown` the local image will not
	// be in an error state. It would be also worth considering the `description`
	// field to make sure about split-brain.
	splitBrain := "split-brain"
	if localStatus.State == librbd.MirrorImageStatusStateError ||
		strings.Contains(localStatus.Description, splitBrain) {
		strings.Contains(localStatus.Description, "split-brain") {
		return true
	}

@ -27,7 +27,8 @@ import (

func createRBDClone(
	ctx context.Context,
	parentVol, cloneRbdVol *rbdVolume,
	snap *rbdSnapshot) error {
	snap *rbdSnapshot,
) error {
	// create snapshot
	err := parentVol.createSnapshot(ctx, snap)
	if err != nil {
@ -72,7 +73,8 @@ func cleanUpSnapshot(
	ctx context.Context,
	parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	rbdVol *rbdVolume) error {
	rbdVol *rbdVolume,
) error {
	err := parentVol.deleteSnapshot(ctx, rbdSnap)
	if err != nil {
		if !errors.Is(err, ErrSnapNotFound) {
@ -119,7 +121,8 @@ func undoSnapshotCloning(
	parentVol *rbdVolume,
	rbdSnap *rbdSnapshot,
	cloneVol *rbdVolume,
	cr *util.Credentials) error {
	cr *util.Credentials,
) error {
	err := cleanUpSnapshot(ctx, parentVol, rbdSnap, cloneVol)
	if err != nil {
		log.ErrorLog(ctx, "failed to clean up %s or %s: %v", cloneVol, rbdSnap, err)
@ -119,7 +119,8 @@ func ExecCommandWithTimeout(
	args ...string) (
	string,
	string,
	error) {
	error,
) {
	var (
		sanitizedArgs = StripSecretInArgs(args)
		stdoutBuf     bytes.Buffer
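A minimal sketch of the timeout plumbing such a helper needs, using os/exec with a context deadline. This is not the ceph-csi implementation (which, as seen above, also strips secrets from the arguments before logging); the helper name and signature are illustrative:

package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"time"
)

// execWithTimeout runs program with args, kills it when the deadline hits,
// and returns the captured stdout and stderr.
func execWithTimeout(ctx context.Context, timeout time.Duration, program string, args ...string) (string, string, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	var stdoutBuf, stderrBuf bytes.Buffer
	cmd := exec.CommandContext(ctx, program, args...)
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf

	err := cmd.Run()

	return stdoutBuf.String(), stderrBuf.String(), err
}

func main() {
	stdout, stderr, err := execWithTimeout(context.Background(), 5*time.Second, "echo", "hello")
	fmt.Println(stdout, stderr, err)
}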
@ -139,7 +139,8 @@ func GetMappedID(key, value, id string) string {

// fetchMappedClusterIDAndMons returns monitors and clusterID info after checking cluster mapping.
func fetchMappedClusterIDAndMons(ctx context.Context,
	clusterID, clusterMappingConfigFile, csiConfigFile string) (string, string, error) {
	clusterID, clusterMappingConfigFile, csiConfigFile string,
) (string, string, error) {
	var mons string
	clusterMappingInfo, err := getClusterMappingInfo(clusterID, clusterMappingConfigFile)
	if err != nil {
@ -187,9 +187,9 @@ func generateNewEncryptionPassphrase() (string, error) {
}

// VolumeMapper returns the file name and its path where the encrypted device should be opened.
func VolumeMapper(volumeID string) (mapperFile, mapperFilePath string) {
	mapperFile = mapperFilePrefix + volumeID
	mapperFilePath = path.Join(mapperFilePathPrefix, mapperFile)
func VolumeMapper(volumeID string) (string, string) {
	mapperFile := mapperFilePrefix + volumeID
	mapperFilePath := path.Join(mapperFilePathPrefix, mapperFile)

	return mapperFile, mapperFilePath
}
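The refactor above swaps named return values for locals without changing behavior. A standalone version for reference; the two prefix constants are assumed values, the real ones live elsewhere in the package:

package main

import (
	"fmt"
	"path"
)

// assumed values; the real constants are defined elsewhere in ceph-csi.
const (
	mapperFilePrefix     = "luks-rbd-"
	mapperFilePathPrefix = "/dev/mapper"
)

// volumeMapper mirrors the refactored function: build the mapper file name,
// then join it onto the device-mapper directory.
func volumeMapper(volumeID string) (string, string) {
	mapperFile := mapperFilePrefix + volumeID
	mapperFilePath := path.Join(mapperFilePathPrefix, mapperFile)

	return mapperFile, mapperFilePath
}

func main() {
	file, fullPath := volumeMapper("0001-0009-rook-ceph-0000000000000001")
	fmt.Println(file, fullPath)
}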

@ -248,7 +248,7 @@ func IsDeviceOpen(ctx context.Context, device string) (bool, error) {

// DeviceEncryptionStatus looks to identify if the passed device is a LUKS mapping
// and if so what the device is and the mapper name as used by LUKS.
// If not, just returns the original device and an empty string.
func DeviceEncryptionStatus(ctx context.Context, devicePath string) (mappedDevice, mapper string, err error) {
func DeviceEncryptionStatus(ctx context.Context, devicePath string) (string, string, error) {
	if !strings.HasPrefix(devicePath, mapperFilePathPrefix) {
		return devicePath, "", nil
	}
@ -274,7 +274,7 @@ func DeviceEncryptionStatus(ctx context.Context, devicePath string) (mappedDevic
			return "", "", fmt.Errorf("device encryption status output for %s is badly formatted: %s",
				devicePath, lines[i])
		}
		if strings.Compare(kv[0], "device") == 0 {
		if kv[0] == "device" {
			return strings.TrimSpace(kv[1]), mapPath, nil
		}
	}

@ -26,14 +26,14 @@ const (

	// PV and PVC metadata keys used by external provisioner as part of
	// create requests as parameters, when `extra-create-metadata` is true.
	pvcNameKey      = "csi.storage.k8s.io/pvc/name"
	pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace"
	pvNameKey       = "csi.storage.k8s.io/pv/name"
	pvcNameKey      = csiParameterPrefix + "pvc/name"
	pvcNamespaceKey = csiParameterPrefix + "pvc/namespace"
	pvNameKey       = csiParameterPrefix + "pv/name"

	// snapshot metadata keys.
	volSnapNameKey        = "csi.storage.k8s.io/volumesnapshot/name"
	volSnapNamespaceKey   = "csi.storage.k8s.io/volumesnapshot/namespace"
	volSnapContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name"
	volSnapNameKey        = csiParameterPrefix + "volumesnapshot/name"
	volSnapNamespaceKey   = csiParameterPrefix + "volumesnapshot/namespace"
	volSnapContentNameKey = csiParameterPrefix + "volumesnapshotcontent/name"
)

// RemoveCSIPrefixedParameters removes parameters prefixed with csiParameterPrefix.
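The function body is not shown in this diff; a minimal sketch of what stripping the prefixed parameters presumably looks like, with csiParameterPrefix inferred from the old literals above ("csi.storage.k8s.io/"):

package main

import (
	"fmt"
	"strings"
)

const csiParameterPrefix = "csi.storage.k8s.io/" // inferred from the keys above

// removeCSIPrefixedParameters copies params, dropping every key the external
// provisioner injected under the csi.storage.k8s.io/ prefix.
func removeCSIPrefixedParameters(params map[string]string) map[string]string {
	newParams := make(map[string]string, len(params))
	for k, v := range params {
		if !strings.HasPrefix(k, csiParameterPrefix) {
			newParams[k] = v
		}
	}

	return newParams
}

func main() {
	in := map[string]string{
		"clusterID":                   "rook-ceph",
		"csi.storage.k8s.io/pvc/name": "mypvc",
	}
	fmt.Println(removeCSIPrefixedParameters(in)) // map[clusterID:rook-ceph]
}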

45
internal/util/k8s/version.go
Normal file
@ -0,0 +1,45 @@
/*
Copyright 2022 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package k8s

import (
	"fmt"
	"strconv"

	"k8s.io/client-go/kubernetes"
)

// GetServerVersion returns the Kubernetes server major and minor version as
// integers.
func GetServerVersion(client *kubernetes.Clientset) (int, int, error) {
	version, err := client.ServerVersion()
	if err != nil {
		return 0, 0, fmt.Errorf("failed to get ServerVersion: %w", err)
	}

	major, err := strconv.Atoi(version.Major)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to convert Kubernetes major version %q to int: %w", version.Major, err)
	}

	minor, err := strconv.Atoi(version.Minor)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to convert Kubernetes minor version %q to int: %w", version.Minor, err)
	}

	return major, minor, nil
}
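A usage sketch for the new helper, assuming in-cluster configuration and that the caller lives inside the ceph-csi module (the internal import path is not importable from outside it). One caveat worth noting: some managed clusters report a minor version like "24+", which strconv.Atoi rejects; that surfaces as the error above.

package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/ceph/ceph-csi/internal/util/k8s"
)

func main() {
	// load the service-account credentials mounted into the pod.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("failed to load in-cluster config: %v", err)
	}

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("failed to create clientset: %v", err)
	}

	major, minor, err := k8s.GetServerVersion(client)
	if err != nil {
		log.Fatalf("failed to get server version: %v", err)
	}
	fmt.Printf("Kubernetes v%d.%d\n", major, minor)
}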

@ -141,7 +141,8 @@ type TopologyConstrainedPool struct {

// GetTopologyFromRequest extracts TopologyConstrainedPools and passed in accessibility constraints
// from a CSI CreateVolume request.
func GetTopologyFromRequest(
	req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
	req *csi.CreateVolumeRequest,
) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
	var topologyPools []TopologyConstrainedPool

	// check if parameters have pool configuration pertaining to topology
@ -171,7 +172,8 @@ func GetTopologyFromRequest(

// MatchPoolAndTopology returns the topology map, if the passed in pool matches any
// passed in accessibility constraints.
func MatchPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
	accessibilityRequirements *csi.TopologyRequirement, poolName string) (string, string, map[string]string, error) {
	accessibilityRequirements *csi.TopologyRequirement, poolName string,
) (string, string, map[string]string, error) {
	var topologyPool []TopologyConstrainedPool

	if topologyPools == nil || accessibilityRequirements == nil {
@ -199,7 +201,8 @@ func MatchPoolAndTopology(topologyPools *[]TopologyConstrainedPool,

// The return variables are, image poolname, data poolname, and topology map of
// matched requirement.
func FindPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
	accessibilityRequirements *csi.TopologyRequirement) (string, string, map[string]string, error) {
	accessibilityRequirements *csi.TopologyRequirement,
) (string, string, map[string]string, error) {
	if topologyPools == nil || accessibilityRequirements == nil {
		return "", "", nil, nil
	}

@ -121,6 +121,9 @@ type Config struct {

	// CSI-Addons endpoint
	CSIAddonsEndpoint string

	// Cluster name
	ClusterName string
}
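The new ClusterName field is presumably populated from a command-line option; a sketch with the standard flag package, where the flag name "clustername" and the trimmed-down Config struct are assumptions of this sketch:

package main

import (
	"flag"
	"fmt"
)

// Config is a trimmed stand-in for the driver configuration struct above.
type Config struct {
	CSIAddonsEndpoint string
	ClusterName       string
}

func main() {
	var conf Config
	// bind the field to a CLI flag; the flag name is an assumption.
	flag.StringVar(&conf.ClusterName, "clustername", "", "name of the Ceph cluster, stored as image metadata")
	flag.Parse()
	fmt.Printf("cluster name: %q\n", conf.ClusterName)
}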

// ValidateDriverName validates the driver name.

@ -260,7 +263,8 @@ func GenerateVolID(
	cr *Credentials,
	locationID int64,
	pool, clusterID, objUUID string,
	volIDVersion uint16) (string, error) {
	volIDVersion uint16,
) (string, error) {
	var err error

	if locationID == InvalidPoolID {
@ -99,7 +99,7 @@ func (ci CSIIdentifier) ComposeCSIID() (string, error) {

/*
DecomposeCSIID decomposes a CSIIdentifier from the passed in string.
*/
func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) (err error) {
func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) error {
	bytesToProcess := uint16(len(composedCSIID))

	// if length is less than expected constant elements, then bail out!
@ -190,3 +190,7 @@ linters:
    - tagliatelle
    - varnamelen
    - nilnil
    # TODO enable linters added in golangci-lint 1.46
    - maintidx
    - exhaustruct
    - containedctx
@ -205,6 +205,11 @@ function check_rbd_stat() {
            RBD_POOL_NAME="device_health_metrics"
        fi

        # Rook v1.9.x creates pool with name .mgr for builtin-mgr CephBlockPool CR
        if [[ "${RBD_POOL_NAME}" == "builtin-mgr" ]]; then
            RBD_POOL_NAME=".mgr"
        fi

        echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5

        TOOLBOX_POD=$(kubectl_retry -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
@ -185,7 +185,7 @@ def check_pv_name_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
    """
    validate pvc information in rados
    """
    omapkey = 'csi.volume.%s' % pvc_name
    omapkey = f'csi.volume.{pvc_name}'
    cmd = ['rados', 'getomapval', 'csi.volumes.default',
           omapkey, "--pool", pool_name]
    if not arg.userkey:

@ -212,8 +212,8 @@ def check_pv_name_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
            name += part[-1]
    if name.decode() != image_id:
        if arg.debug:
            print("expected image Id %s found Id in rados %s" %
                  (image_id, name.decode()))
            decoded_name = name.decode()
            print(f"expected image Id {image_id} found Id in rados {decoded_name}")
        return False
    return True
@ -246,7 +246,7 @@ def check_image_uuid_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
    """
    validate image uuid in rados
    """
    omapkey = 'csi.volume.%s' % image_id
    omapkey = f'csi.volume.{image_id}'
    cmd = ['rados', 'getomapval', omapkey, "csi.volname", "--pool", pool_name]
    if not arg.userkey:
        cmd += ["--id", arg.userid, "--key", arg.userkey]

@ -276,8 +276,8 @@ def check_image_uuid_in_rados(arg, image_id, pvc_name, pool_name, is_rbd):
            name += part[-1]
    if name.decode() != pvc_name:
        if arg.debug:
            print("expected image Id %s found Id in rados %s" %
                  (pvc_name, name.decode()))
            decoded_name = name.decode()
            print(f"expected image Id {pvc_name} found Id in rados {decoded_name}")
        return False
    return True
@ -565,7 +565,7 @@ def get_fsname_from_pvdata(arg, pvdata):

if __name__ == "__main__":
    ARGS = PARSER.parse_args()
    if ARGS.command not in ["kubectl", "oc"]:
        print("%s command not supported" % ARGS.command)
        print(f"{ARGS.command} command not supported")
        sys.exit(1)
    if sys.version_info[0] < 3:
        print("python version less than 3 is not supported.")

2
vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
package aws

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.3"
const goModuleVersion = "1.16.5"

10
vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
generated
vendored
@ -2,9 +2,9 @@
//
// Retryer Interface and Implementations
//
// This packages defines Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This packages provides a single
// retry implementations: Standard.
// This package defines Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This package provides a single
// retry implementation: Standard.
//
// Standard
//

@ -33,7 +33,7 @@
// value.
//
// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
// using the NewStandard function, and providing one more functional arguments that mutate the StandardOptions
// using the NewStandard function, and providing one more functional argument that mutate the StandardOptions
// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
// and the retry delay policy.
//

@ -71,7 +71,7 @@
// standard retryer.
//
// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
// this can be used to extend the standard retryer to add additional logic ot determine if a
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be retried.
//
// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
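A sketch of the configuration pattern these package docs describe: building a standard retryer with a functional option. MaxAttempts is one of the documented StandardOptions fields; treat the exact fields as assumptions of this sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// NewStandard applies each functional argument to the StandardOptions
	// before the retryer is built.
	r := retry.NewStandard(func(o *retry.StandardOptions) {
		o.MaxAttempts = 5 // the SDK default is 3
	})
	fmt.Println(r.MaxAttempts())
}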

8
vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
generated
vendored
@ -1,3 +1,11 @@
# v1.1.12 (2022-06-07)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.1.11 (2022-05-17)

* **Dependency Update**: Updated to the latest SDK module versions

# v1.1.10 (2022-04-25)

* **Dependency Update**: Updated to the latest SDK module versions

2
vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
generated
vendored
@ -3,4 +3,4 @@
package configsources

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.10"
const goModuleVersion = "1.1.12"
Some files were not shown because too many files have changed in this diff