mirror of https://github.com/ceph/ceph-csi.git
Merge pull request #80 from ceph/devel
sync downstream devel with upstream devel
commit 000bb32927
@@ -133,6 +133,7 @@ charts and their default values.
| `driverName` | Name of the csi-driver | `cephfs.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
| `cephConfConfigMapName` | Name of the configmap which contains ceph.conf configuration | `ceph-config` |
| `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
| `storageClass.name` | Specifies the cephFS StorageClass name | `csi-cephfs-sc` |
| `storageClass.annotations` | Specifies the annotations for the cephFS storageClass | `[]` |
@@ -156,6 +157,7 @@ charts and their default values.
| `secret.name` | Specifies the cephFS secret name | `csi-cephfs-secret` |
| `secret.adminID` | Specifies the admin ID of the cephFS secret | `<plaintext ID>` |
| `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` |
| `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` |

### Command Line
@@ -1,7 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-config
  name: {{ .Values.cephConfConfigMapName | quote }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
@@ -112,9 +112,11 @@ spec:
            name: host-mount
          - mountPath: /sys
            name: host-sys
          {{- if .Values.selinuxMount }}
          - mountPath: /etc/selinux
            name: etc-selinux
            readOnly: true
          {{- end }}
          - mountPath: /lib/modules
            name: lib-modules
            readOnly: true
@@ -146,6 +148,10 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: status.podIP
          ports:
            - containerPort: {{ .Values.nodeplugin.httpMetrics.containerPort }}
              name: metrics
              protocol: TCP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -172,9 +178,11 @@ spec:
        - name: host-sys
          hostPath:
            path: /sys
        {{- if .Values.selinuxMount }}
        - name: etc-selinux
          hostPath:
            path: /etc/selinux
        {{- end }}
        - name: host-mount
          hostPath:
            path: /run/mount
@@ -186,7 +194,7 @@ spec:
            path: /dev
        - name: ceph-config
          configMap:
            name: ceph-config
            name: {{ .Values.cephConfConfigMapName | quote }}
        - name: ceph-csi-config
          configMap:
            name: {{ .Values.configMapName | quote }}
@@ -17,6 +17,9 @@ spec:
    rule: RunAsAny
  privileged: true
  hostNetwork: true
  hostPorts:
    - min: {{ .Values.nodeplugin.httpMetrics.containerPort }}
      max: {{ .Values.nodeplugin.httpMetrics.containerPort }}
  hostPID: true
  runAsUser:
    rule: RunAsAny
@@ -37,8 +40,10 @@ spec:
      readOnly: false
    - pathPrefix: '/sys'
      readOnly: false
    {{- if .Values.selinuxMount }}
    - pathPrefix: '/etc/selinux'
      readOnly: true
    {{- end }}
    - pathPrefix: '/lib/modules'
      readOnly: true
    - pathPrefix: '{{ .Values.kubeletDir }}'
@@ -192,6 +192,10 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: status.podIP
          ports:
            - containerPort: {{ .Values.provisioner.httpMetrics.containerPort }}
              name: metrics
              protocol: TCP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -214,7 +218,7 @@ spec:
            path: /dev
        - name: ceph-config
          configMap:
            name: ceph-config
            name: {{ .Values.cephConfConfigMapName | quote }}
        - name: ceph-csi-config
          configMap:
            name: {{ .Values.configMapName | quote }}
@@ -201,6 +201,10 @@ provisioner:
  podSecurityPolicy:
    enabled: false

# Mount the host /etc/selinux inside pods to support
# selinux-enabled filesystems
selinuxMount: true

topology:
  # Specifies whether topology based provisioning support should
  # be exposed by CSI
@@ -319,3 +323,5 @@ configMapName: ceph-csi-config
# configMapKey:
# Use an externally provided configmap
externallyManagedConfigmap: false
# Name of the configmap used for ceph.conf
cephConfConfigMapName: ceph-config
@@ -142,6 +142,7 @@ charts and their default values.
| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
| `cephConfConfigMapName` | Name of the configmap which contains ceph.conf configuration | `ceph-config` |
| `kmsConfigMapName` | Name of the configmap used for encryption kms configuration | `ceph-csi-encryption-kms-config` |
| `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
| `storageClass.name` | Specifies the rbd StorageClass name | `csi-rbd-sc` |
@@ -175,6 +176,7 @@ charts and their default values.
| `secret.userID` | Specifies the user ID of the rbd secret | `<plaintext ID>` |
| `secret.userKey` | Specifies the key that corresponds to the userID | `<Ceph auth key corresponding to ID above>` |
| `secret.encryptionPassphrase` | Specifies the encryption passphrase of the secret | `test_passphrase` |
| `selinuxMount` | Mount the host /etc/selinux inside pods to support selinux-enabled filesystems | `true` |

### Command Line
@@ -1,7 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-config
  name: {{ .Values.cephConfConfigMapName | quote }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ include "ceph-csi-rbd.name" . }}
@@ -109,9 +109,11 @@ spec:
            name: host-mount
          - mountPath: /sys
            name: host-sys
          {{- if .Values.selinuxMount }}
          - mountPath: /etc/selinux
            name: etc-selinux
            readOnly: true
          {{- end }}
          - mountPath: /lib/modules
            name: lib-modules
            readOnly: true
@@ -153,6 +155,10 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: status.podIP
          ports:
            - containerPort: {{ .Values.nodeplugin.httpMetrics.containerPort }}
              name: metrics
              protocol: TCP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -189,15 +195,17 @@ spec:
        - name: host-sys
          hostPath:
            path: /sys
        {{- if .Values.selinuxMount }}
        - name: etc-selinux
          hostPath:
            path: /etc/selinux
        {{- end }}
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: ceph-config
          configMap:
            name: ceph-config
            name: {{ .Values.cephConfConfigMapName | quote }}
        - name: ceph-csi-config
          configMap:
            name: {{ .Values.configMapName | quote }}
@@ -17,6 +17,9 @@ spec:
    rule: RunAsAny
  privileged: true
  hostNetwork: true
  hostPorts:
    - min: {{ .Values.nodeplugin.httpMetrics.containerPort }}
      max: {{ .Values.nodeplugin.httpMetrics.containerPort }}
  hostPID: true
  runAsUser:
    rule: RunAsAny
@@ -37,8 +40,10 @@ spec:
      readOnly: false
    - pathPrefix: '/sys'
      readOnly: false
    {{- if .Values.selinuxMount }}
    - pathPrefix: '/etc/selinux'
      readOnly: true
    {{- end }}
    - pathPrefix: '/lib/modules'
      readOnly: true
    - pathPrefix: '{{ .Values.cephLogDirHostPath }}'
@@ -229,6 +229,10 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: status.podIP
          ports:
            - containerPort: {{ .Values.provisioner.httpMetrics.containerPort }}
              name: metrics
              protocol: TCP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -251,7 +255,7 @@ spec:
            path: /lib/modules
        - name: ceph-config
          configMap:
            name: ceph-config
            name: {{ .Values.cephConfConfigMapName | quote }}
        - name: ceph-csi-config
          configMap:
            name: {{ .Values.configMapName | quote }}
@@ -279,10 +279,10 @@ storageClass:
  # eg: pool: replicapool
  pool: replicapool

  # (required) RBD image features, CSI creates image with image-format 2
  # CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`,
  # `object-map`, `fast-diff` features. If `journaling` is enabled, must
  # enable `exclusive-lock` too.
  # (optional) RBD image features, CSI creates image with image-format 2 CSI
  # RBD currently supports `layering`, `journaling`, `exclusive-lock`,
  # `object-map`, `fast-diff`, `deep-flatten` features. If `journaling` is
  # enabled, must enable `exclusive-lock` too.
  # imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
  imageFeatures: "layering"
@@ -399,6 +399,10 @@ storageClass:
  # mountOptions:
  #   - discard

# Mount the host /etc/selinux inside pods to support
# selinux-enabled filesystems
selinuxMount: true

secret:
  # Specifies whether the secret should be created
  create: false
@@ -443,5 +447,7 @@ configMapName: ceph-csi-config
# configMapKey:
# Use an externally provided configmap
externallyManagedConfigmap: false
# Name of the configmap used for ceph.conf
cephConfConfigMapName: ceph-config
# Name of the configmap used for encryption kms configuration
kmsConfigMapName: ceph-csi-encryption-kms-config
@@ -56,7 +56,7 @@ make image-cephcsi
| `dataPool` | no | Ceph pool used for the data of the RBD images. |
| `volumeNamePrefix` | no | Prefix to use for naming RBD images (defaults to `csi-vol-`). |
| `snapshotNamePrefix` | no | Prefix to use for naming RBD snapshot images (defaults to `csi-snap-`). |
| `imageFeatures` | yes | RBD image features. CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`, `object-map`, `fast-diff` features. If `journaling` is enabled, must enable `exclusive-lock` too. See [man pages](http://docs.ceph.com/docs/master/man/8/rbd/#cmdoption-rbd-image-feature) Note that the required support for [object-map and fast-diff were added in 5.3 and journaling does not have KRBD support yet](https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features). deep-flatten is added for cloned images. |
| `imageFeatures` | no | RBD image features. CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`, `object-map`, `fast-diff`, `deep-flatten` features. If `journaling` is enabled, must enable `exclusive-lock` too. See [man pages](http://docs.ceph.com/docs/master/man/8/rbd/#cmdoption-rbd-image-feature) Note that the required support for [object-map and fast-diff were added in 5.3, deep-flatten was added in 5.1 and journaling does not have KRBD support yet](https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features). deep-flatten is added for cloned images. |
| `tryOtherMounters` | no | Specifies whether to try other mounters in case the current mounter fails to mount the rbd image for any reason |
| `mapOptions` | no | Map options to use when mapping rbd image. See [krbd](https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options) and [nbd](https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options) options. |
| `unmapOptions` | no | Unmap options to use when unmapping rbd image. See [krbd](https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options) and [nbd](https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options) options. |
|
@ -84,14 +84,16 @@ volumesnapshot.snapshot.storage.k8s.io/cephfs-pvc-snapshot created
|
||||
$ kubectl get volumesnapshot
|
||||
NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE
|
||||
cephfs-pvc-snapshot true csi-cephfs-pvc 1Gi csi-cephfsplugin-snapclass snapcontent-34476204-a14a-4d59-bfbc-2bbba695652c 3s 6s
|
||||
|
||||
```
|
||||
|
||||
- Get details about the volumesnapshotcontent
|
||||
|
||||
```console
|
||||
$ kubectl get volumesnapshotcontent
|
||||
NAME READYTOUSE RESTORESIZE DELETIONPOLICY DRIVER VOLUMESNAPSHOTCLASS VOLUMESNAPSHOT AGE
|
||||
snapcontent-34476204-a14a-4d59-bfbc-2bbba695652c true 1073741824 Delete cephfs.csi.ceph.com csi-cephfsplugin-snapclass cephfs-pvc-snapshot 64s
|
||||
NAME READYTOUSE RESTORESIZE DELETIONPOLICY DRIVER VOLUMESNAPSHOTCLASS VOLUMESNAPSHOT VOLUMESNAPSHOTNAMESPACE AGE
|
||||
snapcontent-881cb74a-9dff-4989-a83d-eece5ed079af true 1073741824 Delete cephfs.csi.ceph.com csi-cephfsplugin-snapclass cephfs-pvc-snapshot default 12m
|
||||
|
||||
```
|
||||
|
||||
### Restore CephFS Snapshot
|
||||
@@ -145,8 +147,9 @@ kubectl create -f snapshotclass.yaml

```console
$ kubectl get volumesnapshotclass
NAME                      AGE
csi-rbdplugin-snapclass   4s
NAME                      DRIVER             DELETIONPOLICY   AGE
csi-rbdplugin-snapclass   rbd.csi.ceph.com   Delete           30m

```

### Create RBD Snapshot
@@ -159,41 +162,8 @@ kubectl create -f snapshot.yaml

```console
$ kubectl get volumesnapshot
NAME               AGE
rbd-pvc-snapshot   6s
```

### Check the status of the Snapshot

```console
$ kubectl describe volumesnapshot rbd-pvc-snapshot

Name:         rbd-pvc-snapshot
Namespace:    default
Labels:       <none>
Annotations:  <none>
API Version:  snapshot.storage.k8s.io/v1alpha1
Kind:         VolumeSnapshot
Metadata:
  Creation Timestamp:  2019-02-06T08:52:34Z
  Finalizers:
    snapshot.storage.kubernetes.io/volumesnapshot-protection
  Generation:        5
  Resource Version:  84239
  Self Link:         /apis/snapshot.storage.k8s.io/v1alpha1/namespaces/default/volumesnapshots/rbd-pvc-snapshot
  UID:               8b9b5740-29ec-11e9-8e0f-b8ca3aad030b
Spec:
  Snapshot Class Name:    csi-rbdplugin-snapclass
  Snapshot Content Name:  snapcontent-8b9b5740-29ec-11e9-8e0f-b8ca3aad030b
  Source:
    API Group:  <nil>
    Kind:       PersistentVolumeClaim
    Name:       rbd-pvc
Status:
  Creation Time:  2019-02-06T08:52:34Z
  Ready To Use:   true
  Restore Size:   1Gi
Events:           <none>
NAME               READYTOUSE   SOURCEPVC   SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS             SNAPSHOTCONTENT                                    CREATIONTIME   AGE
rbd-pvc-snapshot   true         rbd-pvc                             1Gi           csi-rbdplugin-snapclass   snapcontent-905e6015-2403-4302-8a4e-cd3bdf63507b   78s            79s
```

### Restore RBD Snapshot
@@ -51,21 +51,12 @@ var (
func deployCephfsPlugin() {
	// delete objects deployed by rook

	data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)
	if err != nil {
		e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSProvisionerRBAC, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
	err := deleteResource(cephFSDirPath + cephFSProvisionerRBAC)
	if err != nil {
		e2elog.Failf("failed to delete provisioner rbac %s: %v", cephFSDirPath+cephFSProvisionerRBAC, err)
	}

	data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
	if err != nil {
		e2elog.Failf("failed to read content from %s: %v", cephFSDirPath+cephFSNodePluginRBAC, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")

	err = deleteResource(cephFSDirPath + cephFSNodePluginRBAC)
	if err != nil {
		e2elog.Failf("failed to delete nodeplugin rbac %s: %v", cephFSDirPath+cephFSNodePluginRBAC, err)
	}
@@ -22,10 +22,13 @@ import (
	"fmt"
	"regexp"
	"strings"
	"time"

	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -100,9 +103,25 @@ func createCephfsStorageClass(
		sc.Parameters["clusterID"] = strings.Trim(fsID, "\n")
	}
	sc.Namespace = cephCSINamespace
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})

	return err
	timeout := time.Duration(deployTimeout) * time.Minute

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
		if err != nil {
			e2elog.Logf("error creating StorageClass %q in namespace %q: %v", sc.Name, sc.Namespace, err)
			if isRetryableAPIError(err) {
				return false, nil
			}
			if apierrs.IsNotFound(err) {
				return false, nil
			}

			return false, fmt.Errorf("failed to create StorageClass %q: %w", sc.Name, err)
		}

		return true, nil
	})
}

func createCephfsSecret(f *framework.Framework, secretName, userName, userKey string) error {
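The hunk above turns a one-shot Create call into a poll loop that keeps retrying only while the error looks transient. The repository's `isRetryableAPIError` helper is not part of this diff; a minimal sketch of such a classifier, built from the standard apimachinery predicates (an assumption, not the project's exact implementation), could look like:

```go
package e2e

import (
	apierrs "k8s.io/apimachinery/pkg/api/errors"
)

// isRetryableAPIErrorSketch reports whether a Kubernetes API error is a
// transient server-side condition worth retrying on the next poll tick.
// Hypothetical example only; ceph-csi's actual helper may differ.
func isRetryableAPIErrorSketch(err error) bool {
	return apierrs.IsTimeout(err) ||
		apierrs.IsServerTimeout(err) ||
		apierrs.IsTooManyRequests(err) ||
		apierrs.IsServiceUnavailable(err) ||
		apierrs.IsInternalError(err)
}
```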
@@ -23,6 +23,7 @@ import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
@@ -62,6 +63,10 @@ func loadAppDeployment(path string) (*appsv1.Deployment, error) {
		return nil, err
	}

	for i := range deploy.Spec.Template.Spec.Containers {
		deploy.Spec.Template.Spec.Containers[i].ImagePullPolicy = v1.PullIfNotPresent
	}

	return &deploy, nil
}
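Forcing `PullIfNotPresent` on every container lets the e2e run consume images that were just built and loaded onto the cluster nodes instead of re-pulling from a registry. A self-contained illustration of the same mutation (the deployment and image name here are hypothetical, not repository fixtures):

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
)

func main() {
	deploy := appsv1.Deployment{}
	deploy.Spec.Template.Spec.Containers = []v1.Container{
		{Name: "app", Image: "quay.io/cephcsi/cephcsi:canary"},
	}

	// Same loop as the diff above: never re-pull an image that is
	// already present on the node.
	for i := range deploy.Spec.Template.Spec.Containers {
		deploy.Spec.Template.Spec.Containers[i].ImagePullPolicy = v1.PullIfNotPresent
	}

	fmt.Println(deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy)
}
```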
@@ -140,6 +145,9 @@ func waitForDeploymentComplete(clientSet kubernetes.Interface, name, ns string,
		if isRetryableAPIError(err) {
			return false, nil
		}
		if apierrs.IsNotFound(err) {
			return false, nil
		}
		e2elog.Logf("deployment error: %v", err)

		return false, err
@@ -72,5 +72,4 @@ func handleFlags() {
	framework.RegisterClusterFlags(flag.CommandLine)
	testing.Init()
	flag.Parse()
	initResources()
}
116 e2e/rbd.go
@@ -795,6 +795,122 @@ var _ = Describe("RBD", func() {
				}
			})

			By("create PVC with layering,deep-flatten image-features and bind it to an app",
				func() {
					err := deleteResource(rbdExamplePath + "storageclass.yaml")
					if err != nil {
						e2elog.Failf("failed to delete storageclass: %v", err)
					}
					err = createRBDStorageClass(
						f.ClientSet,
						f,
						defaultSCName,
						nil,
						map[string]string{
							"imageFeatures": "layering,deep-flatten",
						},
						deletePolicy)
					if err != nil {
						e2elog.Failf("failed to create storageclass: %v", err)
					}
					// set up PVC
					pvc, err := loadPVC(pvcPath)
					if err != nil {
						e2elog.Failf("failed to load PVC: %v", err)
					}
					pvc.Namespace = f.UniqueName
					err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
					if err != nil {
						e2elog.Failf("failed to create PVC: %v", err)
					}
					// validate created backend rbd images
					validateRBDImageCount(f, 1, defaultRBDPool)

					if util.CheckKernelSupport(kernelRelease, deepFlattenSupport) {
						app, aErr := loadApp(appPath)
						if aErr != nil {
							e2elog.Failf("failed to load application: %v", aErr)
						}
						app.Namespace = f.UniqueName
						err = createApp(f.ClientSet, app, deployTimeout)
						if err != nil {
							e2elog.Failf("failed to create application: %v", err)
						}
						// delete pod as we should not create snapshot for in-use pvc
						err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
						if err != nil {
							e2elog.Failf("failed to delete application: %v", err)
						}
					}
					// clean up after ourselves
					err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
					if err != nil {
						e2elog.Failf("failed to delete PVC: %v", err)
					}
					// validate created backend rbd images
					validateRBDImageCount(f, 0, defaultRBDPool)
				})

			By("create PVC with layering,deep-flatten image-features and bind it to an app",
				func() {
					err := deleteResource(rbdExamplePath + "storageclass.yaml")
					if err != nil {
						e2elog.Failf("failed to delete storageclass: %v", err)
					}
					err = createRBDStorageClass(
						f.ClientSet,
						f,
						defaultSCName,
						nil,
						map[string]string{
							"imageFeatures": "",
						},
						deletePolicy)
					if err != nil {
						e2elog.Failf("failed to create storageclass: %v", err)
					}
					// set up PVC
					pvc, err := loadPVC(pvcPath)
					if err != nil {
						e2elog.Failf("failed to load PVC: %v", err)
					}
					pvc.Namespace = f.UniqueName
					err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
					if err != nil {
						e2elog.Failf("failed to create PVC: %v", err)
					}
					// validate created backend rbd images
					validateRBDImageCount(f, 1, defaultRBDPool)

					// checking the minimal kernel version for fast-diff as it requires a
					// higher kernel version than the other default image features.
					if util.CheckKernelSupport(kernelRelease, fastDiffSupport) {
						app, aErr := loadApp(appPath)
						if aErr != nil {
							e2elog.Failf("failed to load application: %v", aErr)
						}
						app.Namespace = f.UniqueName
						err = createApp(f.ClientSet, app, deployTimeout)
						if err != nil {
							e2elog.Failf("failed to create application: %v", err)
						}
						// delete pod as we should not create snapshot for in-use pvc
						err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
						if err != nil {
							e2elog.Failf("failed to delete application: %v", err)
						}
					}
					// clean up after ourselves
					err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
					if err != nil {
						e2elog.Failf("failed to delete PVC: %v", err)
					}
					// validate created backend rbd images
					validateRBDImageCount(f, 0, defaultRBDPool)
				})

			By("create PVC with journaling,fast-diff image-features and bind it to an app using rbd-nbd mounter",
				func() {
					if util.CheckKernelSupport(kernelRelease, fastDiffSupport) {
@@ -62,6 +62,18 @@ var fastDiffSupport = []util.KernelVersion{
	}, // standard 5.3+ versions
}

// nolint:gomnd // numbers specify Kernel versions.
var deepFlattenSupport = []util.KernelVersion{
	{
		Version:      5,
		PatchLevel:   1,
		SubLevel:     0,
		ExtraVersion: 0,
		Distribution: "",
		Backport:     false,
	}, // standard 5.1+ versions
}

// To use `io-timeout=0` we need
// www.mail-archive.com/linux-block@vger.kernel.org/msg38060.html
// nolint:gomnd // numbers specify Kernel versions.
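The new `deepFlattenSupport` table feeds the kernel gate used by the e2e test above: mounting an image that carries the deep-flatten feature through krbd is only attempted when the node kernel is at least 5.1. A minimal usage sketch inside the e2e package (assuming `kernelRelease` holds the node's `uname -r` output, as elsewhere in the suite):

```go
package e2e

import "github.com/ceph/ceph-csi/internal/util"

// canMountDeepFlatten mirrors the gating in the new test: krbd can only
// map images created with the deep-flatten feature on kernels >= 5.1
// (see the version table above). Sketch, not a repository helper.
func canMountDeepFlatten(kernelRelease string) bool {
	return util.CheckKernelSupport(kernelRelease, deepFlattenSupport)
}
```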
@@ -137,6 +149,10 @@ func createRBDStorageClass(
	sc.Parameters["clusterID"] = fsID
	for k, v := range parameters {
		sc.Parameters[k] = v
		// if any values are empty remove it from the map
		if v == "" {
			delete(sc.Parameters, k)
		}
	}
	sc.Namespace = cephCSINamespace
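This loop gives test overrides "unset" semantics: an empty value deletes the parameter, which is how the second test above requests a StorageClass with no `imageFeatures` at all. A self-contained toy illustration (the variable names are illustrative, not repository code):

```go
package main

import "fmt"

func main() {
	// Defaults that createRBDStorageClass would normally set.
	params := map[string]string{"imageFeatures": "layering"}
	// Test override: an empty string means "drop this parameter".
	overrides := map[string]string{"imageFeatures": ""}

	for k, v := range overrides {
		params[k] = v
		if v == "" {
			delete(params, k)
		}
	}

	fmt.Println(params) // map[] — the StorageClass omits imageFeatures
}
```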
@@ -77,14 +77,9 @@ var (
	cephCSINamespace string
	rookNamespace    string
	radosNamespace   string
	ns               string
	poll             = 2 * time.Second
)

func initResources() {
	ns = fmt.Sprintf("--namespace=%v", cephCSINamespace)
}

func getMons(ns string, c kubernetes.Interface) ([]string, error) {
	opt := metav1.ListOptions{
		LabelSelector: "app=rook-ceph-mon",
@@ -151,7 +146,7 @@ func deleteResource(scPath string) error {
	if err != nil {
		e2elog.Logf("failed to read content from %s %v", scPath, err)
	}
	err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout)
	err = retryKubectlInput(cephCSINamespace, kubectlDelete, data, deployTimeout, "--ignore-not-found=true")
	if err != nil {
		e2elog.Logf("failed to delete %s %v", scPath, err)
	}
@@ -29,10 +29,10 @@ parameters:
   # eg: pool: rbdpool
   pool: <rbd-pool-name>

   # (required) RBD image features, CSI creates image with image-format 2
   # CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`,
   # `object-map`, `fast-diff` features. If `journaling` is enabled, must
   # enable `exclusive-lock` too.
   # (optional) RBD image features, CSI creates image with image-format 2 CSI
   # RBD currently supports `layering`, `journaling`, `exclusive-lock`,
   # `object-map`, `fast-diff`, `deep-flatten` features. If `journaling` is
   # enabled, must enable `exclusive-lock` too.
   # imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
   imageFeatures: "layering"
70 go.mod
@@ -4,9 +4,9 @@ go 1.17

require (
	github.com/IBM/keyprotect-go-client v0.7.0
	github.com/aws/aws-sdk-go v1.42.48
	github.com/aws/aws-sdk-go v1.43.3
	github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
	github.com/ceph/go-ceph v0.13.0
	github.com/ceph/go-ceph v0.14.0
	github.com/container-storage-interface/spec v1.5.0
	github.com/csi-addons/replication-lib-utils v0.2.0
	github.com/csi-addons/spec v0.1.2-0.20211220115741-32fa508dadbe
@@ -26,16 +26,16 @@ require (
	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
	google.golang.org/grpc v1.44.0
	google.golang.org/protobuf v1.27.1
	k8s.io/api v0.23.3
	k8s.io/apimachinery v0.23.3
	k8s.io/api v0.23.4
	k8s.io/apimachinery v0.23.4
	k8s.io/client-go v12.0.0+incompatible
	k8s.io/cloud-provider v0.23.3
	k8s.io/cloud-provider v0.23.4
	k8s.io/klog/v2 v2.40.1
	//
	// when updating k8s.io/kubernetes, make sure to update the replace section too
	//
	k8s.io/kubernetes v1.23.3
	k8s.io/mount-utils v0.23.3
	k8s.io/kubernetes v1.23.4
	k8s.io/mount-utils v0.23.4
	k8s.io/utils v0.0.0-20211116205334-6203023598ed
	sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2
)
@@ -139,9 +139,9 @@ require (
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
	k8s.io/apiserver v0.23.3 // indirect
	k8s.io/component-base v0.23.3 // indirect
	k8s.io/component-helpers v0.23.3 // indirect
	k8s.io/apiserver v0.23.4 // indirect
	k8s.io/component-base v0.23.4 // indirect
	k8s.io/component-helpers v0.23.4 // indirect
	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
	k8s.io/kubectl v0.0.0 // indirect
	k8s.io/kubelet v0.0.0 // indirect
@@ -160,31 +160,31 @@ replace (
	//
	// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
	//
	k8s.io/api => k8s.io/api v0.23.3
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
	k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
	k8s.io/apiserver => k8s.io/apiserver v0.23.3
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
	k8s.io/client-go => k8s.io/client-go v0.23.3
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
	k8s.io/code-generator => k8s.io/code-generator v0.23.3
	k8s.io/component-base => k8s.io/component-base v0.23.3
	k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
	k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
	k8s.io/cri-api => k8s.io/cri-api v0.23.3
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
	k8s.io/kubectl => k8s.io/kubectl v0.23.3
	k8s.io/kubelet => k8s.io/kubelet v0.23.3
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
	k8s.io/metrics => k8s.io/metrics v0.23.3
	k8s.io/mount-utils => k8s.io/mount-utils v0.23.3
	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
	k8s.io/api => k8s.io/api v0.23.4
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.4
	k8s.io/apimachinery => k8s.io/apimachinery v0.23.4
	k8s.io/apiserver => k8s.io/apiserver v0.23.4
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.4
	k8s.io/client-go => k8s.io/client-go v0.23.4
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.4
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.4
	k8s.io/code-generator => k8s.io/code-generator v0.23.4
	k8s.io/component-base => k8s.io/component-base v0.23.4
	k8s.io/component-helpers => k8s.io/component-helpers v0.23.4
	k8s.io/controller-manager => k8s.io/controller-manager v0.23.4
	k8s.io/cri-api => k8s.io/cri-api v0.23.4
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.4
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.4
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.4
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.4
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.4
	k8s.io/kubectl => k8s.io/kubectl v0.23.4
	k8s.io/kubelet => k8s.io/kubelet v0.23.4
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.4
	k8s.io/metrics => k8s.io/metrics v0.23.4
	k8s.io/mount-utils => k8s.io/mount-utils v0.23.4
	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.4
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.4
	// layeh.com seems to be misbehaving
	layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
)
84 go.sum
@@ -138,8 +138,8 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.42.48 h1:8ZVBAsA9X2eCpSr/8SrWDk4BOT91wRdqxpAog875+K0=
github.com/aws/aws-sdk-go v1.42.48/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go v1.43.3 h1:qvCkC4FviA9rR4UvRk4ldr6f3mIJE0VaI3KrsDx1gTk=
github.com/aws/aws-sdk-go v1.43.3/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -168,8 +168,8 @@ github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
github.com/ceph/go-ceph v0.13.0 h1:69dgIPlNHD2OCz98T0benI4++vcnShGcpQK4RIALjw4=
github.com/ceph/go-ceph v0.13.0/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY=
github.com/ceph/go-ceph v0.14.0 h1:sJoT0au7NT3TPmDWf5W9w6tZy0U/5xZrIXVVauZR+Xo=
github.com/ceph/go-ceph v0.14.0/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
@@ -1689,28 +1689,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38=
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
k8s.io/apiextensions-apiserver v0.23.3 h1:JvPJA7hSEAqMRteveq4aj9semilAZYcJv+9HHFWfUdM=
k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEUDF+hrpIQJL38=
k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk=
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.23.3 h1:gWY1DmA0AdAGR/H+Q/1FtyGkFq8xqSaZOw7oLopmO8k=
k8s.io/apiserver v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4=
k8s.io/cli-runtime v0.23.3/go.mod h1:yA00O5pDqnjkBh8fkuugBbfIfjB1nOpz+aYLotbnOfc=
k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs=
k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE=
k8s.io/cloud-provider v0.23.3 h1:/3hcsBAyG5VpeDAyJQvRqf5US4dHU0Hu57MJiq4zG/w=
k8s.io/cloud-provider v0.23.3/go.mod h1:Ik+pKlpPOp0Zs906xyOpT3g2xB9A8VGNdejMTZS6EeA=
k8s.io/cluster-bootstrap v0.23.3/go.mod h1:NwUIksUHKNOKIHg/AfLH4NxqylbfEVXUh9EX2NxHZII=
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY=
k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg=
k8s.io/component-helpers v0.23.3 h1:OzuQpfsJsjGvT2nYnp0JsyxpGbnsv0GSvRlIkMKx+I8=
k8s.io/component-helpers v0.23.3/go.mod h1:SH+W/WPTaTenbWyDEeY7iytAQiMh45aqKxkvlqQ57cg=
k8s.io/controller-manager v0.23.3/go.mod h1:E0ss6ogA93sZ+AuibQSa7H4xWIiICTYFjowkjellVeU=
k8s.io/cri-api v0.23.3/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
k8s.io/csi-translation-lib v0.23.3/go.mod h1:8J7hpeqMoCJWofd1lCs4vZrEshdbVYrqurFeB6GZ/+E=
k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E=
k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI=
k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU=
k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g=
k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM=
k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.23.4 h1:zNvQlG+C/ERjuUz4p7eY/0IWHaMixRSBoxgmyIdwo9Y=
k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc=
k8s.io/cli-runtime v0.23.4/go.mod h1:7KywUNTUibmHPqmpDFuRO1kc9RhsufHv2lkjCm2YZyM=
k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU=
k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0=
k8s.io/cloud-provider v0.23.4 h1:Nx42V7+Vpaad3qZE031MpTfCDl3jeQrX6wuwieES/nc=
k8s.io/cloud-provider v0.23.4/go.mod h1:+RFNcj7DczZJE250/l55hh4Be4tlHkNgdtmI4PzxhJ0=
k8s.io/cluster-bootstrap v0.23.4/go.mod h1:H5UZ3a4ZvjyUIgTgW8VdnN1rm3DsRqhotqK9oDMHU1o=
k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/component-base v0.23.4 h1:SziYh48+QKxK+ykJ3Ejqd98XdZIseVBG7sBaNLPqy6M=
k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E=
k8s.io/component-helpers v0.23.4 h1:zCLeBuo3Qs0BqtJu767RXJgs5S9ruFJZcbM1aD+cMmc=
k8s.io/component-helpers v0.23.4/go.mod h1:1Pl7L4zukZ054ElzRbvmZ1FJIU8roBXFOeRFu8zipa4=
k8s.io/controller-manager v0.23.4/go.mod h1:+ednTkO5Z25worecG5ORa7NssZT0cpuVunVHN+24Ccs=
k8s.io/cri-api v0.23.4/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
k8s.io/csi-translation-lib v0.23.4/go.mod h1:hvAm5aoprpfE7p9Xnfe3ObmbhDcYp3U7AZJnVQUlrqw=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -1723,26 +1723,26 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4=
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-aggregator v0.23.3/go.mod h1:pt5QJ3QaIdhZzNlUvN5wndbM0LNT4BvhszGkzy2QdFo=
k8s.io/kube-controller-manager v0.23.3/go.mod h1:e8m5dhjei67DlLZA/QTvenxiGyonG9UhgHtU1LMslJE=
k8s.io/kube-aggregator v0.23.4/go.mod h1:hpmPi4oaLBe014CkBCqzBYWok64H2C7Ka6FBLJvHgkg=
k8s.io/kube-controller-manager v0.23.4/go.mod h1:r4Cn9Y8t3GyMPrPnOGCDRpeyEKVOITuwHJ7pIWXH0IY=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-proxy v0.23.3/go.mod h1:XdvwqJkR9r0ddUAX4ruA4V22Kws3qzKvgL3rIq584Ko=
k8s.io/kube-scheduler v0.23.3/go.mod h1:/thFQoAMv9/olDOEYVSQbUohmkJJyIPUmpVu0UealSM=
k8s.io/kubectl v0.23.3 h1:gJsF7cahkWDPYlNvYKK+OrBZLAJUBzCym+Zsi+dfi1E=
k8s.io/kubectl v0.23.3/go.mod h1:VBeeXNgLhSabu4/k0O7Q0YujgnA3+CLTUE0RcmF73yY=
k8s.io/kubelet v0.23.3 h1:jYed8HoT0H2zXzf5Av+Ml8z5erN39uJfKh/yplYMgkg=
k8s.io/kubelet v0.23.3/go.mod h1:RZxGSCsiwoWJ9z6mVla+jhiLfCFIKC16yAS38D7GQSE=
k8s.io/kubernetes v1.23.3 h1:weuFJOkRP7+057uvhNUYbVTVCog/klquhbtKRD+UHUo=
k8s.io/kubernetes v1.23.3/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io=
k8s.io/legacy-cloud-providers v0.23.3/go.mod h1:s9vv59dUv4SU+HAm9C/YDdyw2OY9qmFYmcGEwr/ecDc=
k8s.io/metrics v0.23.3/go.mod h1:Ut8TvkbsO4oMVeUzaTArvPrcw9QRFLs2XNzUlORjdYE=
k8s.io/mount-utils v0.23.3 h1:zPRPjS5rCOeEo4M6H5ysnwddVuYwEgJsiMgo2fgbPH0=
k8s.io/mount-utils v0.23.3/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98=
k8s.io/pod-security-admission v0.23.3/go.mod h1:vULEGUgsujyrKBz3RRRZnvrJJt115gu0GICArDmgzqo=
k8s.io/sample-apiserver v0.23.3/go.mod h1:5yDZRMfFvp7/2BOXBwk0AFNsD00iyuXeEsWZSoLFeGw=
k8s.io/kube-proxy v0.23.4/go.mod h1:uZBvTCJYVBqnlyup3JpXaMmqrlkzHjcakHhf7ojYUKk=
k8s.io/kube-scheduler v0.23.4/go.mod h1:KNKYvMZ8dhoMLYygiEMEK+JKFQ2fhW2CLj7B5zEQ/68=
k8s.io/kubectl v0.23.4 h1:mAa+zEOlyZieecEy+xSrhjkpMcukYyHWzcNdX28dzMY=
k8s.io/kubectl v0.23.4/go.mod h1:Dgb0Rvx/8JKS/C2EuvsNiQc6RZnX0SbHJVG3XUzH6ok=
k8s.io/kubelet v0.23.4 h1:yptgklhQ3dtHHIpH/RgI0861XWoJ9/YIBnnxYS6l8VI=
k8s.io/kubelet v0.23.4/go.mod h1:RjbycP9Wnpbw33G8yFt9E23+pFYxzWy1d8qHU0KVUgg=
k8s.io/kubernetes v1.23.4 h1:25dqAMS96u+9L/A7AHdEW7aMTcmHoQMbMPug6Fa61JE=
k8s.io/kubernetes v1.23.4/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io=
k8s.io/legacy-cloud-providers v0.23.4/go.mod h1:dl0qIfmTyeDpRe/gaudDVnLsykKW2DE7oBWbuJl2Gd8=
k8s.io/metrics v0.23.4/go.mod h1:cl6sY9BdVT3DubbpqnkPIKi6mn/F2ltkU4yH1tEJ3Bo=
k8s.io/mount-utils v0.23.4 h1:tWUj5A0DJ29haMiO7F3pNdP2HwyMWczzvqQmikFc9s8=
k8s.io/mount-utils v0.23.4/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98=
k8s.io/pod-security-admission v0.23.4/go.mod h1:cikO3akkUoTZ8uFhkHdlWp0m3XosiOqssTHb+TfCjLw=
k8s.io/sample-apiserver v0.23.4/go.mod h1:ITqvv82GqqeRue7dmsP7A/As/MHE2v1H3vriNRFv+/U=
k8s.io/system-validators v1.6.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -23,6 +23,7 @@ import (

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/util"
@@ -55,12 +56,12 @@ type ControllerServer struct {
func (cs *ControllerServer) createBackingVolume(
	ctx context.Context,
	volOptions,
	parentVolOpt *core.VolumeOptions,
	vID,
	pvID *core.VolumeIdentifier,
	sID *core.SnapshotIdentifier) error {
	parentVolOpt *store.VolumeOptions,
	pvID *store.VolumeIdentifier,
	sID *store.SnapshotIdentifier) error {
	var err error
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

	if sID != nil {
		if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
			log.ErrorLog(ctx, err.Error())
@@ -68,8 +69,12 @@ func (cs *ControllerServer) createBackingVolume(
			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)
		snap := core.Snapshot{
			SnapshotID: sID.FsSnapshotName,
			SubVolume:  &parentVolOpt.SubVolume,
		}

		err = core.CreateCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
		err = volClient.CreateCloneFromSnapshot(ctx, snap)
		if err != nil {
			log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
@@ -85,12 +90,8 @@ func (cs *ControllerServer) createBackingVolume(
			return status.Error(codes.Aborted, err.Error())
		}
		defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)
		err = core.CreateCloneFromSubvolume(
			ctx,
			fsutil.VolumeID(pvID.FsSubvolName),
			fsutil.VolumeID(vID.FsSubvolName),
			volOptions,
			parentVolOpt)
		err = volClient.CreateCloneFromSubvolume(
			ctx, &parentVolOpt.SubVolume)
		if err != nil {
			log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err)
@@ -99,8 +100,7 @@ func (cs *ControllerServer) createBackingVolume(

		return nil
	}

	if err = core.CreateVolume(ctx, volOptions, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size); err != nil {
	if err = volClient.CreateVolume(ctx); err != nil {
		log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)

		return status.Error(codes.Internal, err.Error())
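The pattern running through these hunks, and the rest of the file below: free functions in `core` that took a volume-options/subvolume-ID pair are replaced by methods on a `SubVolume` client constructed once per request, while metadata bookkeeping moves to the new `store` package. A condensed before/after sketch of the calling convention (grounded in the hunks above, with `volOptions` and `vID` as in the surrounding code):

```go
// Before: package-level helper, caller re-derives the subvolume ID each time.
//   err = core.CreateVolume(ctx, volOptions, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)

// After: build one client, then call methods on it.
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if err := volClient.CreateVolume(ctx); err != nil {
	// handle the error as the surrounding hunks do (log, then gRPC status)
}
```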
@@ -112,7 +112,7 @@ func (cs *ControllerServer) createBackingVolume(
func checkContentSource(
	ctx context.Context,
	req *csi.CreateVolumeRequest,
	cr *util.Credentials) (*core.VolumeOptions, *core.VolumeIdentifier, *core.SnapshotIdentifier, error) {
	cr *util.Credentials) (*store.VolumeOptions, *store.VolumeIdentifier, *store.SnapshotIdentifier, error) {
	if req.VolumeContentSource == nil {
		return nil, nil, nil, nil
	}
@@ -120,7 +120,7 @@ func checkContentSource(
	switch volumeSource.Type.(type) {
	case *csi.VolumeContentSource_Snapshot:
		snapshotID := req.VolumeContentSource.GetSnapshot().GetSnapshotId()
		volOpt, _, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
		volOpt, _, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
		if err != nil {
			if errors.Is(err, cerrors.ErrSnapNotFound) {
				return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@@ -133,7 +133,7 @@ func checkContentSource(
	case *csi.VolumeContentSource_Volume:
		// Find the volume using the provided VolumeID
		volID := req.VolumeContentSource.GetVolume().GetVolumeId()
		parentVol, pvID, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
		parentVol, pvID, err := store.NewVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
		if err != nil {
			if !errors.Is(err, cerrors.ErrVolumeNotFound) {
				return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@@ -179,7 +179,7 @@ func (cs *ControllerServer) CreateVolume(
	}
	defer cs.VolumeLocks.Release(requestName)

	volOptions, err := core.NewVolumeOptions(ctx, requestName, req, cr)
	volOptions, err := store.NewVolumeOptions(ctx, requestName, req, cr)
	if err != nil {
		log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
@@ -199,7 +199,7 @@ func (cs *ControllerServer) CreateVolume(
		defer parentVol.Destroy()
	}

	vID, err := core.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
	vID, err := store.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
	if err != nil {
		if cerrors.IsCloneRetryError(err) {
			return nil, status.Error(codes.Aborted, err.Error())
@@ -211,9 +211,10 @@ func (cs *ControllerServer) CreateVolume(

	if vID != nil {
		if sID != nil || pvID != nil {
			err = volOptions.ExpandVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
			volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
			err = volClient.ExpandVolume(ctx, volOptions.Size)
			if err != nil {
				purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false)
				purgeErr := volClient.PurgeVolume(ctx, false)
				if purgeErr != nil {
					log.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr)
					// All errors other than ErrVolumeNotFound should return an error back to the caller
@@ -221,7 +222,7 @@ func (cs *ControllerServer) CreateVolume(
					return nil, status.Error(codes.Internal, purgeErr.Error())
				}
			}
			errUndo := core.UndoVolReservation(ctx, volOptions, *vID, secret)
			errUndo := store.UndoVolReservation(ctx, volOptions, *vID, secret)
			if errUndo != nil {
				log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
					requestName, errUndo)
@@ -254,7 +255,7 @@ func (cs *ControllerServer) CreateVolume(
	}

	// Reservation
	vID, err = core.ReserveVol(ctx, volOptions, secret)
	vID, err = store.ReserveVol(ctx, volOptions, secret)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
@@ -262,7 +263,7 @@ func (cs *ControllerServer) CreateVolume(
	defer func() {
		if err != nil {
			if !cerrors.IsCloneRetryError(err) {
				errDefer := core.UndoVolReservation(ctx, volOptions, *vID, secret)
				errDefer := store.UndoVolReservation(ctx, volOptions, *vID, secret)
				if errDefer != nil {
					log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
						requestName, errDefer)
@@ -272,7 +273,7 @@ func (cs *ControllerServer) CreateVolume(
	}()

	// Create a volume
	err = cs.createBackingVolume(ctx, volOptions, parentVol, vID, pvID, sID)
	err = cs.createBackingVolume(ctx, volOptions, parentVol, pvID, sID)
	if err != nil {
		if cerrors.IsCloneRetryError(err) {
			return nil, status.Error(codes.Aborted, err.Error())
@@ -281,9 +282,10 @@ func (cs *ControllerServer) CreateVolume(
		return nil, err
	}

	volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vID.FsSubvolName))
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
	volOptions.RootPath, err = volClient.GetVolumeRootPathCeph(ctx)
	if err != nil {
		purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true)
		purgeErr := volClient.PurgeVolume(ctx, true)
		if purgeErr != nil {
			log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
			// All errors other than ErrVolumeNotFound should return an error back to the caller
@@ -355,7 +357,7 @@ func (cs *ControllerServer) DeleteVolume(
	defer cs.OperationLocks.ReleaseDeleteLock(req.GetVolumeId())

	// Find the volume using the provided VolumeID
	volOptions, vID, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
	volOptions, vID, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
	if err != nil {
		// if error is ErrPoolNotFound, the pool is already deleted we dont
		// need to worry about deleting subvolume or omap data, return success
@@ -386,7 +388,7 @@ func (cs *ControllerServer) DeleteVolume(
	}
	defer cs.VolumeLocks.Release(volOptions.RequestName)

	if err = core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
	if err = store.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

@@ -410,7 +412,8 @@ func (cs *ControllerServer) DeleteVolume(
	}
	defer cr.DeleteCredentials()

	if err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false); err != nil {
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
	if err = volClient.PurgeVolume(ctx, false); err != nil {
		log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
		if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
			return nil, status.Error(codes.FailedPrecondition, err.Error())
@@ -421,7 +424,7 @@ func (cs *ControllerServer) DeleteVolume(
		}
	}

	if err := core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
	if err := store.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

@@ -484,7 +487,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
	}
	defer cr.DeleteCredentials()

	volOptions, volIdentifier, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, secret)
	volOptions, volIdentifier, err := store.NewVolumeOptionsFromVolID(ctx, volID, nil, secret)
	if err != nil {
		log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)

@@ -493,8 +496,8 @@ func (cs *ControllerServer) ControllerExpandVolume(
	defer volOptions.Destroy()

	RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())

	if err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
	if err = volClient.ResizeVolume(ctx, RoundOffSize); err != nil {
		log.ErrorLog(ctx, "failed to expand volume %s: %v", fsutil.VolumeID(volIdentifier.FsSubvolName), err)

		return nil, status.Error(codes.Internal, err.Error())
@@ -521,7 +524,7 @@ func (cs *ControllerServer) CreateSnapshot(
	}
	defer cr.DeleteCredentials()

	clusterData, err := core.GetClusterInformation(req.GetParameters())
	clusterData, err := store.GetClusterInformation(req.GetParameters())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
@@ -545,7 +548,7 @@ func (cs *ControllerServer) CreateSnapshot(
	defer cs.OperationLocks.ReleaseSnapshotCreateLock(sourceVolID)

	// Find the volume using the provided VolumeID
	parentVolOptions, vid, err := core.NewVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
	parentVolOptions, vid, err := store.NewVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
	if err != nil {
		if errors.Is(err, util.ErrPoolNotFound) {
			log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
@@ -569,7 +572,7 @@ func (cs *ControllerServer) CreateSnapshot(
			parentVolOptions.ClusterID)
	}

	cephfsSnap, genSnapErr := core.GenSnapFromOptions(ctx, req)
	cephfsSnap, genSnapErr := store.GenSnapFromOptions(ctx, req)
	if genSnapErr != nil {
		return nil, status.Error(codes.Internal, genSnapErr.Error())
	}
@@ -582,7 +585,7 @@ func (cs *ControllerServer) CreateSnapshot(
	}
	defer cs.VolumeLocks.Release(sourceVolID)
	snapName := req.GetName()
	sid, snapInfo, err := core.CheckSnapExists(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
	sid, snapInfo, err := store.CheckSnapExists(ctx, parentVolOptions, cephfsSnap, cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
@@ -591,8 +594,12 @@ func (cs *ControllerServer) CreateSnapshot(
	// ceph fs subvolume info command got added in 14.2.10 and 15.+
	// as we are not able to retrieve the parent size we are rejecting the
	// request to create snapshot.
	// TODO: For this purpose we could make use of cached clusterAdditionalInfo too.
	info, err := parentVolOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
	// TODO: For this purpose we could make use of cached clusterAdditionalInfo
	// too.
	volClient := core.NewSubVolume(parentVolOptions.GetConnection(),
		&parentVolOptions.SubVolume,
		parentVolOptions.ClusterID)
	info, err := volClient.GetSubVolumeInfo(ctx)
	if err != nil {
		// Check error code value against ErrInvalidCommand to understand the cluster
		// support it or not, It's safe to evaluate as the filtering
@@ -603,7 +610,7 @@ func (cs *ControllerServer) CreateSnapshot(
				"subvolume info command not supported in current ceph cluster")
		}
		if sid != nil {
			errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
			errDefer := store.UndoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
			if errDefer != nil {
				log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
					requestName, errDefer)
@@ -617,7 +624,8 @@ func (cs *ControllerServer) CreateSnapshot(
	// check snapshot is protected
	protected := true
	if !(snapInfo.Protected == core.SnapshotIsProtected) {
		err = parentVolOptions.ProtectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(vid.FsSubvolName))
		snapClient := core.NewSnapshot(parentVolOptions.GetConnection(), sid.FsSnapshotName, &parentVolOptions.SubVolume)
		err = snapClient.ProtectSnapshot(ctx)
		if err != nil {
			protected = false
			log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
@@ -637,20 +645,20 @@ func (cs *ControllerServer) CreateSnapshot(
	}

	// Reservation
	sID, err := core.ReserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
	sID, err := store.ReserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer func() {
		if err != nil {
			errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
			errDefer := store.UndoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
			if errDefer != nil {
				log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
					requestName, errDefer)
			}
		}
	}()
	snap, err := doSnapshot(ctx, parentVolOptions, vid.FsSubvolName, sID.FsSnapshotName)
	snap, err := doSnapshot(ctx, parentVolOptions, sID.FsSnapshotName)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
@@ -668,13 +676,12 @@ func (cs *ControllerServer) CreateSnapshot(

func doSnapshot(
	ctx context.Context,
	volOpt *core.VolumeOptions,
	subvolumeName,
	volOpt *store.VolumeOptions,
	snapshotName string) (core.SnapshotInfo, error) {
	volID := fsutil.VolumeID(subvolumeName)
	snapID := fsutil.VolumeID(snapshotName)
	snap := core.SnapshotInfo{}
	err := volOpt.CreateSnapshot(ctx, snapID, volID)
	snapClient := core.NewSnapshot(volOpt.GetConnection(), snapshotName, &volOpt.SubVolume)
	err := snapClient.CreateSnapshot(ctx)
	if err != nil {
		log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
@@ -682,13 +689,13 @@ func doSnapshot(
	}
	defer func() {
		if err != nil {
			dErr := volOpt.DeleteSnapshot(ctx, snapID, volID)
			dErr := snapClient.DeleteSnapshot(ctx)
			if dErr != nil {
				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
			}
		}
	}()
	snap, err = volOpt.GetSnapshotInfo(ctx, snapID, volID)
	snap, err = snapClient.GetSnapshotInfo(ctx)
	if err != nil {
		log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
@@ -700,7 +707,7 @@ func doSnapshot(
		return snap, err
	}
	snap.CreationTime = t
	err = volOpt.ProtectSnapshot(ctx, snapID, volID)
	err = snapClient.ProtectSnapshot(ctx)
	if err != nil {
		log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
	}
@@ -764,7 +771,7 @@ func (cs *ControllerServer) DeleteSnapshot(
	}
	defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)

	volOpt, snapInfo, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
	volOpt, snapInfo, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
	if err != nil {
		switch {
		case errors.Is(err, util.ErrPoolNotFound):
@@ -779,10 +786,10 @@ func (cs *ControllerServer) DeleteSnapshot(
			// success as deletion is complete
			return &csi.DeleteSnapshotResponse{}, nil
|
||||
case errors.Is(err, cerrors.ErrSnapNotFound):
|
||||
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
|
||||
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.RequestName, sid.FsSnapshotName, err)
|
||||
sid.FsSubvolName, sid.FsSnapshotName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -792,10 +799,10 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
// if the error is ErrVolumeNotFound, the subvolume is already deleted
|
||||
// from backend, Hence undo the omap entries and return success
|
||||
log.ErrorLog(ctx, "Volume not present")
|
||||
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
|
||||
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.RequestName, sid.FsSnapshotName, err)
|
||||
sid.FsSubvolName, sid.FsSnapshotName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -819,17 +826,18 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
if snapInfo.HasPendingClones == "yes" {
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "snapshot %s has pending clones", snapshotID)
|
||||
}
|
||||
snapClient := core.NewSnapshot(volOpt.GetConnection(), sid.FsSnapshotName, &volOpt.SubVolume)
|
||||
if snapInfo.Protected == core.SnapshotIsProtected {
|
||||
err = volOpt.UnprotectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
|
||||
err = snapClient.UnprotectSnapshot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
err = volOpt.DeleteSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
|
||||
err = snapClient.DeleteSnapshot(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
|
||||
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.RequestName, sid.FsSnapshotName, err)
|
||||
|
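The hunks above replace method calls on `VolumeOptions` with small per-operation client objects. A minimal sketch of that wiring, under the assumption that `volOpts` has already been resolved from a volume ID; the function name is hypothetical and error handling is trimmed:

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
)

// snapshotParent mirrors the client construction in CreateSnapshot above.
func snapshotParent(ctx context.Context, volOpts *store.VolumeOptions, snapName string) error {
	// Per-operation subvolume client bound to the parent subvolume.
	volClient := core.NewSubVolume(volOpts.GetConnection(), &volOpts.SubVolume, volOpts.ClusterID)
	if _, err := volClient.GetSubVolumeInfo(ctx); err != nil {
		// Parent size unavailable; the snapshot request is rejected above.
		return err
	}

	// Per-operation snapshot client for the same subvolume.
	snapClient := core.NewSnapshot(volOpts.GetConnection(), snapName, &volOpts.SubVolume)

	return snapClient.CreateSnapshot(ctx)
}
```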
@@ -21,7 +21,6 @@ import (
 	"errors"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
-	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )

@@ -29,16 +28,16 @@ import (
 type cephFSCloneState string

 const (
-	// cephFSCloneError indicates that fetching the clone state returned an error.
-	cephFSCloneError = cephFSCloneState("")
-	// cephFSCloneFailed indicates that clone is in failed state.
-	cephFSCloneFailed = cephFSCloneState("failed")
-	// cephFSClonePending indicates that clone is in pending state.
-	cephFSClonePending = cephFSCloneState("pending")
-	// cephFSCloneInprogress indicates that clone is in in-progress state.
-	cephFSCloneInprogress = cephFSCloneState("in-progress")
-	// cephFSCloneComplete indicates that clone is in complete state.
-	cephFSCloneComplete = cephFSCloneState("complete")
+	// CephFSCloneError indicates that fetching the clone state returned an error.
+	CephFSCloneError = cephFSCloneState("")
+	// CephFSCloneFailed indicates that clone is in failed state.
+	CephFSCloneFailed = cephFSCloneState("failed")
+	// CephFSClonePending indicates that clone is in pending state.
+	CephFSClonePending = cephFSCloneState("pending")
+	// CephFSCloneInprogress indicates that clone is in in-progress state.
+	CephFSCloneInprogress = cephFSCloneState("in-progress")
+	// CephFSCloneComplete indicates that clone is in complete state.
+	CephFSCloneComplete = cephFSCloneState("complete")

 	// SnapshotIsProtected string indicates that the snapshot is currently protected.
 	SnapshotIsProtected = "yes"
@@ -47,28 +46,28 @@ const (
 // toError checks the state of the clone if it's not cephFSCloneComplete.
 func (cs cephFSCloneState) toError() error {
 	switch cs {
-	case cephFSCloneComplete:
+	case CephFSCloneComplete:
 		return nil
-	case cephFSCloneError:
+	case CephFSCloneError:
 		return cerrors.ErrInvalidClone
-	case cephFSCloneInprogress:
+	case CephFSCloneInprogress:
 		return cerrors.ErrCloneInProgress
-	case cephFSClonePending:
+	case CephFSClonePending:
 		return cerrors.ErrClonePending
-	case cephFSCloneFailed:
+	case CephFSCloneFailed:
 		return cerrors.ErrCloneFailed
 	}

 	return nil
 }

-func CreateCloneFromSubvolume(
+// CreateCloneFromSubvolume creates a clone from a subvolume.
+func (s *subVolumeClient) CreateCloneFromSubvolume(
 	ctx context.Context,
-	volID, cloneID fsutil.VolumeID,
-	volOpt,
-	parentvolOpt *VolumeOptions) error {
-	snapshotID := cloneID
-	err := parentvolOpt.CreateSnapshot(ctx, snapshotID, volID)
+	parentvolOpt *SubVolume) error {
+	snapshotID := s.VolID
+	snapClient := NewSnapshot(s.conn, snapshotID, parentvolOpt)
+	err := snapClient.CreateSnapshot(ctx)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)

@@ -82,17 +81,17 @@ func CreateCloneFromSubvolume(
 	)
 	defer func() {
 		if protectErr != nil {
-			err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID)
+			err = snapClient.DeleteSnapshot(ctx)
 			if err != nil {
 				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
 			}
 		}

 		if cloneErr != nil {
-			if err = volOpt.PurgeVolume(ctx, cloneID, true); err != nil {
-				log.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
+			if err = s.PurgeVolume(ctx, true); err != nil {
+				log.ErrorLog(ctx, "failed to delete volume %s: %v", s.VolID, err)
 			}
-			if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
+			if err = snapClient.UnprotectSnapshot(ctx); err != nil {
 				// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 				// in that case we are safe and we could discard this error and we are good to go
 				// ahead with deletion
@@ -100,47 +99,46 @@ func CreateCloneFromSubvolume(
 					log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
 				}
 			}
-			if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil {
+			if err = snapClient.DeleteSnapshot(ctx); err != nil {
 				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
 			}
 		}
 	}()
-	protectErr = parentvolOpt.ProtectSnapshot(ctx, snapshotID, volID)
+	protectErr = snapClient.ProtectSnapshot(ctx)
 	if protectErr != nil {
 		log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)

 		return protectErr
 	}

-	cloneErr = parentvolOpt.cloneSnapshot(ctx, volID, snapshotID, cloneID, volOpt)
+	cloneErr = snapClient.CloneSnapshot(ctx, s.SubVolume)
 	if cloneErr != nil {
-		log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr)
+		log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", parentvolOpt.VolID, snapshotID, s.VolID, cloneErr)

 		return cloneErr
 	}

-	cloneState, cloneErr := volOpt.getCloneState(ctx, cloneID)
+	cloneState, cloneErr := s.GetCloneState(ctx)
 	if cloneErr != nil {
 		log.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)

 		return cloneErr
 	}

-	if cloneState != cephFSCloneComplete {
-		log.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError())
+	if cloneState != CephFSCloneComplete {
+		log.ErrorLog(ctx, "clone %s did not complete: %v", s.VolID, cloneState.toError())

 		return cloneState.toError()
 	}

-	err = volOpt.ExpandVolume(ctx, cloneID, volOpt.Size)
+	err = s.ExpandVolume(ctx, s.Size)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
+		log.ErrorLog(ctx, "failed to expand volume %s: %v", s.VolID, err)

 		return err
 	}

 	// As we completed clone, remove the intermediate snap
-	if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
+	if err = snapClient.UnprotectSnapshot(ctx); err != nil {
 		// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 		// in that case we are safe and we could discard this error and we are good to go
 		// ahead with deletion
@@ -150,7 +148,7 @@ func CreateCloneFromSubvolume(
 			return err
 		}
 	}
-	if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil {
+	if err = snapClient.DeleteSnapshot(ctx); err != nil {
 		log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)

 		return err
@@ -159,14 +157,14 @@ func CreateCloneFromSubvolume(
 	return nil
 }

-func cleanupCloneFromSubvolumeSnapshot(
-	ctx context.Context,
-	volID, cloneID fsutil.VolumeID,
-	parentVolOpt *VolumeOptions) error {
+// CleanupSnapshotFromSubvolume removes the snapshot from the subvolume.
+func (s *subVolumeClient) CleanupSnapshotFromSubvolume(
+	ctx context.Context, parentVol *SubVolume) error {
 	// snapshot name is same as clone name as we need a name which can be
 	// identified during PVC-PVC cloning.
-	snapShotID := cloneID
-	snapInfo, err := parentVolOpt.GetSnapshotInfo(ctx, snapShotID, volID)
+	snapShotID := s.VolID
+	snapClient := NewSnapshot(s.conn, snapShotID, parentVol)
+	snapInfo, err := snapClient.GetSnapshotInfo(ctx)
 	if err != nil {
 		if errors.Is(err, cerrors.ErrSnapNotFound) {
 			return nil
@@ -176,14 +174,14 @@ func cleanupCloneFromSubvolumeSnapshot(
 	}

 	if snapInfo.Protected == SnapshotIsProtected {
-		err = parentVolOpt.UnprotectSnapshot(ctx, snapShotID, volID)
+		err = snapClient.UnprotectSnapshot(ctx)
 		if err != nil {
 			log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)

 			return err
 		}
 	}
-	err = parentVolOpt.DeleteSnapshot(ctx, snapShotID, volID)
+	err = snapClient.DeleteSnapshot(ctx)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)

@@ -193,45 +191,39 @@ func cleanupCloneFromSubvolumeSnapshot(
 	return nil
 }

-func CreateCloneFromSnapshot(
-	ctx context.Context,
-	parentVolOpt, volOptions *VolumeOptions,
-	vID *VolumeIdentifier,
-	sID *SnapshotIdentifier) error {
-	snapID := fsutil.VolumeID(sID.FsSnapshotName)
-	err := parentVolOpt.cloneSnapshot(
-		ctx,
-		fsutil.VolumeID(sID.FsSubvolName),
-		snapID,
-		fsutil.VolumeID(vID.FsSubvolName),
-		volOptions)
+// CreateCloneFromSnapshot creates a clone from a subvolume snapshot.
+func (s *subVolumeClient) CreateCloneFromSnapshot(
	ctx context.Context, snap Snapshot) error {
+	snapID := snap.SnapshotID
+	snapClient := NewSnapshot(s.conn, snapID, snap.SubVolume)
+	err := snapClient.CloneSnapshot(ctx, s.SubVolume)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
 			if !cerrors.IsCloneRetryError(err) {
-				if dErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true); dErr != nil {
-					log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
+				if dErr := s.PurgeVolume(ctx, true); dErr != nil {
+					log.ErrorLog(ctx, "failed to delete volume %s: %v", s.VolID, dErr)
 				}
 			}
 		}
 	}()

-	cloneState, err := volOptions.getCloneState(ctx, fsutil.VolumeID(vID.FsSubvolName))
+	cloneState, err := s.GetCloneState(ctx)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to get clone state: %v", err)

 		return err
 	}

-	if cloneState != cephFSCloneComplete {
+	if cloneState != CephFSCloneComplete {
 		return cloneState.toError()
 	}

-	err = volOptions.ExpandVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
+	err = s.ExpandVolume(ctx, s.Size)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
+		log.ErrorLog(ctx, "failed to expand volume %s with error: %v", s.VolID, err)

 		return err
 	}
@@ -239,24 +231,25 @@ func CreateCloneFromSnapshot(
 	return nil
 }

-func (vo *VolumeOptions) getCloneState(ctx context.Context, volID fsutil.VolumeID) (cephFSCloneState, error) {
-	fsa, err := vo.conn.GetFSAdmin()
+// GetCloneState returns the clone state of the subvolume.
+func (s *subVolumeClient) GetCloneState(ctx context.Context) (cephFSCloneState, error) {
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(
 			ctx,
 			"could not get FSAdmin, can get clone status for volume %s with ID %s: %v",
-			vo.FsName,
-			string(volID),
+			s.FsName,
+			s.VolID,
 			err)

-		return cephFSCloneError, err
+		return CephFSCloneError, err
 	}

-	cs, err := fsa.CloneStatus(vo.FsName, vo.SubvolumeGroup, string(volID))
+	cs, err := fsa.CloneStatus(s.FsName, s.SubvolumeGroup, s.VolID)
 	if err != nil {
-		log.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err)
+		log.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", s.FsName, s.VolID, err)

-		return cephFSCloneError, err
+		return CephFSCloneError, err
 	}

 	return cephFSCloneState(cs.State), nil
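Note the design choice in this hunk: the state type `cephFSCloneState` stays unexported, while the constants are exported, so other packages can compare states without constructing their own. A hypothetical polling loop built on that (the retry bound and interval are illustrative assumptions, not values from this commit):

```go
package example

import (
	"context"
	"time"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
)

// waitForClone polls GetCloneState until the clone settles or retries run out.
func waitForClone(ctx context.Context, vol core.SubVolumeClient) error {
	for i := 0; i < 30; i++ {
		state, err := vol.GetCloneState(ctx)
		if err != nil {
			return err
		}
		// Exported constants of the unexported state type are comparable here.
		if state == core.CephFSCloneComplete {
			return nil
		}
		time.Sleep(2 * time.Second)
	}

	return context.DeadlineExceeded
}
```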
@@ -27,11 +27,11 @@ import (
 func TestCloneStateToError(t *testing.T) {
 	t.Parallel()
 	errorState := make(map[cephFSCloneState]error)
-	errorState[cephFSCloneComplete] = nil
-	errorState[cephFSCloneError] = cerrors.ErrInvalidClone
-	errorState[cephFSCloneInprogress] = cerrors.ErrCloneInProgress
-	errorState[cephFSClonePending] = cerrors.ErrClonePending
-	errorState[cephFSCloneFailed] = cerrors.ErrCloneFailed
+	errorState[CephFSCloneComplete] = nil
+	errorState[CephFSCloneError] = cerrors.ErrInvalidClone
+	errorState[CephFSCloneInprogress] = cerrors.ErrCloneInProgress
+	errorState[CephFSClonePending] = cerrors.ErrClonePending
+	errorState[CephFSCloneFailed] = cerrors.ErrCloneFailed

 	for state, err := range errorState {
 		assert.Equal(t, state.toError(), err)
@@ -22,7 +22,7 @@ import (
 	"time"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
-	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
+	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"

 	"github.com/ceph/go-ceph/cephfs/admin"
@@ -36,32 +36,59 @@ const (
 	autoProtect = "snapshot-autoprotect"
 )

-// CephfsSnapshot represents a CSI snapshot and its cluster information.
-type CephfsSnapshot struct {
-	NamePrefix string
-	Monitors   string
-	// MetadataPool & Pool fields are not used atm. But its definitely good to have it in this struct
-	// so keeping it here
-	MetadataPool string
-	Pool         string
-	ClusterID    string
-	RequestName  string
-	// ReservedID represents the ID reserved for a snapshot
-	ReservedID string
+// SnapshotClient is the interface that holds the signature of snapshot methods
+// that interacts with CephFS snapshot API's.
+type SnapshotClient interface {
+	// CreateSnapshot creates a snapshot of the subvolume.
+	CreateSnapshot(ctx context.Context) error
+	// DeleteSnapshot deletes the snapshot of the subvolume.
+	DeleteSnapshot(ctx context.Context) error
+	// GetSnapshotInfo returns the snapshot info of the subvolume.
+	GetSnapshotInfo(ctx context.Context) (SnapshotInfo, error)
+	// ProtectSnapshot protects the snapshot of the subvolume.
+	ProtectSnapshot(ctx context.Context) error
+	// UnprotectSnapshot unprotects the snapshot of the subvolume.
+	UnprotectSnapshot(ctx context.Context) error
+	// CloneSnapshot clones the snapshot of the subvolume.
+	CloneSnapshot(ctx context.Context, cloneVolOptions *SubVolume) error
 }

-func (vo *VolumeOptions) CreateSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
-	fsa, err := vo.conn.GetFSAdmin()
+// snapshotClient is the implementation of SnapshotClient interface.
+type snapshotClient struct {
+	*Snapshot                         // Embedded snapshot struct.
+	conn *util.ClusterConnection // Cluster connection.
+}
+
+// Snapshot represents a subvolume snapshot and its cluster information.
+type Snapshot struct {
+	SnapshotID string // subvolume snapshot id.
+	*SubVolume        // parent subvolume information.
+}
+
+// NewSnapshot creates a new snapshot client.
+func NewSnapshot(conn *util.ClusterConnection, snapshotID string, vol *SubVolume) SnapshotClient {
+	return &snapshotClient{
+		Snapshot: &Snapshot{
+			SnapshotID: snapshotID,
+			SubVolume:  vol,
+		},
+		conn: conn,
+	}
+}
+
+// CreateSnapshot creates a snapshot of the subvolume.
+func (s *snapshotClient) CreateSnapshot(ctx context.Context) error {
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)

 		return err
 	}

-	err = fsa.CreateSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
+	err = fsa.CreateSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
-			string(snapID), string(volID), vo.FsName, err)
+			s.SnapshotID, s.VolID, s.FsName, err)

 		return err
 	}
@@ -69,18 +96,19 @@ func (vo *VolumeOptions) CreateSnapshot(ctx context.Context, snapID, volID fsuti
 	return nil
 }

-func (vo *VolumeOptions) DeleteSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
-	fsa, err := vo.conn.GetFSAdmin()
+// DeleteSnapshot deletes the snapshot of the subvolume.
+func (s *snapshotClient) DeleteSnapshot(ctx context.Context) error {
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)

 		return err
 	}

-	err = fsa.ForceRemoveSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
+	err = fsa.ForceRemoveSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
-			string(snapID), string(volID), vo.FsName, err)
+			s.SnapshotID, s.VolID, s.FsName, err)

 		return err
 	}
@@ -95,16 +123,17 @@ type SnapshotInfo struct {
 	Protected string
 }

-func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsutil.VolumeID) (SnapshotInfo, error) {
+// GetSnapshotInfo returns the snapshot info of the subvolume.
+func (s *snapshotClient) GetSnapshotInfo(ctx context.Context) (SnapshotInfo, error) {
 	snap := SnapshotInfo{}
-	fsa, err := vo.conn.GetFSAdmin()
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)

 		return snap, err
 	}

-	info, err := fsa.SubVolumeSnapshotInfo(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
+	info, err := fsa.SubVolumeSnapshotInfo(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
 	if err != nil {
 		if errors.Is(err, rados.ErrNotFound) {
 			return snap, cerrors.ErrSnapNotFound
@@ -112,9 +141,9 @@ func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsut
 		log.ErrorLog(
 			ctx,
 			"failed to get subvolume snapshot info %s %s in fs %s with error %s",
-			string(volID),
-			string(snapID),
-			vo.FsName,
+			s.VolID,
+			s.SnapshotID,
+			s.FsName,
 			err)

 		return snap, err
@@ -126,21 +155,21 @@ func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsut
 	return snap, nil
 }

-func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
+// ProtectSnapshot protects the snapshot of the subvolume.
+func (s *snapshotClient) ProtectSnapshot(ctx context.Context) error {
 	// If "snapshot-autoprotect" feature is present, The ProtectSnapshot
 	// call should be treated as a no-op.
-	if checkSubvolumeHasFeature(autoProtect, vo.Features) {
+	if checkSubvolumeHasFeature(autoProtect, s.Features) {
 		return nil
 	}
-	fsa, err := vo.conn.GetFSAdmin()
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)

 		return err
 	}

-	err = fsa.ProtectSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID),
-		string(snapID))
+	err = fsa.ProtectSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
 	if err != nil {
 		if errors.Is(err, rados.ErrObjectExists) {
 			return nil
@@ -148,9 +177,9 @@ func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsut
 		log.ErrorLog(
 			ctx,
 			"failed to protect subvolume snapshot %s %s in fs %s with error: %s",
-			string(volID),
-			string(snapID),
-			vo.FsName,
+			s.VolID,
+			s.SnapshotID,
+			s.FsName,
 			err)

 		return err
@@ -159,21 +188,22 @@ func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsut
 	return nil
 }

-func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
+// UnprotectSnapshot unprotects the snapshot of the subvolume.
+func (s *snapshotClient) UnprotectSnapshot(ctx context.Context) error {
 	// If "snapshot-autoprotect" feature is present, The UnprotectSnapshot
 	// call should be treated as a no-op.
-	if checkSubvolumeHasFeature(autoProtect, vo.Features) {
+	if checkSubvolumeHasFeature(autoProtect, s.Features) {
 		return nil
 	}
-	fsa, err := vo.conn.GetFSAdmin()
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)

 		return err
 	}

-	err = fsa.UnprotectSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID),
-		string(snapID))
+	err = fsa.UnprotectSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID,
+		s.SnapshotID)
 	if err != nil {
 		// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 		// in that case we are safe and we could discard this error.
@@ -183,9 +213,9 @@ func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fs
 		log.ErrorLog(
 			ctx,
 			"failed to unprotect subvolume snapshot %s %s in fs %s with error: %s",
-			string(volID),
-			string(snapID),
-			vo.FsName,
+			s.VolID,
+			s.SnapshotID,
+			s.FsName,
 			err)

 		return err
@@ -194,33 +224,33 @@ func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fs
 	return nil
 }

-func (vo *VolumeOptions) cloneSnapshot(
+// CloneSnapshot clones the snapshot of the subvolume.
+func (s *snapshotClient) CloneSnapshot(
 	ctx context.Context,
-	volID, snapID, cloneID fsutil.VolumeID,
-	cloneVolOptions *VolumeOptions,
+	cloneSubVol *SubVolume,
 ) error {
-	fsa, err := vo.conn.GetFSAdmin()
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)

 		return err
 	}
 	co := &admin.CloneOptions{
-		TargetGroup: cloneVolOptions.SubvolumeGroup,
+		TargetGroup: cloneSubVol.SubvolumeGroup,
 	}
-	if cloneVolOptions.Pool != "" {
-		co.PoolLayout = cloneVolOptions.Pool
+	if cloneSubVol.Pool != "" {
+		co.PoolLayout = cloneSubVol.Pool
 	}

-	err = fsa.CloneSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID), string(cloneID), co)
+	err = fsa.CloneSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID, cloneSubVol.VolID, co)
 	if err != nil {
 		log.ErrorLog(
 			ctx,
 			"failed to clone subvolume snapshot %s %s in fs %s with error: %s",
-			string(volID),
-			string(snapID),
-			string(cloneID),
-			vo.FsName,
+			s.VolID,
+			s.SnapshotID,
+			cloneSubVol.VolID,
+			s.FsName,
 			err)
 		if errors.Is(err, rados.ErrNotFound) {
 			return cerrors.ErrVolumeNotFound
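A minimal sketch of the full snapshot lifecycle through the new `SnapshotClient`; the connection and subvolume values are assumed to exist already, and the helper is hypothetical:

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	"github.com/ceph/ceph-csi/internal/util"
)

// cloneViaSnapshot drives create -> protect -> clone -> unprotect -> delete.
func cloneViaSnapshot(ctx context.Context, conn *util.ClusterConnection,
	parent, clone *core.SubVolume) error {
	// The snapshot is named after the clone so it can be found again
	// during PVC-PVC cloning (see the clone helpers above).
	snap := core.NewSnapshot(conn, clone.VolID, parent)
	if err := snap.CreateSnapshot(ctx); err != nil {
		return err
	}
	// A no-op on clusters with the snapshot-autoprotect feature.
	if err := snap.ProtectSnapshot(ctx); err != nil {
		return err
	}
	if err := snap.CloneSnapshot(ctx, clone); err != nil {
		return err
	}
	// Once the clone completes, the intermediate snapshot is removed.
	if err := snap.UnprotectSnapshot(ctx); err != nil {
		return err
	}

	return snap.DeleteSnapshot(ctx)
}
```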
@@ -53,20 +53,75 @@ type Subvolume struct {
 	Features []string
 }

+// SubVolumeClient is the interface that holds the signature of subvolume methods
+// that interacts with CephFS subvolume API's.
+type SubVolumeClient interface {
+	// GetVolumeRootPathCeph returns the root path of the subvolume.
+	GetVolumeRootPathCeph(ctx context.Context) (string, error)
+	// CreateVolume creates a subvolume.
+	CreateVolume(ctx context.Context) error
+	// GetSubVolumeInfo returns the subvolume information.
+	GetSubVolumeInfo(ctx context.Context) (*Subvolume, error)
+	// ExpandVolume expands the volume if the requested size is greater than
+	// the subvolume size.
+	ExpandVolume(ctx context.Context, bytesQuota int64) error
+	// ResizeVolume resizes the volume.
+	ResizeVolume(ctx context.Context, bytesQuota int64) error
+	// PurgeVolume removes the subvolume.
+	PurgeVolume(ctx context.Context, force bool) error
+
+	// CreateCloneFromSubvolume creates a clone from the subvolume.
+	CreateCloneFromSubvolume(ctx context.Context, parentvolOpt *SubVolume) error
+	// GetCloneState returns the clone state of the subvolume.
+	GetCloneState(ctx context.Context) (cephFSCloneState, error)
+	// CreateCloneFromSnapshot creates a clone from the subvolume snapshot.
+	CreateCloneFromSnapshot(ctx context.Context, snap Snapshot) error
+	// CleanupSnapshotFromSubvolume removes the snapshot from the subvolume.
+	CleanupSnapshotFromSubvolume(ctx context.Context, parentVol *SubVolume) error
+}
+
+// subVolumeClient implements SubVolumeClient interface.
+type subVolumeClient struct {
+	*SubVolume                          // Embedded SubVolume struct.
+	clusterID string                    // Cluster ID to check subvolumegroup and resize functionality.
+	conn      *util.ClusterConnection   // Cluster connection.
+}
+
+// SubVolume holds the information about the subvolume.
+type SubVolume struct {
+	VolID          string   // subvolume id.
+	FsName         string   // filesystem name.
+	SubvolumeGroup string   // subvolume group name where subvolume will be created.
+	Pool           string   // pool name where subvolume will be created.
+	Features       []string // subvolume features.
+	Size           int64    // subvolume size.
+}
+
+// NewSubVolume returns a new subvolume client.
+func NewSubVolume(conn *util.ClusterConnection, vol *SubVolume, clusterID string) SubVolumeClient {
+	return &subVolumeClient{
+		SubVolume: vol,
+		clusterID: clusterID,
+		conn:      conn,
+	}
+}
+
+// GetVolumeRootPathCephDeprecated returns the root path of the subvolume.
 func GetVolumeRootPathCephDeprecated(volID fsutil.VolumeID) string {
 	return path.Join("/", "csi-volumes", string(volID))
 }

-func (vo *VolumeOptions) GetVolumeRootPathCeph(ctx context.Context, volID fsutil.VolumeID) (string, error) {
-	fsa, err := vo.conn.GetFSAdmin()
+// GetVolumeRootPathCeph returns the root path of the subvolume.
+func (s *subVolumeClient) GetVolumeRootPathCeph(ctx context.Context) (string, error) {
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin err %s", err)

 		return "", err
 	}
-	svPath, err := fsa.SubVolumePath(vo.FsName, vo.SubvolumeGroup, string(volID))
+	svPath, err := fsa.SubVolumePath(s.FsName, s.SubvolumeGroup, s.VolID)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", string(volID), err)
+		log.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", s.VolID, err)
 		if errors.Is(err, rados.ErrNotFound) {
 			return "", util.JoinErrors(cerrors.ErrVolumeNotFound, err)
 		}
@@ -77,17 +132,18 @@ func (vo *VolumeOptions) GetVolumeRootPathCeph(ctx context.Context, volID fsutil
 	return svPath, nil
 }

-func (vo *VolumeOptions) GetSubVolumeInfo(ctx context.Context, volID fsutil.VolumeID) (*Subvolume, error) {
-	fsa, err := vo.conn.GetFSAdmin()
+// GetSubVolumeInfo returns the subvolume information.
+func (s *subVolumeClient) GetSubVolumeInfo(ctx context.Context) (*Subvolume, error) {
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
-		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", s.FsName, err)

 		return nil, err
 	}

-	info, err := fsa.SubVolumeInfo(vo.FsName, vo.SubvolumeGroup, string(volID))
+	info, err := fsa.SubVolumeInfo(s.FsName, s.SubvolumeGroup, s.VolID)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", string(volID), err)
+		log.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", s.VolID, err)
 		if errors.Is(err, rados.ErrNotFound) {
 			return nil, cerrors.ErrVolumeNotFound
 		}
@@ -111,7 +167,7 @@ func (vo *VolumeOptions) GetSubVolumeInfo(ctx context.Context, volID fsutil.Volu
 		// or nil (in case the subvolume is in snapshot-retained state),
 		// just continue without returning quota information.
 		if !(info.BytesQuota == fsAdmin.Infinite || info.State == fsAdmin.StateSnapRetained) {
-			return nil, fmt.Errorf("subvolume %s has unsupported quota: %v", string(volID), info.BytesQuota)
+			return nil, fmt.Errorf("subvolume %s has unsupported quota: %v", s.VolID, info.BytesQuota)
 		}
 	} else {
 		subvol.BytesQuota = int64(bc)
@@ -140,50 +196,52 @@ type localClusterState struct {
 	subVolumeGroupCreated bool
 }

-func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.VolumeID, bytesQuota int64) error {
-	// verify if corresponding ClusterID key is present in the map,
+// CreateVolume creates a subvolume.
+func (s *subVolumeClient) CreateVolume(ctx context.Context) error {
+	// verify if corresponding clusterID key is present in the map,
 	// and if not, initialize with default values(false).
-	if _, keyPresent := clusterAdditionalInfo[volOptions.ClusterID]; !keyPresent {
-		clusterAdditionalInfo[volOptions.ClusterID] = &localClusterState{}
+	if _, keyPresent := clusterAdditionalInfo[s.clusterID]; !keyPresent {
+		clusterAdditionalInfo[s.clusterID] = &localClusterState{}
 	}

-	ca, err := volOptions.conn.GetFSAdmin()
+	ca, err := s.conn.GetFSAdmin()
 	if err != nil {
-		log.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", s.VolID, err)

 		return err
 	}

 	// create subvolumegroup if not already created for the cluster.
-	if !clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated {
+	if !clusterAdditionalInfo[s.clusterID].subVolumeGroupCreated {
 		opts := fsAdmin.SubVolumeGroupOptions{}
-		err = ca.CreateSubVolumeGroup(volOptions.FsName, volOptions.SubvolumeGroup, &opts)
+		err = ca.CreateSubVolumeGroup(s.FsName, s.SubvolumeGroup, &opts)
 		if err != nil {
 			log.ErrorLog(
 				ctx,
 				"failed to create subvolume group %s, for the vol %s: %s",
-				volOptions.SubvolumeGroup,
-				string(volID),
+				s.SubvolumeGroup,
+				s.VolID,
 				err)

 			return err
 		}
-		log.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
-		clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated = true
+		log.DebugLog(ctx, "cephfs: created subvolume group %s", s.SubvolumeGroup)
+		clusterAdditionalInfo[s.clusterID].subVolumeGroupCreated = true
 	}

 	opts := fsAdmin.SubVolumeOptions{
-		Size: fsAdmin.ByteCount(bytesQuota),
+		Size: fsAdmin.ByteCount(s.Size),
 		Mode: modeAllRWX,
 	}
-	if volOptions.Pool != "" {
-		opts.PoolLayout = volOptions.Pool
+	if s.Pool != "" {
+		opts.PoolLayout = s.Pool
 	}

 	fmt.Println("this is for debugging ")
 	// FIXME: check if the right credentials are used ("-n", cephEntityClientPrefix + cr.ID)
-	err = ca.CreateSubVolume(volOptions.FsName, volOptions.SubvolumeGroup, string(volID), &opts)
+	err = ca.CreateSubVolume(s.FsName, s.SubvolumeGroup, s.VolID, &opts)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
+		log.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", s.VolID, s.FsName, err)

 		return err
 	}
@@ -193,16 +251,16 @@ func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.V

 // ExpandVolume will expand the volume if the requested size is greater than
 // the subvolume size.
-func (vo *VolumeOptions) ExpandVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
+func (s *subVolumeClient) ExpandVolume(ctx context.Context, bytesQuota int64) error {
 	// get the subvolume size for comparison with the requested size.
-	info, err := vo.GetSubVolumeInfo(ctx, volID)
+	info, err := s.GetSubVolumeInfo(ctx)
 	if err != nil {
 		return err
 	}
 	// resize if the requested size is greater than the current size.
-	if vo.Size > info.BytesQuota {
-		log.DebugLog(ctx, "clone %s size %d is greater than requested size %d", volID, info.BytesQuota, bytesQuota)
-		err = vo.ResizeVolume(ctx, volID, bytesQuota)
+	if s.Size > info.BytesQuota {
+		log.DebugLog(ctx, "clone %s size %d is greater than requested size %d", s.VolID, info.BytesQuota, bytesQuota)
+		err = s.ResizeVolume(ctx, bytesQuota)
 	}

 	return err
@@ -211,45 +269,47 @@ func (vo *VolumeOptions) ExpandVolume(ctx context.Context, volID fsutil.VolumeID
 // ResizeVolume will try to use ceph fs subvolume resize command to resize the
 // subvolume. If the command is not available as a fallback it will use
 // CreateVolume to resize the subvolume.
-func (vo *VolumeOptions) ResizeVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
+func (s *subVolumeClient) ResizeVolume(ctx context.Context, bytesQuota int64) error {
 	// keyPresent checks whether corresponding clusterID key is present in clusterAdditionalInfo
 	var keyPresent bool
-	// verify if corresponding ClusterID key is present in the map,
+	// verify if corresponding clusterID key is present in the map,
 	// and if not, initialize with default values(false).
-	if _, keyPresent = clusterAdditionalInfo[vo.ClusterID]; !keyPresent {
-		clusterAdditionalInfo[vo.ClusterID] = &localClusterState{}
+	if _, keyPresent = clusterAdditionalInfo[s.clusterID]; !keyPresent {
+		clusterAdditionalInfo[s.clusterID] = &localClusterState{}
 	}
 	// resize subvolume when either it's supported, or when corresponding
 	// clusterID key was not present.
-	if clusterAdditionalInfo[vo.ClusterID].resizeState == unknown ||
-		clusterAdditionalInfo[vo.ClusterID].resizeState == supported {
-		fsa, err := vo.conn.GetFSAdmin()
+	if clusterAdditionalInfo[s.clusterID].resizeState == unknown ||
+		clusterAdditionalInfo[s.clusterID].resizeState == supported {
+		fsa, err := s.conn.GetFSAdmin()
 		if err != nil {
-			log.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", vo.FsName, err)
+			log.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", s.FsName, err)

 			return err
 		}
-		_, err = fsa.ResizeSubVolume(vo.FsName, vo.SubvolumeGroup, string(volID), fsAdmin.ByteCount(bytesQuota), true)
+		_, err = fsa.ResizeSubVolume(s.FsName, s.SubvolumeGroup, s.VolID, fsAdmin.ByteCount(bytesQuota), true)
 		if err == nil {
-			clusterAdditionalInfo[vo.ClusterID].resizeState = supported
+			clusterAdditionalInfo[s.clusterID].resizeState = supported

 			return nil
 		}
 		var invalid fsAdmin.NotImplementedError
 		// In case the error is other than invalid command return error to the caller.
 		if !errors.As(err, &invalid) {
-			log.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
+			log.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", s.VolID, s.FsName, err)

 			return err
 		}
 	}
-	clusterAdditionalInfo[vo.ClusterID].resizeState = unsupported
+	clusterAdditionalInfo[s.clusterID].resizeState = unsupported
+	s.Size = bytesQuota

-	return CreateVolume(ctx, vo, volID, bytesQuota)
+	return s.CreateVolume(ctx)
 }

-func (vo *VolumeOptions) PurgeVolume(ctx context.Context, volID fsutil.VolumeID, force bool) error {
-	fsa, err := vo.conn.GetFSAdmin()
+// PurgeVolume removes the subvolume.
+func (s *subVolumeClient) PurgeVolume(ctx context.Context, force bool) error {
+	fsa, err := s.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin %s:", err)

@@ -259,13 +319,13 @@ func (vo *VolumeOptions) PurgeVolume(ctx context.Context, volID fsutil.VolumeID,
 	opt := fsAdmin.SubVolRmFlags{}
 	opt.Force = force

-	if checkSubvolumeHasFeature("snapshot-retention", vo.Features) {
+	if checkSubvolumeHasFeature("snapshot-retention", s.Features) {
 		opt.RetainSnapshots = true
 	}

-	err = fsa.RemoveSubVolumeWithFlags(vo.FsName, vo.SubvolumeGroup, string(volID), opt)
+	err = fsa.RemoveSubVolumeWithFlags(s.FsName, s.SubvolumeGroup, s.VolID, opt)
 	if err != nil {
-		log.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
+		log.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", s.VolID, s.FsName, err)
 		if strings.Contains(err.Error(), cerrors.VolumeNotEmpty) {
 			return util.JoinErrors(cerrors.ErrVolumeHasSnapshots, err)
 		}
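A sketch of creating and resizing a subvolume through the new client; every field value below is a placeholder, and the helper function is not part of this commit:

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	"github.com/ceph/ceph-csi/internal/util"
)

// createAndGrow provisions a subvolume, then doubles its quota.
func createAndGrow(ctx context.Context, conn *util.ClusterConnection) error {
	sv := &core.SubVolume{
		VolID:          "csi-vol-example", // placeholder id
		FsName:         "myfs",            // placeholder filesystem
		SubvolumeGroup: "csi",
		Size:           1 << 30, // 1 GiB quota
	}
	// The clusterID keys the per-cluster resize/subvolumegroup caches.
	vol := core.NewSubVolume(conn, sv, "cluster-1")

	if err := vol.CreateVolume(ctx); err != nil {
		return err
	}

	// ExpandVolume only resizes when the requested quota exceeds the
	// current one; ResizeVolume falls back to CreateVolume on clusters
	// without the resize command.
	return vol.ExpandVolume(ctx, 2<<30)
}
```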
@@ -17,8 +17,8 @@ limitations under the License.
 package cephfs

 import (
-	"github.com/ceph/ceph-csi/internal/cephfs/core"
 	"github.com/ceph/ceph-csi/internal/cephfs/mounter"
+	"github.com/ceph/ceph-csi/internal/cephfs/store"
 	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
 	"github.com/ceph/ceph-csi/internal/journal"
@@ -87,9 +87,9 @@ func (fs *Driver) Run(conf *util.Config) {
 		CSIInstanceID = conf.InstanceID
 	}
 	// Create an instance of the volume journal
-	core.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
+	store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)

-	core.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
+	store.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
 	// Initialize default library driver

 	fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
@@ -25,7 +25,7 @@ import (
 	"strings"
 	"sync"

-	"github.com/ceph/ceph-csi/internal/cephfs/core"
+	"github.com/ceph/ceph-csi/internal/cephfs/store"
 	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )
@@ -47,7 +47,7 @@ var (

 type FuseMounter struct{}

-func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error {
+func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *store.VolumeOptions) error {
 	args := []string{
 		mountPoint,
 		"-m", volOptions.Monitors,
@@ -99,7 +99,7 @@ func (m *FuseMounter) Mount(
 	ctx context.Context,
 	mountPoint string,
 	cr *util.Credentials,
-	volOptions *core.VolumeOptions) error {
+	volOptions *store.VolumeOptions) error {
 	if err := util.CreateMountPoint(mountPoint); err != nil {
 		return err
 	}
@@ -20,7 +20,7 @@ import (
 	"context"
 	"fmt"

-	"github.com/ceph/ceph-csi/internal/cephfs/core"
+	"github.com/ceph/ceph-csi/internal/cephfs/store"
 	"github.com/ceph/ceph-csi/internal/util"
 )

@@ -31,7 +31,7 @@ const (

 type KernelMounter struct{}

-func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error {
+func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *store.VolumeOptions) error {
 	if err := execCommandErr(ctx, "modprobe", "ceph"); err != nil {
 		return err
 	}
@@ -63,7 +63,7 @@ func (m *KernelMounter) Mount(
 	ctx context.Context,
 	mountPoint string,
 	cr *util.Credentials,
-	volOptions *core.VolumeOptions) error {
+	volOptions *store.VolumeOptions) error {
 	if err := util.CreateMountPoint(mountPoint); err != nil {
 		return err
 	}
@@ -23,7 +23,7 @@ import (
 	"os/exec"
 	"strings"

-	"github.com/ceph/ceph-csi/internal/cephfs/core"
+	"github.com/ceph/ceph-csi/internal/cephfs/store"
 	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )
@@ -99,11 +99,11 @@ func LoadAvailableMounters(conf *util.Config) error {
 }

 type VolumeMounter interface {
-	Mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error
+	Mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *store.VolumeOptions) error
 	Name() string
 }

-func New(volOptions *core.VolumeOptions) (VolumeMounter, error) {
+func New(volOptions *store.VolumeOptions) (VolumeMounter, error) {
 	// Get the mounter from the configuration

 	wantMounter := volOptions.Mounter
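The mounter package now consumes `store.VolumeOptions` directly. A short sketch of how the node side might stage a volume with it, assuming `RootPath` and `Monitors` were resolved beforehand; the wrapper function is hypothetical:

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/mounter"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
	"github.com/ceph/ceph-csi/internal/util"
)

// stage picks a mounter from volOpts.Mounter and mounts at path.
func stage(ctx context.Context, volOpts *store.VolumeOptions, cr *util.Credentials, path string) error {
	m, err := mounter.New(volOpts) // kernel or FUSE, per configuration
	if err != nil {
		return err
	}

	return m.Mount(ctx, path, cr, volOpts)
}
```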
@@ -23,9 +23,9 @@ import (
 	"os"
 	"strings"

-	"github.com/ceph/ceph-csi/internal/cephfs/core"
 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
 	"github.com/ceph/ceph-csi/internal/cephfs/mounter"
+	"github.com/ceph/ceph-csi/internal/cephfs/store"
 	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
 	"github.com/ceph/ceph-csi/internal/util"
@@ -46,7 +46,7 @@ type NodeServer struct {
 }

 func getCredentialsForVolume(
-	volOptions *core.VolumeOptions,
+	volOptions *store.VolumeOptions,
 	req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
 	var (
 		err error
@@ -77,7 +77,7 @@ func getCredentialsForVolume(
 func (ns *NodeServer) NodeStageVolume(
 	ctx context.Context,
 	req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
-	var volOptions *core.VolumeOptions
+	var volOptions *store.VolumeOptions
 	if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
 		return nil, err
 	}
@@ -94,21 +94,21 @@ func (ns *NodeServer) NodeStageVolume(
 	}
 	defer ns.VolumeLocks.Release(req.GetVolumeId())

-	volOptions, _, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
+	volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
 	if err != nil {
 		if !errors.Is(err, cerrors.ErrInvalidVolID) {
 			return nil, status.Error(codes.Internal, err.Error())
 		}

 		// gets mon IPs from the supplied cluster info
-		volOptions, _, err = core.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
+		volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
 		if err != nil {
 			if !errors.Is(err, cerrors.ErrNonStaticVolume) {
 				return nil, status.Error(codes.Internal, err.Error())
 			}

 			// get mon IPs from the volume context
-			volOptions, _, err = core.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
+			volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
 				req.GetSecrets())
 			if err != nil {
 				return nil, status.Error(codes.Internal, err.Error())
@@ -142,7 +142,7 @@ func (ns *NodeServer) NodeStageVolume(
 	return &csi.NodeStageVolumeResponse{}, nil
 }

-func (*NodeServer) mount(ctx context.Context, volOptions *core.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
+func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
 	stagingTargetPath := req.GetStagingTargetPath()
 	volID := fsutil.VolumeID(req.GetVolumeId())
@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package core
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/cephfs/core"
|
||||
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
|
||||
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
|
||||
"github.com/ceph/ceph-csi/internal/journal"
|
||||
@ -95,16 +96,16 @@ func CheckVolExists(ctx context.Context,
|
||||
}
|
||||
imageUUID := imageData.ImageUUID
|
||||
vid.FsSubvolName = imageData.ImageAttributes.ImageName
|
||||
volOptions.VolID = vid.FsSubvolName
|
||||
|
||||
vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
|
||||
if sID != nil || pvID != nil {
|
||||
cloneState, cloneStateErr := volOptions.getCloneState(ctx, fsutil.VolumeID(vid.FsSubvolName))
|
||||
cloneState, cloneStateErr := vol.GetCloneState(ctx)
|
||||
if cloneStateErr != nil {
|
||||
if errors.Is(cloneStateErr, cerrors.ErrVolumeNotFound) {
|
||||
if pvID != nil {
|
||||
err = cleanupCloneFromSubvolumeSnapshot(
|
||||
ctx, fsutil.VolumeID(pvID.FsSubvolName),
|
||||
fsutil.VolumeID(vid.FsSubvolName),
|
||||
parentVolOpt)
|
||||
err = vol.CleanupSnapshotFromSubvolume(
|
||||
ctx, &parentVolOpt.SubVolume)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -117,29 +118,27 @@ func CheckVolExists(ctx context.Context,
|
||||
|
||||
return nil, err
|
||||
}
|
||||
if cloneState == cephFSCloneInprogress {
|
||||
if cloneState == core.CephFSCloneInprogress {
|
||||
return nil, cerrors.ErrCloneInProgress
|
||||
}
|
||||
if cloneState == cephFSClonePending {
|
||||
if cloneState == core.CephFSClonePending {
|
||||
return nil, cerrors.ErrClonePending
|
||||
}
|
||||
if cloneState == cephFSCloneFailed {
|
||||
if cloneState == core.CephFSCloneFailed {
|
||||
log.ErrorLog(ctx,
|
||||
"clone failed, deleting subvolume clone. vol=%s, subvol=%s subvolgroup=%s",
|
||||
volOptions.FsName,
|
||||
vid.FsSubvolName,
|
||||
volOptions.SubvolumeGroup)
|
||||
err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vid.FsSubvolName), true)
|
||||
err = vol.PurgeVolume(ctx, true)
|
||||
if err != nil {
|
||||
            log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)

            return nil, err
        }
        if pvID != nil {
            err = cleanupCloneFromSubvolumeSnapshot(
                ctx, fsutil.VolumeID(pvID.FsSubvolName),
                fsutil.VolumeID(vid.FsSubvolName),
                parentVolOpt)
            err = vol.CleanupSnapshotFromSubvolume(
                ctx, &parentVolOpt.SubVolume)
            if err != nil {
                return nil, err
            }
@ -149,21 +148,18 @@ func CheckVolExists(ctx context.Context,

            return nil, err
        }
        if cloneState != cephFSCloneComplete {
        if cloneState != core.CephFSCloneComplete {
            return nil, fmt.Errorf("clone is not in complete state for %s", vid.FsSubvolName)
        }
    }
    volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
    volOptions.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
    if err != nil {
        if errors.Is(err, cerrors.ErrVolumeNotFound) {
            // If the subvolume is not present, cleanup the stale snapshot
            // created for clone.
            if parentVolOpt != nil && pvID != nil {
                err = cleanupCloneFromSubvolumeSnapshot(
                    ctx,
                    fsutil.VolumeID(pvID.FsSubvolName),
                    fsutil.VolumeID(vid.FsSubvolName),
                    parentVolOpt)
                err = vol.CleanupSnapshotFromSubvolume(
                    ctx, &parentVolOpt.SubVolume)
                if err != nil {
                    return nil, err
                }
@ -194,11 +190,8 @@ func CheckVolExists(ctx context.Context,
        vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)

    if parentVolOpt != nil && pvID != nil {
        err = cleanupCloneFromSubvolumeSnapshot(
            ctx,
            fsutil.VolumeID(pvID.FsSubvolName),
            fsutil.VolumeID(vid.FsSubvolName),
            parentVolOpt)
        err = vol.CleanupSnapshotFromSubvolume(
            ctx, &parentVolOpt.SubVolume)
        if err != nil {
            return nil, err
        }
@ -280,7 +273,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin
    if err != nil {
        return nil, err
    }

    volOptions.VolID = vid.FsSubvolName
    // generate the volume ID to return to the CO system
    vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
        "", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
@ -300,7 +293,7 @@ func ReserveSnap(
    ctx context.Context,
    volOptions *VolumeOptions,
    parentSubVolName string,
    snap *CephfsSnapshot,
    snap *SnapshotOption,
    cr *util.Credentials) (*SnapshotIdentifier, error) {
    var (
        vid SnapshotIdentifier
@ -373,9 +366,8 @@ hence safe to garbage collect.
func CheckSnapExists(
    ctx context.Context,
    volOptions *VolumeOptions,
    parentSubVolName string,
    snap *CephfsSnapshot,
    cr *util.Credentials) (*SnapshotIdentifier, *SnapshotInfo, error) {
    snap *SnapshotOption,
    cr *util.Credentials) (*SnapshotIdentifier, *core.SnapshotInfo, error) {
    // Connect to cephfs' default radosNamespace (csi)
    j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
    if err != nil {
@ -384,7 +376,7 @@ func CheckSnapExists(
    defer j.Destroy()

    snapData, err := j.CheckReservation(
        ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, parentSubVolName, "")
        ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "")
    if err != nil {
        return nil, nil, err
    }
@ -395,7 +387,8 @@ func CheckSnapExists(
    snapUUID := snapData.ImageUUID
    snapID := snapData.ImageAttributes.ImageName
    sid.FsSnapshotName = snapData.ImageAttributes.ImageName
    snapInfo, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
    snapClient := core.NewSnapshot(volOptions.conn, snapID, &volOptions.SubVolume)
    snapInfo, err := snapClient.GetSnapshotInfo(ctx)
    if err != nil {
        if errors.Is(err, cerrors.ErrSnapNotFound) {
            err = j.UndoReservation(ctx, volOptions.MetadataPool,
@ -409,7 +402,7 @@ func CheckSnapExists(

    defer func() {
        if err != nil {
            err = volOptions.DeleteSnapshot(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
            err = snapClient.DeleteSnapshot(ctx)
            if err != nil {
                log.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)

@ -435,7 +428,7 @@ func CheckSnapExists(
        return nil, nil, err
    }
    log.DebugLog(ctx, "Found existing snapshot (%s) with subvolume name (%s) for request (%s)",
        snapData.ImageAttributes.RequestName, parentSubVolName, sid.FsSnapshotName)
        snapData.ImageAttributes.RequestName, volOptions.VolID, sid.FsSnapshotName)

    return sid, &snapInfo, nil
}
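The hunks above replace the free function cleanupCloneFromSubvolumeSnapshot with a method on the new subvolume client. A minimal sketch of the new call shape: core.NewSubVolume, CleanupSnapshotFromSubvolume, and GetConnection are taken from this diff, while the wrapper function, its name, and the VolumeIdentifier parameter type are hypothetical.

func cleanupParentSnapshotSketch(ctx context.Context,
    volOptions, parentVolOpt *VolumeOptions, pvID *VolumeIdentifier) error {
    if parentVolOpt == nil || pvID == nil {
        return nil // the volume was not cloned from a snapshot, nothing to clean up
    }
    vol := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

    // drop the intermediate snapshot taken on the parent subvolume
    return vol.CleanupSnapshotFromSubvolume(ctx, &parentVolOpt.SubVolume)
}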
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package core
package store

import (
    "context"
@ -25,6 +25,7 @@ import (

    "github.com/container-storage-interface/spec/lib/go/csi"

    "github.com/ceph/ceph-csi/internal/cephfs/core"
    cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
    fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
    "github.com/ceph/ceph-csi/internal/util"
@ -32,27 +33,23 @@ import (
)

type VolumeOptions struct {
    core.SubVolume
    TopologyPools       *[]util.TopologyConstrainedPool
    TopologyRequirement *csi.TopologyRequirement
    Topology            map[string]string
    RequestName         string
    NamePrefix          string
    Size                int64
    ClusterID           string
    FsName              string
    FscID               int64
    MetadataPool        string
    // ReservedID represents the ID reserved for a subvolume
    ReservedID string
    MetadataPool       string
    Monitors           string `json:"monitors"`
    Pool               string `json:"pool"`
    RootPath           string `json:"rootPath"`
    Mounter            string `json:"mounter"`
    ProvisionVolume    bool   `json:"provisionVolume"`
    KernelMountOptions string `json:"kernelMountOptions"`
    FuseMountOptions   string `json:"fuseMountOptions"`
    SubvolumeGroup     string
    Features           []string

    // conn is a connection to the Ceph cluster obtained from a ConnPool
    conn *util.ClusterConnection
@ -180,6 +177,11 @@ func GetClusterInformation(options map[string]string) (*util.ClusterInfo, error)
    return clusterData, nil
}

// GetConnection returns the cluster connection.
func (vo *VolumeOptions) GetConnection() *util.ClusterConnection {
    return vo.conn
}

// NewVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters.
func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
@ -230,7 +232,7 @@ func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
        return nil, err
    }

    fs := NewFileSystem(opts.conn)
    fs := core.NewFileSystem(opts.conn)
    opts.FscID, err = fs.GetFscID(ctx, opts.FsName)
    if err != nil {
        return nil, err
@ -281,6 +283,7 @@ func NewVolumeOptionsFromVolID(
    }
    volOptions.ClusterID = vi.ClusterID
    vid.VolumeID = volID
    volOptions.VolID = volID
    volOptions.FscID = vi.LocationID

    if volOptions.Monitors, err = util.Mons(util.CsiConfigFile, vi.ClusterID); err != nil {
@ -309,7 +312,7 @@ func NewVolumeOptionsFromVolID(
        }
    }()

    fs := NewFileSystem(volOptions.conn)
    fs := core.NewFileSystem(volOptions.conn)
    volOptions.FsName, err = fs.GetFsName(ctx, volOptions.FscID)
    if err != nil {
        return nil, nil, err
@ -358,8 +361,9 @@ func NewVolumeOptionsFromVolID(
    }

    volOptions.ProvisionVolume = true

    info, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
    volOptions.SubVolume.VolID = vid.FsSubvolName
    vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
    info, err := vol.GetSubVolumeInfo(ctx)
    if err == nil {
        volOptions.RootPath = info.Path
        volOptions.Features = info.Features
@ -367,7 +371,7 @@ func NewVolumeOptionsFromVolID(
    }

    if errors.Is(err, cerrors.ErrInvalidCommand) {
        volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
        volOptions.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
    }

    return &volOptions, &vid, err
@ -410,7 +414,7 @@ func NewVolumeOptionsFromMonitorList(
            return nil, nil, err
        }

        opts.RootPath = GetVolumeRootPathCephDeprecated(fsutil.VolumeID(volID))
        opts.RootPath = core.GetVolumeRootPathCephDeprecated(fsutil.VolumeID(volID))
    } else {
        if err = extractOption(&opts.RootPath, "rootPath", options); err != nil {
            return nil, nil, err
@ -509,7 +513,7 @@ func NewVolumeOptionsFromStaticVolume(
func NewSnapshotOptionsFromID(
    ctx context.Context,
    snapID string,
    cr *util.Credentials) (*VolumeOptions, *SnapshotInfo, *SnapshotIdentifier, error) {
    cr *util.Credentials) (*VolumeOptions, *core.SnapshotInfo, *SnapshotIdentifier, error) {
    var (
        vi         util.CSIIdentifier
        volOptions VolumeOptions
@ -551,7 +555,7 @@ func NewSnapshotOptionsFromID(
        }
    }()

    fs := NewFileSystem(volOptions.conn)
    fs := core.NewFileSystem(volOptions.conn)
    volOptions.FsName, err = fs.GetFsName(ctx, volOptions.FscID)
    if err != nil {
        return &volOptions, nil, &sid, err
@ -579,14 +583,17 @@ func NewSnapshotOptionsFromID(
    sid.FsSnapshotName = imageAttributes.ImageName
    sid.FsSubvolName = imageAttributes.SourceName

    subvolInfo, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(sid.FsSubvolName))
    volOptions.SubVolume.VolID = sid.FsSubvolName
    vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)

    subvolInfo, err := vol.GetSubVolumeInfo(ctx)
    if err != nil {
        return &volOptions, nil, &sid, err
    }
    volOptions.Features = subvolInfo.Features
    volOptions.Size = subvolInfo.BytesQuota

    info, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
    snap := core.NewSnapshot(volOptions.conn, sid.FsSnapshotName, &volOptions.SubVolume)
    info, err := snap.GetSnapshotInfo(ctx)
    if err != nil {
        return &volOptions, nil, &sid, err
    }
@ -594,8 +601,17 @@ func NewSnapshotOptionsFromID(
    return &volOptions, &info, &sid, nil
}

func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (snap *CephfsSnapshot, err error) {
    cephfsSnap := &CephfsSnapshot{}
// SnapshotOption is a struct that holds the information about the snapshot.
type SnapshotOption struct {
    ReservedID  string // ID reserved for the snapshot.
    RequestName string // Request name of the snapshot.
    ClusterID   string // Cluster ID to identify the ceph cluster connection information.
    Monitors    string // Monitors of the ceph cluster.
    NamePrefix  string // Name prefix of the snapshot.
}

func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (*SnapshotOption, error) {
    cephfsSnap := &SnapshotOption{}
    cephfsSnap.RequestName = req.GetName()
    snapOptions := req.GetParameters()
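The GenSnapFromOptions hunk is cut off above. A plausible continuation, shown only as a sketch: GetClusterInformation is defined earlier in this file, but the parameter key, the ClusterInfo field names, and the monitor join are assumptions, not taken from this diff.

func genSnapFromOptionsSketch(req *csi.CreateSnapshotRequest) (*SnapshotOption, error) {
    cephfsSnap := &SnapshotOption{RequestName: req.GetName()}
    snapOptions := req.GetParameters()

    clusterData, err := GetClusterInformation(snapOptions) // defined earlier in this file
    if err != nil {
        return nil, err // the snapshot cannot be located without a cluster ID
    }
    cephfsSnap.ClusterID = clusterData.ClusterID                  // assumed field name
    cephfsSnap.Monitors = strings.Join(clusterData.Monitors, ",") // assumed field name

    if prefix, ok := snapOptions["snapshotNamePrefix"]; ok { // assumed parameter key
        cephfsSnap.NamePrefix = prefix
    }

    return cephfsSnap, nil
}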
@ -115,8 +115,8 @@ func (cs *ControllerServer) parseVolCreateRequest(
            "multi node access modes are only supported on rbd `block` type volumes")
    }

    if imageFeatures, ok := req.GetParameters()["imageFeatures"]; checkImageFeatures(imageFeatures, ok, true) {
        return nil, status.Error(codes.InvalidArgument, "missing required parameter imageFeatures")
    if imageFeatures, ok := req.GetParameters()["imageFeatures"]; !checkValidImageFeatures(imageFeatures, ok) {
        return nil, status.Error(codes.InvalidArgument, "empty imageFeatures parameter")
    }

    // if it's NOT SINGLE_NODE_WRITER, and it's BLOCK we'll set the parameter to ignore the in-use checks

@ -215,12 +215,33 @@ func populateRbdVol(
        rv.RbdImageName = imageAttributes.ImageName
    }

    err = rv.Connect(cr)
    if err != nil {
        log.ErrorLog(ctx, "failed to connect to volume %s: %v", rv, err)

        return nil, status.Error(codes.Internal, err.Error())
    }
    // in case of any error call Destroy for cleanup.
    defer func() {
        if err != nil {
            rv.Destroy()
        }
    }()
    // get the image details from the ceph cluster.
    err = rv.getImageInfo()
    if err != nil {
        log.ErrorLog(ctx, "failed to get image details %s: %v", rv, err)

        return nil, status.Error(codes.Internal, err.Error())
    }

    if req.GetVolumeContext()["mounter"] == rbdDefaultMounter &&
        !isKrbdFeatureSupported(ctx, req.GetVolumeContext()["imageFeatures"]) {
        !isKrbdFeatureSupported(ctx, strings.Join(rv.ImageFeatureSet.Names(), ",")) {
        if !parseBoolOption(ctx, req.GetVolumeContext(), tryOtherMounters, false) {
            log.ErrorLog(ctx, "unsupported krbd Feature, set `tryOtherMounters:true` or fix krbd driver")
            err = errors.New("unsupported krbd Feature")

            return nil, status.Errorf(codes.Internal, "unsupported krbd Feature")
            return nil, status.Error(codes.Internal, err.Error())
        }
        // fallback to rbd-nbd,
        rv.Mounter = rbdNbdMounter
@ -299,24 +320,10 @@ func (ns *NodeServer) NodeStageVolume(
    }

    isStaticVol := parseBoolOption(ctx, req.GetVolumeContext(), staticVol, false)

    // throw error when imageFeatures parameter is missing or empty
    // for backward compatibility, ignore error for non-static volumes from older cephcsi version
    if imageFeatures, ok := req.GetVolumeContext()["imageFeatures"]; checkImageFeatures(imageFeatures, ok, isStaticVol) {
        return nil, status.Error(codes.InvalidArgument, "missing required parameter imageFeatures")
    }

    rv, err := populateRbdVol(ctx, req, cr, req.GetSecrets())
    if err != nil {
        return nil, err
    }

    err = rv.Connect(cr)
    if err != nil {
        log.ErrorLog(ctx, "failed to connect to volume %s: %v", rv, err)

        return nil, status.Error(codes.Internal, err.Error())
    }
    defer rv.Destroy()

    if isHealer {
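populateRbdVol now owns both Connect and the error-path Destroy, so NodeStageVolume no longer connects and destroys the handle itself. The pattern in isolation, with stand-in types; everything named here is illustrative, not ceph-csi code:

package main

import (
    "errors"
    "fmt"
)

type handle struct{ open bool }

func (h *handle) Connect() error { h.open = true; return nil }
func (h *handle) Destroy()       { h.open = false }
func (h *handle) probe() error   { return errors.New("probe failed") }

// acquire connects first, then registers a deferred cleanup that fires only
// when a later step sets the named return err: callers either get a usable
// handle or no handle at all.
func acquire() (h *handle, err error) {
    h = &handle{}
    if err = h.Connect(); err != nil {
        return nil, err
    }
    defer func() {
        if err != nil {
            h.Destroy() // any failure after Connect releases the handle
        }
    }()
    if err = h.probe(); err != nil {
        return nil, err
    }

    return h, nil
}

func main() {
    _, err := acquire()
    fmt.Println("acquire:", err)
}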
@ -199,14 +199,15 @@ func waitForPath(ctx context.Context, pool, namespace, image string, maxRetries

// set features available with rbd-nbd, and NBD module loaded status.
func setRbdNbdToolFeatures() {
    var stderr string
    // check if the module is loaded or compiled in
    _, err := os.Stat(fmt.Sprintf("/sys/module/%s", moduleNbd))
    if os.IsNotExist(err) {
        // try to load the module
        _, _, err = util.ExecCommand(context.TODO(), "modprobe", moduleNbd)
        _, stderr, err = util.ExecCommand(context.TODO(), "modprobe", moduleNbd)
        if err != nil {
            hasNBD = false
            log.WarningLogMsg("rbd-nbd: nbd modprobe failed with error %v", err)
            log.WarningLogMsg("rbd-nbd: nbd modprobe failed (%v): %q", err, stderr)
        }
    }
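Both modprobe hunks above switch from discarding stderr to logging it, so failures carry the kernel's actual message instead of a bare exit status. A standalone equivalent with os/exec; the module name is just an example:

package main

import (
    "bytes"
    "fmt"
    "os/exec"
)

func main() {
    var stderr bytes.Buffer
    cmd := exec.Command("modprobe", "nbd")
    cmd.Stderr = &stderr // capture what modprobe prints, not just the exit error
    if err := cmd.Run(); err != nil {
        fmt.Printf("modprobe failed (%v): %q\n", err, stderr.String())
    }
}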
@ -209,11 +209,15 @@ var supportedFeatures = map[string]imageFeature{
        needRbdNbd: true,
        dependsOn:  []string{librbd.FeatureNameExclusiveLock},
    },
    librbd.FeatureNameDeepFlatten: {
        needRbdNbd: false,
    },
}

// GetKrbdSupportedFeatures loads the module if needed and returns the
// supported features attribute as a string.
func GetKrbdSupportedFeatures() (string, error) {
    var stderr string
    // check if the module is loaded or compiled in
    _, err := os.Stat(krbdSupportedFeaturesFile)
    if err != nil {
@ -223,9 +227,9 @@ func GetKrbdSupportedFeatures() (string, error) {
            return "", err
        }
        // try to load the module
        _, _, err = util.ExecCommand(context.TODO(), "modprobe", rbdDefaultMounter)
        _, stderr, err = util.ExecCommand(context.TODO(), "modprobe", rbdDefaultMounter)
        if err != nil {
            log.ErrorLogMsg("modprobe failed: %v", err)
            log.ErrorLogMsg("modprobe failed (%v): %q", err, stderr)

            return "", err
        }
@ -467,11 +471,10 @@ func (ri *rbdImage) isInUse() (bool, error) {
    return len(watchers) > defaultWatchers, nil
}

// checkImageFeatures checks the presence of the imageFeatures parameter. It
// returns true when imageFeatures is missing or empty; the missing parameter is
// skipped for non-static volumes for backward compatibility.
func checkImageFeatures(imageFeatures string, ok, static bool) bool {
    return static && (!ok || imageFeatures == "")
// checkValidImageFeatures checks the presence of the imageFeatures parameter.
// It returns false when imageFeatures is present and empty.
func checkValidImageFeatures(imageFeatures string, ok bool) bool {
    return !(ok && imageFeatures == "")
}

// isNotMountPoint checks whether MountPoint does not exists and
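GetKrbdSupportedFeatures reads a kernel attribute and feeds the feature check used in populateRbdVol. A sketch of that check; the sysfs path hidden behind krbdSupportedFeaturesFile and the layering bit value are assumptions, not taken from this diff:

package main

import (
    "fmt"
    "os"
    "strconv"
    "strings"
)

func main() {
    // assumed path; the real code reads it via the krbdSupportedFeaturesFile constant
    raw, err := os.ReadFile("/sys/bus/rbd/supported_features")
    if err != nil {
        fmt.Println("krbd attribute unavailable:", err)
        return
    }
    // the kernel prints a hex bitmask such as 0x3d; base 0 accepts the 0x prefix
    mask, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 0, 64)
    if err != nil {
        fmt.Println("unexpected attribute format:", err)
        return
    }
    const layering = uint64(1) << 0 // assumed bit position for the "layering" feature
    fmt.Println("layering supported by krbd:", mask&layering != 0)
}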
@ -152,6 +152,14 @@ func TestValidateImageFeatures(t *testing.T) {
            true,
            "invalid feature ayering",
        },
        {
            "deep-flatten",
            &rbdVolume{
                Mounter: rbdDefaultMounter,
            },
            false,
            "",
        },
    }

    for _, test := range tests {
@ -346,3 +354,35 @@ func TestIsKrbdFeatureSupported(t *testing.T) {
        })
    }
}

func Test_checkValidImageFeatures(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name          string
        imageFeatures string
        ok            bool
        want          bool
    }{
        {
            name:          "test for valid image features",
            imageFeatures: "layering,exclusive-lock,object-map,fast-diff,deep-flatten",
            ok:            true,
            want:          true,
        },
        {
            name:          "test for empty image features",
            imageFeatures: "",
            ok:            true,
            want:          false,
        },
    }
    for _, tt := range tests {
        tc := tt
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            if got := checkValidImageFeatures(tc.imageFeatures, tc.ok); got != tc.want {
                t.Errorf("checkValidImageFeatures() = %v, want %v", got, tc.want)
            }
        })
    }
}
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (9 changes, generated, vendored)
@ -9817,6 +9817,9 @@ var awsPartition = partition{
            endpointKey{
                Region: "ap-southeast-2",
            }: endpoint{},
            endpointKey{
                Region: "ap-southeast-3",
            }: endpoint{},
            endpointKey{
                Region: "ca-central-1",
            }: endpoint{},
@ -13547,6 +13550,9 @@ var awsPartition = partition{
        },
        "mq": service{
            Endpoints: serviceEndpoints{
                endpointKey{
                    Region: "af-south-1",
                }: endpoint{},
                endpointKey{
                    Region: "ap-east-1",
                }: endpoint{},
@ -20669,6 +20675,9 @@ var awsPartition = partition{
            endpointKey{
                Region: "ap-northeast-2",
            }: endpoint{},
            endpointKey{
                Region: "ap-northeast-3",
            }: endpoint{},
            endpointKey{
                Region: "ap-south-1",
            }: endpoint{},
vendor/github.com/aws/aws-sdk-go/aws/version.go (2 changes, generated, vendored)
@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.42.48"
const SDKVersion = "1.43.3"
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go (4 changes, generated, vendored)
@ -272,6 +272,9 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)

    switch value := v.Interface().(type) {
    case string:
        if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
            value = base64.StdEncoding.EncodeToString([]byte(value))
        }
        str = value
    case []byte:
        str = base64.StdEncoding.EncodeToString(value)
@ -306,5 +309,6 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
        err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
        return "", err
    }

    return str, nil
}
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go (7 changes, generated, vendored)
@ -204,6 +204,13 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro

    switch v.Interface().(type) {
    case *string:
        if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
            b, err := base64.StdEncoding.DecodeString(header)
            if err != nil {
                return fmt.Errorf("failed to decode JSONValue, %v", err)
            }
            header = string(b)
        }
        v.Set(reflect.ValueOf(&header))
    case []byte:
        b, err := base64.StdEncoding.DecodeString(header)
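The two rest hunks above base64-encode string headers tagged suppressedJSONValue when building a request and decode them when unmarshaling a response. The round-trip they rely on, in isolation; the header value is a placeholder:

package main

import (
    "encoding/base64"
    "fmt"
)

func main() {
    value := `{"k":"v"}` // a JSON document carried in an HTTP header
    encoded := base64.StdEncoding.EncodeToString([]byte(value))
    decoded, err := base64.StdEncoding.DecodeString(encoded)
    if err != nil {
        panic(err)
    }
    fmt.Println(encoded, "->", string(decoded))
}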
vendor/github.com/aws/aws-sdk-go/service/ec2/api.go (230 changes, generated, vendored)
@ -4790,7 +4790,7 @@ func (c *EC2) CreateFleetRequest(input *CreateFleetInput) (req *request.Request,
// You can create a single EC2 Fleet that includes multiple launch specifications
// that vary by instance type, AMI, Availability Zone, or subnet.
//
// For more information, see Launching an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html)
// For more information, see EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -10283,7 +10283,7 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques
//    * Up to 1000 instances can be terminated in a single request to delete
//    instant fleets.
//
// For more information, see Deleting an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet)
// For more information, see Delete an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -14900,8 +14900,6 @@ func (c *EC2) DeregisterInstanceEventNotificationAttributesRequest(input *Deregi

// DeregisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud.
//
// c
//
// Deregisters tag keys to prevent tags that have the specified tag keys from
// being included in scheduled event notifications for resources in the Region.
//
@ -18193,7 +18191,7 @@ func (c *EC2) DescribeFleetInstancesRequest(input *DescribeFleetInstancesInput)
//
// Describes the running instances for the specified EC2 Fleet.
//
// For more information, see Monitoring your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html#monitor-ec2-fleet)
// For more information, see Monitor your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#monitor-ec2-fleet)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -18276,7 +18274,7 @@ func (c *EC2) DescribeFleetsRequest(input *DescribeFleetsInput) (req *request.Re
//
// Describes the specified EC2 Fleets or all of your EC2 Fleets.
//
// For more information, see Monitoring your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html#monitor-ec2-fleet)
// For more information, see Monitor your EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#monitor-ec2-fleet)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -43190,7 +43188,7 @@ func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *reques
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html)
// in the Amazon EC2 User Guide.
//
// To disable detailed monitoring, see .
// To disable detailed monitoring, see UnmonitorInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_UnmonitorInstances.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -44123,7 +44121,7 @@ func (c *EC2) RegisterInstanceEventNotificationAttributesRequest(input *Register
// Registers a set of tag keys to include in scheduled event notifications for
// your resources.
//
// To remove tags, use .
// To remove tags, use DeregisterInstanceEventNotificationAttributes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeregisterInstanceEventNotificationAttributes.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -54486,7 +54484,8 @@ type AvailabilityZone struct {
    // The name of the Region.
    RegionName *string `locationName:"regionName" type:"string"`

    // The state of the Availability Zone, Local Zone, or Wavelength Zone.
    // The state of the Availability Zone, Local Zone, or Wavelength Zone. This
    // value is always available.
    State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"`

    // The ID of the Availability Zone, Local Zone, or Wavelength Zone.
@ -61415,11 +61414,11 @@ type CreateFleetError struct {
    _ struct{} `type:"structure"`

    // The error code that indicates why the instance could not be launched. For
    // more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    // more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    ErrorCode *string `locationName:"errorCode" type:"string"`

    // The error message that describes why the instance could not be launched.
    // For more information about error messages, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    // For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    ErrorMessage *string `locationName:"errorMessage" type:"string"`

    // The launch templates and overrides that were used for launching the instances.
@ -61478,7 +61477,7 @@ type CreateFleetInput struct {
    _ struct{} `type:"structure"`

    // Unique, case-sensitive identifier that you provide to ensure the idempotency
    // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    ClientToken *string `type:"string"`

    // Reserved.
@ -64773,10 +64772,9 @@ type CreateNetworkInterfaceInput struct {
    // The IDs of one or more security groups.
    Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`

    // Indicates the type of network interface. To create an Elastic Fabric Adapter
    // (EFA), specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html)
    // in the Amazon Elastic Compute Cloud User Guide. To create a trunk network
    // interface, specify trunk.
    // The type of network interface. The default is interface.
    //
    // The only supported values are efa and trunk.
    InterfaceType *string `type:"string" enum:"NetworkInterfaceCreationType"`

    // The number of IPv4 prefixes that Amazon Web Services automatically assigns
@ -77529,7 +77527,7 @@ type DescribeAvailabilityZonesInput struct {
    //    * region-name - The name of the Region for the Zone (for example, us-east-1).
    //
    //    * state - The state of the Availability Zone, the Local Zone, or the Wavelength
    //    Zone (available | information | impaired | unavailable).
    //    Zone (available).
    //
    //    * zone-id - The ID of the Availability Zone (for example, use1-az1), the
    //    Local Zone (for example, usw2-lax1-az1), or the Wavelength Zone (for example,
@ -78057,6 +78055,9 @@ type DescribeCapacityReservationsInput struct {
    //    instances that have matching attributes (instance type, platform, and
    //    Availability Zone), and explicitly target the Capacity Reservation. This
    //    ensures that only permitted instances can use the reserved capacity.
    //
    //    * placement-group-arn - The ARN of the cluster placement group in which
    //    the Capacity Reservation was created.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The maximum number of results to return for the request in a single page.
@ -79159,11 +79160,12 @@ type DescribeCoipPoolsInput struct {
    // it is UnauthorizedOperation.
    DryRun *bool `type:"boolean"`

    // The filters. The following are the possible values:
    // One or more filters.
    //
    //    * coip-pool.pool-id
    //    * coip-pool.local-gateway-route-table-id - The ID of the local gateway
    //    route table.
    //
    //    * coip-pool.local-gateway-route-table-id
    //    * coip-pool.pool-id - The ID of the address pool.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The maximum number of results to return with a single call. To retrieve the
@ -80574,11 +80576,11 @@ type DescribeFleetError struct {
    _ struct{} `type:"structure"`

    // The error code that indicates why the instance could not be launched. For
    // more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    // more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    ErrorCode *string `locationName:"errorCode" type:"string"`

    // The error message that describes why the instance could not be launched.
    // For more information about error messages, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    // For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
    ErrorMessage *string `locationName:"errorMessage" type:"string"`

    // The launch templates and overrides that were used for launching the instances.
@ -80970,7 +80972,10 @@ type DescribeFleetsInput struct {
    //    * type - The type of request (instant | request | maintain).
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The ID of the EC2 Fleets.
    // The IDs of the EC2 Fleets.
    //
    // If a fleet is of type instant, you must specify the fleet ID, otherwise it
    // does not appear in the response.
    FleetIds []*string `locationName:"FleetId" type:"list"`

    // The maximum number of results to return in a single call. Specify a value
@ -83812,8 +83817,8 @@ type DescribeInstanceTypesInput struct {
    //    * instance-storage-info.disk.type - The storage technology for the local
    //    instance storage disks (hdd | ssd).
    //
    //    * instance-storage-info.encryption-supported - Indicates whether data
    //    is encrypted at rest (required | unsupported).
    //    * instance-storage-info.encryption-support - Indicates whether data is
    //    encrypted at rest (required | supported | unsupported).
    //
    //    * instance-storage-info.nvme-support - Indicates whether non-volatile
    //    memory express (NVMe) is supported for instance store (required | supported
@ -83851,6 +83856,9 @@ type DescribeInstanceTypesInput struct {
    //    * network-info.ipv6-supported - Indicates whether the instance type supports
    //    IPv6 (true | false).
    //
    //    * network-info.maximum-network-cards - The maximum number of network cards
    //    per instance.
    //
    //    * network-info.maximum-network-interfaces - The maximum number of network
    //    interfaces per instance.
    //
@ -85513,6 +85521,9 @@ type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput struct
    //
    //    * local-gateway-id - The ID of a local gateway.
    //
    //    * local-gateway-route-table-arn - The Amazon Resource Name (ARN) of the
    //    local gateway route table for the virtual interface group.
    //
    //    * local-gateway-route-table-id - The ID of the local gateway route table.
    //
    //    * local-gateway-route-table-virtual-interface-group-association-id - The
@ -85521,6 +85532,9 @@ type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput struct
    //    * local-gateway-route-table-virtual-interface-group-id - The ID of the
    //    virtual interface group.
    //
    //    * owner-id - The ID of the Amazon Web Services account that owns the local
    //    gateway virtual interface group association.
    //
    //    * state - The state of the association.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

@ -85650,10 +85664,16 @@ type DescribeLocalGatewayRouteTableVpcAssociationsInput struct {
    //
    //    * local-gateway-id - The ID of a local gateway.
    //
    //    * local-gateway-route-table-arn - The Amazon Resource Name (ARN) of the
    //    local gateway route table for the association.
    //
    //    * local-gateway-route-table-id - The ID of the local gateway route table.
    //
    //    * local-gateway-route-table-vpc-association-id - The ID of the association.
    //
    //    * owner-id - The ID of the Amazon Web Services account that owns the local
    //    gateway route table for the association.
    //
    //    * state - The state of the association.
    //
    //    * vpc-id - The ID of the VPC.
@ -85785,10 +85805,16 @@ type DescribeLocalGatewayRouteTablesInput struct {
    //
    //    * local-gateway-id - The ID of a local gateway.
    //
    //    * local-gateway-route-table-arn - The Amazon Resource Name (ARN) of the
    //    local gateway route table.
    //
    //    * local-gateway-route-table-id - The ID of a local gateway route table.
    //
    //    * outpost-arn - The Amazon Resource Name (ARN) of the Outpost.
    //
    //    * owner-id - The ID of the Amazon Web Services account that owns the local
    //    gateway route table.
    //
    //    * state - The state of the local gateway route table.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

@ -85918,10 +85944,13 @@ type DescribeLocalGatewayVirtualInterfaceGroupsInput struct {
    //
    //    * local-gateway-id - The ID of a local gateway.
    //
    //    * local-gateway-virtual-interface-id - The ID of the virtual interface.
    //
    //    * local-gateway-virtual-interface-group-id - The ID of the virtual interface
    //    group.
    //
    //    * local-gateway-virtual-interface-id - The ID of the virtual interface.
    //
    //    * owner-id - The ID of the Amazon Web Services account that owns the local
    //    gateway virtual interface group.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The IDs of the virtual interface groups.
@ -86047,6 +86076,27 @@ type DescribeLocalGatewayVirtualInterfacesInput struct {
    DryRun *bool `type:"boolean"`

    // One or more filters.
    //
    //    * local-address - The local address.
    //
    //    * local-bgp-asn - The Border Gateway Protocol (BGP) Autonomous System
    //    Number (ASN) of the local gateway.
    //
    //    * local-gateway-id - The ID of the local gateway.
    //
    //    * local-gateway-virtual-interface-id - The ID of the virtual interface.
    //
    //    * local-gateway-virtual-interface-group-id - The ID of the virtual interface
    //    group.
    //
    //    * owner-id - The ID of the Amazon Web Services account that owns the local
    //    gateway virtual interface.
    //
    //    * peer-address - The peer address.
    //
    //    * peer-bgp-asn - The peer BGP ASN.
    //
    //    * vlan - The ID of the VLAN.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The IDs of the virtual interfaces.
@ -86171,24 +86221,19 @@ type DescribeLocalGatewaysInput struct {
    // it is UnauthorizedOperation.
    DryRun *bool `type:"boolean"`

    // One or more filters.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // One or more filters.
    //
    //    * local-gateway-id - The ID of a local gateway.
    //
    //    * local-gateway-route-table-id - The ID of the local gateway route table.
    //
    //    * local-gateway-route-table-virtual-interface-group-association-id - The
    //    ID of the association.
    //
    //    * local-gateway-route-table-virtual-interface-group-id - The ID of the
    //    virtual interface group.
    //
    //    * outpost-arn - The Amazon Resource Name (ARN) of the Outpost.
    //
    //    * owner-id - The ID of the Amazon Web Services account that owns the local
    //    gateway.
    //
    //    * state - The state of the association.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The IDs of the local gateways.
    LocalGatewayIds []*string `locationName:"LocalGatewayId" locationNameList:"item" type:"list"`

    // The maximum number of results to return with a single call. To retrieve the
@ -87791,6 +87836,12 @@ type DescribeNetworkInterfacesInput struct {
    //    * ipv6-addresses.ipv6-address - An IPv6 address associated with the network
    //    interface.
    //
    //    * interface-type - The type of network interface (api_gateway_managed
    //    | aws_codestar_connections_managed | branch | efa | gateway_load_balancer
    //    | gateway_load_balancer_endpoint | global_accelerator_managed | interface
    //    | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer
    //    | quicksight | transit_gateway | trunk | vpc_endpoint).
    //
    //    * mac-address - The MAC address of the network interface.
    //
    //    * network-interface-id - The ID of the network interface.
@ -99447,7 +99498,7 @@ type EbsBlockDevice struct {
    // Encrypted volumes can only be attached to instances that support Amazon EBS
    // encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances).
    //
    // This parameter is not returned by .
    // This parameter is not returned by DescribeImageAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImageAttribute.html).
    Encrypted *bool `locationName:"encrypted" type:"boolean"`

    // The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes,
@ -103794,7 +103845,7 @@ type FleetData struct {
    ActivityStatus *string `locationName:"activityStatus" type:"string" enum:"FleetActivityStatus"`

    // Unique, case-sensitive identifier that you provide to ensure the idempotency
    // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
    //
    // Constraints: Maximum 64 ASCII characters
    ClientToken *string `locationName:"clientToken" type:"string"`
@ -105716,15 +105767,17 @@ type GetCoipPoolUsageInput struct {
    // it is UnauthorizedOperation.
    DryRun *bool `type:"boolean"`

    // The filters. The following are the possible values:
    // One or more filters.
    //
    //    * coip-address-usage.allocation-id
    //    * coip-address-usage.allocation-id - The allocation ID of the address.
    //
    //    * coip-address-usage.aws-account-id
    //    * coip-address-usage.aws-account-id - The ID of the Amazon Web Services
    //    account that is using the customer-owned IP address.
    //
    //    * coip-address-usage.aws-service
    //    * coip-address-usage.aws-service - The Amazon Web Services service that
    //    is using the customer-owned IP address.
    //
    //    * coip-address-usage.co-ip
    //    * coip-address-usage.co-ip - The customer-owned IP address.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The maximum number of results to return with a single call. To retrieve the
@ -114841,7 +114894,7 @@ type InstanceNetworkInterface struct {
    // One or more security groups.
    Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`

    // Describes the type of network interface.
    // The type of network interface.
    //
    // Valid values: interface | efa | trunk
    InterfaceType *string `locationName:"interfaceType" type:"string"`
@ -115198,10 +115251,6 @@ type InstanceNetworkInterfaceSpecification struct {

    // The type of network interface.
    //
    // To create an Elastic Fabric Adapter (EFA), specify efa. For more information,
    // see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html)
    // in the Amazon Elastic Compute Cloud User Guide.
    //
    // Valid values: interface | efa
    InterfaceType *string `type:"string"`

@ -120588,6 +120637,12 @@ type LaunchTemplateInstanceMetadataOptions struct {
    // not available.
    HttpTokens *string `locationName:"httpTokens" type:"string" enum:"LaunchTemplateHttpTokensState"`

    // Set to enabled to allow access to instance tags from the instance metadata.
    // Set to disabled to turn off access to instance tags from the instance metadata.
    // For more information, see Work with instance tags using the instance metadata
    // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS).
    //
    // Default: disabled
    InstanceMetadataTags *string `locationName:"instanceMetadataTags" type:"string" enum:"LaunchTemplateInstanceMetadataTagsState"`

    // The state of the metadata option changes.
@ -121887,8 +121942,8 @@ type LaunchTemplateTagSpecificationRequest struct {
    _ struct{} `type:"structure"`

    // The type of resource to tag. Currently, the resource types that support tagging
    // on creation are instance and volume. To tag a resource after it has been
    // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html).
    // on creation are instance, volume, elastic-gpu, network-interface, and spot-instances-request.
    // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html).
    ResourceType *string `type:"string" enum:"ResourceType"`

    // The tags to apply to the resource.
@ -146251,6 +146306,23 @@ type SearchLocalGatewayRoutesInput struct {
    DryRun *bool `type:"boolean"`

    // One or more filters.
    //
    //    * route-search.exact-match - The exact match of the specified filter.
    //
    //    * route-search.longest-prefix-match - The longest prefix that matches
    //    the route.
    //
    //    * route-search.subnet-of-match - The routes with a subnet that match the
    //    specified CIDR filter.
    //
    //    * route-search.supernet-of-match - The routes with a CIDR that encompass
    //    the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31
    //    routes in your route table and you specify supernet-of-match as 10.0.1.0/30,
    //    then the result returns 10.0.1.0/29.
    //
    //    * state - The state of the route.
    //
    //    * type - The route type.
    Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

    // The ID of the local gateway route table.
@ -151788,7 +151860,7 @@ type Tag struct {

    // The value of the tag.
    //
    // Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode
    // Constraints: Tag values are case-sensitive and accept a maximum of 256 Unicode
    // characters.
    Value *string `locationName:"value" type:"string"`
}
@ -165264,6 +165336,45 @@ const (

    // NetworkInterfaceTypeTrunk is a NetworkInterfaceType enum value
    NetworkInterfaceTypeTrunk = "trunk"

    // NetworkInterfaceTypeLoadBalancer is a NetworkInterfaceType enum value
    NetworkInterfaceTypeLoadBalancer = "load_balancer"

    // NetworkInterfaceTypeNetworkLoadBalancer is a NetworkInterfaceType enum value
    NetworkInterfaceTypeNetworkLoadBalancer = "network_load_balancer"

    // NetworkInterfaceTypeVpcEndpoint is a NetworkInterfaceType enum value
    NetworkInterfaceTypeVpcEndpoint = "vpc_endpoint"

    // NetworkInterfaceTypeBranch is a NetworkInterfaceType enum value
    NetworkInterfaceTypeBranch = "branch"

    // NetworkInterfaceTypeTransitGateway is a NetworkInterfaceType enum value
    NetworkInterfaceTypeTransitGateway = "transit_gateway"

    // NetworkInterfaceTypeLambda is a NetworkInterfaceType enum value
    NetworkInterfaceTypeLambda = "lambda"

    // NetworkInterfaceTypeQuicksight is a NetworkInterfaceType enum value
    NetworkInterfaceTypeQuicksight = "quicksight"

    // NetworkInterfaceTypeGlobalAcceleratorManaged is a NetworkInterfaceType enum value
    NetworkInterfaceTypeGlobalAcceleratorManaged = "global_accelerator_managed"

    // NetworkInterfaceTypeApiGatewayManaged is a NetworkInterfaceType enum value
    NetworkInterfaceTypeApiGatewayManaged = "api_gateway_managed"

    // NetworkInterfaceTypeGatewayLoadBalancer is a NetworkInterfaceType enum value
    NetworkInterfaceTypeGatewayLoadBalancer = "gateway_load_balancer"

    // NetworkInterfaceTypeGatewayLoadBalancerEndpoint is a NetworkInterfaceType enum value
    NetworkInterfaceTypeGatewayLoadBalancerEndpoint = "gateway_load_balancer_endpoint"

    // NetworkInterfaceTypeIotRulesManaged is a NetworkInterfaceType enum value
    NetworkInterfaceTypeIotRulesManaged = "iot_rules_managed"

    // NetworkInterfaceTypeAwsCodestarConnectionsManaged is a NetworkInterfaceType enum value
    NetworkInterfaceTypeAwsCodestarConnectionsManaged = "aws_codestar_connections_managed"
)

// NetworkInterfaceType_Values returns all elements of the NetworkInterfaceType enum
@ -165273,6 +165384,19 @@ func NetworkInterfaceType_Values() []string {
        NetworkInterfaceTypeNatGateway,
        NetworkInterfaceTypeEfa,
        NetworkInterfaceTypeTrunk,
        NetworkInterfaceTypeLoadBalancer,
        NetworkInterfaceTypeNetworkLoadBalancer,
        NetworkInterfaceTypeVpcEndpoint,
        NetworkInterfaceTypeBranch,
        NetworkInterfaceTypeTransitGateway,
        NetworkInterfaceTypeLambda,
        NetworkInterfaceTypeQuicksight,
        NetworkInterfaceTypeGlobalAcceleratorManaged,
        NetworkInterfaceTypeApiGatewayManaged,
        NetworkInterfaceTypeGatewayLoadBalancer,
        NetworkInterfaceTypeGatewayLoadBalancerEndpoint,
        NetworkInterfaceTypeIotRulesManaged,
        NetworkInterfaceTypeAwsCodestarConnectionsManaged,
    }
}
vendor/github.com/ceph/go-ceph/cephfs/admin/mgrmodule.go (62 changes, generated, vendored)
@ -1,40 +1,31 @@
package admin

import (
    "github.com/ceph/go-ceph/internal/commands"
    "github.com/ceph/go-ceph/common/admin/manager"
)

const mirroring = "mirroring"

// EnableModule will enable the specified manager module.
//
// Deprecated: use the equivalent function in cluster/admin/manager.
//
// Similar To:
//  ceph mgr module enable <module> [--force]
func (fsa *FSAdmin) EnableModule(module string, force bool) error {
    m := map[string]string{
        "prefix": "mgr module enable",
        "module": module,
        "format": "json",
    }
    if force {
        m["force"] = "--force"
    }
    // Why is this _only_ part of the mon command json? You'd think a mgr
    // command would be available as a MgrCommand but I couldn't figure it out.
    return commands.MarshalMonCommand(fsa.conn, m).NoData().End()
    mgradmin := manager.NewFromConn(fsa.conn)
    return mgradmin.EnableModule(module, force)
}

// DisableModule will disable the specified manager module.
//
// Deprecated: use the equivalent function in cluster/admin/manager.
//
// Similar To:
//  ceph mgr module disable <module>
func (fsa *FSAdmin) DisableModule(module string) error {
    m := map[string]string{
        "prefix": "mgr module disable",
        "module": module,
        "format": "json",
    }
    return commands.MarshalMonCommand(fsa.conn, m).NoData().End()
    mgradmin := manager.NewFromConn(fsa.conn)
    return mgradmin.DisableModule(module)
}

// EnableMirroringModule will enable the mirroring module for cephfs.
@ -42,7 +33,8 @@ func (fsa *FSAdmin) DisableModule(module string) error {
// Similar To:
//  ceph mgr module enable mirroring [--force]
func (fsa *FSAdmin) EnableMirroringModule(force bool) error {
    return fsa.EnableModule(mirroring, force)
    mgradmin := manager.NewFromConn(fsa.conn)
    return mgradmin.EnableModule(mirroring, force)
}

// DisableMirroringModule will disable the mirroring module for cephfs.
@ -50,34 +42,6 @@ func (fsa *FSAdmin) EnableMirroringModule(force bool) error {
// Similar To:
//  ceph mgr module disable mirroring
func (fsa *FSAdmin) DisableMirroringModule() error {
    return fsa.DisableModule(mirroring)
}

type moduleInfo struct {
    EnabledModules []string `json:"enabled_modules"`
    //DisabledModules []string `json:"disabled_modules"`
    // DisabledModules is documented in ceph as a list of string
    // but that's not what comes back from the server (on pacific).
    // Since we don't need this today, we're just going to ignore
    // it, but if we ever want to support this for external consumers
    // we'll need to figure out the real structure of this.
}

func parseModuleInfo(res response) (*moduleInfo, error) {
    m := &moduleInfo{}
    if err := res.NoStatus().Unmarshal(m).End(); err != nil {
        return nil, err
    }
    return m, nil
}

// listModules returns moduleInfo or error. it is not exported because
// this is really not a cephfs specific thing but we needed it
// for cephfs tests. maybe lift it somewhere else someday.
func (fsa *FSAdmin) listModules() (*moduleInfo, error) {
    m := map[string]string{
        "prefix": "mgr module ls",
        "format": "json",
    }
    return parseModuleInfo(commands.MarshalMonCommand(fsa.conn, m))
    mgradmin := manager.NewFromConn(fsa.conn)
    return mgradmin.DisableModule(mirroring)
}
vendor/github.com/ceph/go-ceph/common/admin/manager/admin.go (17 changes, generated, vendored, new file)
@ -0,0 +1,17 @@
package manager

import (
    ccom "github.com/ceph/go-ceph/common/commands"
)

// MgrAdmin is used to administrate ceph's manager (mgr).
type MgrAdmin struct {
    conn ccom.RadosCommander
}

// NewFromConn creates a new management object from a preexisting
// rados connection. The existing connection can be rados.Conn or any
// type implementing the RadosCommander interface.
func NewFromConn(conn ccom.RadosCommander) *MgrAdmin {
    return &MgrAdmin{conn}
}
vendor/github.com/ceph/go-ceph/common/admin/manager/doc.go (5 changes, generated, vendored, new file)
@ -0,0 +1,5 @@
/*
Package manager from common/admin contains a set of APIs used to interact
with and administer the Ceph manager (mgr).
*/
package manager
vendor/github.com/ceph/go-ceph/common/admin/manager/module.go (75 changes, generated, vendored, new file)
@ -0,0 +1,75 @@
package manager

import (
    "github.com/ceph/go-ceph/internal/commands"
)

// EnableModule will enable the specified manager module.
//
// Similar To:
//  ceph mgr module enable <module> [--force]
func (fsa *MgrAdmin) EnableModule(module string, force bool) error {
    m := map[string]string{
        "prefix": "mgr module enable",
        "module": module,
        "format": "json",
    }
    if force {
        m["force"] = "--force"
    }
    // Why is this _only_ part of the mon command json? You'd think a mgr
    // command would be available as a MgrCommand but I couldn't figure it out.
    return commands.MarshalMonCommand(fsa.conn, m).NoData().End()
}

// DisableModule will disable the specified manager module.
//
// Similar To:
//  ceph mgr module disable <module>
func (fsa *MgrAdmin) DisableModule(module string) error {
    m := map[string]string{
        "prefix": "mgr module disable",
        "module": module,
        "format": "json",
    }
    return commands.MarshalMonCommand(fsa.conn, m).NoData().End()
}

// DisabledModule describes a disabled Ceph mgr module.
// The Ceph JSON structure contains a complex module_options
// substructure that go-ceph does not currently implement.
type DisabledModule struct {
    Name        string `json:"name"`
    CanRun      bool   `json:"can_run"`
    ErrorString string `json:"error_string"`
}

// ModuleInfo contains fields that report the status of modules within the
// ceph mgr.
type ModuleInfo struct {
    // EnabledModules lists the names of the enabled modules.
    EnabledModules []string `json:"enabled_modules"`
    // AlwaysOnModules lists the names of the always-on modules.
    AlwaysOnModules []string `json:"always_on_modules"`
    // DisabledModules lists structures describing modules that are
    // not currently enabled.
    DisabledModules []DisabledModule `json:"disabled_modules"`
}

func parseModuleInfo(res commands.Response) (*ModuleInfo, error) {
    m := &ModuleInfo{}
    if err := res.NoStatus().Unmarshal(m).End(); err != nil {
        return nil, err
    }
    return m, nil
}

// ListModules returns a module info struct reporting the lists of
// enabled, disabled, and always-on modules in the Ceph mgr.
func (fsa *MgrAdmin) ListModules() (*ModuleInfo, error) {
    m := map[string]string{
        "prefix": "mgr module ls",
        "format": "json",
    }
    return parseModuleInfo(commands.MarshalMonCommand(fsa.conn, m))
}
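A usage sketch for the new common/admin/manager package: NewFromConn, EnableModule, ListModules, and the ModuleInfo fields come from the files above, while the connection bootstrap follows the usual go-ceph pattern and assumes a reachable cluster with a default ceph.conf.

package main

import (
    "fmt"

    "github.com/ceph/go-ceph/common/admin/manager"
    "github.com/ceph/go-ceph/rados"
)

func main() {
    conn, err := rados.NewConn()
    if err != nil {
        panic(err)
    }
    if err := conn.ReadDefaultConfigFile(); err != nil { // assumes /etc/ceph/ceph.conf
        panic(err)
    }
    if err := conn.Connect(); err != nil {
        panic(err)
    }
    defer conn.Shutdown()

    mgradmin := manager.NewFromConn(conn)
    if err := mgradmin.EnableModule("mirroring", false); err != nil {
        panic(err)
    }
    mods, err := mgradmin.ListModules()
    if err != nil {
        panic(err)
    }
    fmt.Println("enabled modules:", mods.EnabledModules)
}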
vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer.go (2 changes, generated, vendored)
@ -26,4 +26,4 @@ func (v *SyncBuffer) Release() {

// Sync asserts that changes in the C buffer are available in the data
// slice
func (v *SyncBuffer) Sync() {}
func (*SyncBuffer) Sync() {}
vendor/github.com/ceph/go-ceph/rados/rados_read_op_assert_version.go (22 changes, generated, vendored, new file)
@ -0,0 +1,22 @@
//go:build ceph_preview
// +build ceph_preview

package rados

// #cgo LDFLAGS: -lrados
// #include <rados/librados.h>
// #include <stdlib.h>
//
import "C"

// AssertVersion ensures that the object exists and that its internal version
// number is equal to "ver" before reading. "ver" should be a version number
// previously obtained with IOContext.GetLastVersion().
// PREVIEW
//
// Implements:
//  void rados_read_op_assert_version(rados_read_op_t read_op,
//                                    uint64_t ver)
func (r *ReadOp) AssertVersion(ver uint64) {
    C.rados_read_op_assert_version(r.op, C.uint64_t(ver))
}
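A sketch of how ReadOp.AssertVersion pairs with IOContext.GetLastVersion: sample the version, then make a later read fail if the object has moved past it. It must be built with the ceph_preview tag; CreateReadOp, Release, Operate, and OperationNoFlag are existing go-ceph APIs as best I can tell, but the flow itself is illustrative.

//go:build ceph_preview

package radossketch

import "github.com/ceph/go-ceph/rados"

// readIfUnchanged re-reads oid only if its version still matches the version
// of the last object operation seen on this ioctx.
func readIfUnchanged(ioctx *rados.IOContext, oid string) error {
    ver, err := ioctx.GetLastVersion()
    if err != nil {
        return err
    }

    op := rados.CreateReadOp()
    defer op.Release()
    op.AssertVersion(ver) // Operate fails if the object's version differs
    // further read steps would be chained on op here before Operate runs them

    return op.Operate(ioctx, oid, rados.OperationNoFlag)
}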
vendor/github.com/ceph/go-ceph/rados/rados_write_op_assert_version.go (22 changes, generated, vendored, new file)
@ -0,0 +1,22 @@
|
||||
//go:build ceph_preview
|
||||
// +build ceph_preview
|
||||
|
||||
package rados
|
||||
|
||||
// #cgo LDFLAGS: -lrados
|
||||
// #include <rados/librados.h>
|
||||
// #include <stdlib.h>
|
||||
//
|
||||
import "C"
|
||||
|
||||
// AssertVersion ensures that the object exists and that its internal version
|
||||
// number is equal to "ver" before writing. "ver" should be a version number
|
||||
// previously obtained with IOContext.GetLastVersion().
|
||||
// PREVIEW
|
||||
//
|
||||
// Implements:
|
||||
// void rados_read_op_assert_version(rados_read_op_t read_op,
|
||||
// uint64_t ver)
|
||||
func (w *WriteOp) AssertVersion(ver uint64) {
|
||||
C.rados_write_op_assert_version(w.op, C.uint64_t(ver))
|
||||
}

vendor/github.com/ceph/go-ceph/rados/rados_write_op_remove.go (generated, vendored; new file, 19 lines)

@@ -0,0 +1,19 @@
//go:build ceph_preview
// +build ceph_preview

package rados

// #cgo LDFLAGS: -lrados
// #include <rados/librados.h>
// #include <stdlib.h>
//
import "C"

// Remove object.
//  PREVIEW
//
// Implements:
//  void rados_write_op_remove(rados_write_op_t write_op)
func (w *WriteOp) Remove() {
	C.rados_write_op_remove(w.op)
}
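
// Example: a minimal sketch (not part of the vendored file) combining the two
// preview write-op steps above: remove an object only if its version is still
// the one previously observed; ioctx and ver assumed as in the read example.
//
//	op := rados.CreateWriteOp()
//	defer op.Release()
//	op.AssertVersion(ver)
//	op.Remove()
//	if err := op.Operate(ioctx, "myobject", rados.OperationNoFlag); err != nil {
//		return err // e.g. the object changed or no longer exists
//	}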

vendor/github.com/ceph/go-ceph/rados/rados_write_op_setxattr.go (generated, vendored; new file, 34 lines)

@@ -0,0 +1,34 @@
//go:build ceph_preview
// +build ceph_preview

package rados

// #cgo LDFLAGS: -lrados
// #include <rados/librados.h>
// #include <stdlib.h>
//
import "C"

import (
	"unsafe"
)

// SetXattr sets an xattr.
//  PREVIEW
//
// Implements:
//  void rados_write_op_setxattr(rados_write_op_t write_op,
//                               const char * name,
//                               const char * value,
//                               size_t value_len)
func (w *WriteOp) SetXattr(name string, value []byte) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	C.rados_write_op_setxattr(
		w.op,
		cName,
		(*C.char)(unsafe.Pointer(&value[0])),
		C.size_t(len(value)),
	)
}
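
// Example: a minimal usage sketch (not part of the vendored file), assuming
// ioctx as above. Note that SetXattr takes the address of value[0], so a nil
// or empty slice would panic; supply at least one byte.
//
//	op := rados.CreateWriteOp()
//	defer op.Release()
//	op.SetXattr("user.note", []byte("hello"))
//	if err := op.Operate(ioctx, "myobject", rados.OperationNoFlag); err != nil {
//		return err
//	}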

vendor/github.com/ceph/go-ceph/rados/read_op_omap_get_vals_by_keys.go (generated, vendored; new file, 120 lines)

@@ -0,0 +1,120 @@
//go:build ceph_preview
// +build ceph_preview

package rados

// #cgo LDFLAGS: -lrados
// #include <rados/librados.h>
// #include <stdlib.h>
//
import "C"

import (
	"unsafe"
)

// ReadOpOmapGetValsByKeysStep holds the result of the
// GetOmapValuesByKeys read operation.
// Result is valid only after Operate() was called.
type ReadOpOmapGetValsByKeysStep struct {
	// C arguments

	iter  C.rados_omap_iter_t
	prval *C.int

	// Internal state

	// canIterate is only set after the operation is performed and is
	// intended to prevent premature fetching of data.
	canIterate bool
}

func newReadOpOmapGetValsByKeysStep() *ReadOpOmapGetValsByKeysStep {
	s := &ReadOpOmapGetValsByKeysStep{
		prval: (*C.int)(C.malloc(C.sizeof_int)),
	}

	return s
}

func (s *ReadOpOmapGetValsByKeysStep) free() {
	s.canIterate = false
	C.rados_omap_get_end(s.iter)

	C.free(unsafe.Pointer(s.prval))
	s.prval = nil
}

func (s *ReadOpOmapGetValsByKeysStep) update() error {
	err := getError(*s.prval)
	s.canIterate = (err == nil)

	return err
}

// Next gets the next omap key/value pair referenced by
// ReadOpOmapGetValsByKeysStep's internal iterator.
// If there are no more elements to retrieve, (nil, nil) is returned.
// May be called only after Operate() finished.
//  PREVIEW
func (s *ReadOpOmapGetValsByKeysStep) Next() (*OmapKeyValue, error) {
	if !s.canIterate {
		return nil, ErrOperationIncomplete
	}

	var (
		cKey    *C.char
		cVal    *C.char
		cValLen C.size_t
	)

	ret := C.rados_omap_get_next(s.iter, &cKey, &cVal, &cValLen)
	if ret != 0 {
		return nil, getError(ret)
	}

	if cKey == nil {
		// Iterator has reached the end of the list.
		return nil, nil
	}

	return &OmapKeyValue{
		Key:   C.GoString(cKey),
		Value: C.GoBytes(unsafe.Pointer(cVal), C.int(cValLen)),
	}, nil
}

// GetOmapValuesByKeys starts iterating over specific key/value pairs.
//  PREVIEW
//
// Implements:
//  void rados_read_op_omap_get_vals_by_keys(rados_read_op_t read_op,
//                                           char const * const * keys,
//                                           size_t keys_len,
//                                           rados_omap_iter_t * iter,
//                                           int * prval)
func (r *ReadOp) GetOmapValuesByKeys(keys []string) *ReadOpOmapGetValsByKeysStep {
	s := newReadOpOmapGetValsByKeysStep()
	r.steps = append(r.steps, s)

	cKeys := make([]*C.char, len(keys))
	defer func() {
		for _, cKeyPtr := range cKeys {
			C.free(unsafe.Pointer(cKeyPtr))
		}
	}()

	for i, key := range keys {
		cKeys[i] = C.CString(key)
	}

	C.rados_read_op_omap_get_vals_by_keys(
		r.op,
		&cKeys[0],
		C.size_t(len(keys)),
		&s.iter,
		s.prval,
	)

	return s
}
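
// Example: a minimal iteration sketch (not part of the vendored file),
// assuming ioctx as above; error handling abbreviated.
//
//	op := rados.CreateReadOp()
//	defer op.Release()
//	step := op.GetOmapValuesByKeys([]string{"k1", "k2"})
//	if err := op.Operate(ioctx, "myobject", rados.OperationNoFlag); err != nil {
//		return err
//	}
//	for {
//		kv, err := step.Next()
//		if err != nil {
//			return err
//		}
//		if kv == nil {
//			break // no more pairs
//		}
//		fmt.Printf("%s=%s\n", kv.Key, kv.Value)
//	}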

vendor/github.com/ceph/go-ceph/rados/read_op_read.go (generated, vendored; new file, 75 lines)

@@ -0,0 +1,75 @@
//go:build ceph_preview
// +build ceph_preview

package rados

// #cgo LDFLAGS: -lrados
// #include <rados/librados.h>
// #include <stdlib.h>
//
import "C"

import (
	"unsafe"
)

// ReadOpReadStep holds the result of the Read read operation.
// Result is valid only after Operate() was called.
type ReadOpReadStep struct {
	// C returned data:
	bytesRead *C.size_t
	prval     *C.int

	BytesRead int64 // Bytes read by this action.
	Result    int   // Result of this action.
}

func (s *ReadOpReadStep) update() error {
	s.BytesRead = (int64)(*s.bytesRead)
	s.Result = (int)(*s.prval)

	return nil
}

func (s *ReadOpReadStep) free() {
	C.free(unsafe.Pointer(s.bytesRead))
	C.free(unsafe.Pointer(s.prval))

	s.bytesRead = nil
	s.prval = nil
}

func newReadOpReadStep() *ReadOpReadStep {
	return &ReadOpReadStep{
		bytesRead: (*C.size_t)(C.malloc(C.sizeof_size_t)),
		prval:     (*C.int)(C.malloc(C.sizeof_int)),
	}
}

// Read bytes from offset into buffer.
// len(buffer) is the maximum number of bytes read from the object.
// buffer[:ReadOpReadStep.BytesRead] then contains object data.
//  PREVIEW
//
// Implements:
//  void rados_read_op_read(rados_read_op_t read_op,
//                          uint64_t offset,
//                          size_t len,
//                          char * buffer,
//                          size_t * bytes_read,
//                          int * prval)
func (r *ReadOp) Read(offset uint64, buffer []byte) *ReadOpReadStep {
	oe := newReadStep(buffer, offset)
	readStep := newReadOpReadStep()
	r.steps = append(r.steps, oe, readStep)
	C.rados_read_op_read(
		r.op,
		oe.cOffset,
		oe.cReadLen,
		oe.cBuffer,
		readStep.bytesRead,
		readStep.prval,
	)

	return readStep
}
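
// Example: a minimal sketch (not part of the vendored file) showing the
// buffer/BytesRead contract described above; ioctx assumed as before.
//
//	buf := make([]byte, 32)
//	op := rados.CreateReadOp()
//	defer op.Release()
//	step := op.Read(0, buf)
//	if err := op.Operate(ioctx, "myobject", rados.OperationNoFlag); err != nil {
//		return err
//	}
//	data := buf[:step.BytesRead] // only this slice holds object data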

vendor/github.com/ceph/go-ceph/rados/read_step.go (generated, vendored; new file, 31 lines)

@@ -0,0 +1,31 @@
package rados

// #include <stdint.h>
import "C"

import (
	"unsafe"
)

type readStep struct {
	withoutUpdate
	withoutFree
	// the c pointer utilizes the Go byteslice data and no free is needed

	// inputs:
	b []byte

	// arguments:
	cBuffer  *C.char
	cReadLen C.size_t
	cOffset  C.uint64_t
}

func newReadStep(b []byte, offset uint64) *readStep {
	return &readStep{
		b:        b,
		cBuffer:  (*C.char)(unsafe.Pointer(&b[0])), // TODO: must be pinned
		cReadLen: C.size_t(len(b)),
		cOffset:  C.uint64_t(offset),
	}
}

vendor/github.com/ceph/go-ceph/rados/watcher.go (generated, vendored; new file, 379 lines)

@@ -0,0 +1,379 @@
//go:build ceph_preview
// +build ceph_preview

package rados

/*
#cgo LDFLAGS: -lrados
#include <stdlib.h>
#include <rados/librados.h>
extern void watchNotifyCb(void*, uint64_t, uint64_t, uint64_t, void*, size_t);
extern void watchErrorCb(void*, uint64_t, int);
*/
import "C"

import (
	"encoding/binary"
	"fmt"
	"math"
	"sync"
	"time"
	"unsafe"
)

type (
	// WatcherID is the unique id of a Watcher.
	WatcherID uint64
	// NotifyID is the unique id of a NotifyEvent.
	NotifyID uint64
	// NotifierID is the unique id of a notifying client.
	NotifierID uint64
)

// NotifyEvent is received by a watcher for each notification.
type NotifyEvent struct {
	ID         NotifyID
	WatcherID  WatcherID
	NotifierID NotifierID
	Data       []byte
}

// NotifyAck represents an acknowledged notification.
type NotifyAck struct {
	WatcherID  WatcherID
	NotifierID NotifierID
	Response   []byte
}

// NotifyTimeout represents an unacknowledged notification.
type NotifyTimeout struct {
	WatcherID  WatcherID
	NotifierID NotifierID
}

// Watcher receives all notifications for a certain object.
type Watcher struct {
	id     WatcherID
	oid    string
	ioctx  *IOContext
	events chan NotifyEvent
	errors chan error
	done   chan struct{}
}

var (
	watchers    = map[WatcherID]*Watcher{}
	watchersMtx sync.RWMutex
)

// Watch creates a Watcher for the specified object.
//  PREVIEW
//
// A Watcher receives all notifications that are sent to the object on which it
// has been created. It exposes two read-only channels: Events() receives all
// the NotifyEvents and Errors() receives all occurring errors. Typical code
// creating a Watcher could look like this:
//
//   watcher, err := ioctx.Watch(oid)
//   go func() { // event handler
//     for ne := range watcher.Events() {
//       ...
//       ne.Ack([]byte("response data..."))
//       ...
//     }
//   }()
//   go func() { // error handler
//     for err := range watcher.Errors() {
//       ... handle err ...
//     }
//   }()
//
// CAUTION: the Watcher references the IOContext in which it has been created.
// Therefore all watchers must be deleted with the Delete() method before the
// IOContext is being destroyed.
//
// Implements:
//  int rados_watch2(rados_ioctx_t io, const char* o, uint64_t* cookie,
//    rados_watchcb2_t watchcb, rados_watcherrcb_t watcherrcb, void* arg)
func (ioctx *IOContext) Watch(obj string) (*Watcher, error) {
	return ioctx.WatchWithTimeout(obj, 0)
}

// WatchWithTimeout creates a watcher on an object. Same as Watch(), but a
// timeout different from the default can be specified.
//  PREVIEW
//
// Implements:
//  int rados_watch3(rados_ioctx_t io, const char *o, uint64_t *cookie,
//    rados_watchcb2_t watchcb, rados_watcherrcb_t watcherrcb, uint32_t timeout,
//    void *arg);
func (ioctx *IOContext) WatchWithTimeout(oid string, timeout time.Duration) (*Watcher, error) {
	cObj := C.CString(oid)
	defer C.free(unsafe.Pointer(cObj))
	var id C.uint64_t
	watchersMtx.Lock()
	defer watchersMtx.Unlock()
	ret := C.rados_watch3(
		ioctx.ioctx,
		cObj,
		&id,
		(C.rados_watchcb2_t)(C.watchNotifyCb),
		(C.rados_watcherrcb_t)(C.watchErrorCb),
		C.uint32_t(timeout.Milliseconds()/1000),
		nil,
	)
	if err := getError(ret); err != nil {
		return nil, err
	}
	evCh := make(chan NotifyEvent)
	errCh := make(chan error)
	w := &Watcher{
		id:     WatcherID(id),
		ioctx:  ioctx,
		oid:    oid,
		events: evCh,
		errors: errCh,
		done:   make(chan struct{}),
	}
	watchers[WatcherID(id)] = w
	return w, nil
}

// ID returns the WatcherID of the Watcher.
//  PREVIEW
func (w *Watcher) ID() WatcherID {
	return w.id
}

// Events returns a read-only channel that receives all notifications that are
// sent to the object of the Watcher.
//  PREVIEW
func (w *Watcher) Events() <-chan NotifyEvent {
	return w.events
}

// Errors returns a read-only channel that receives all errors for the Watcher.
//  PREVIEW
func (w *Watcher) Errors() <-chan error {
	return w.errors
}

// Check on the status of a Watcher.
//  PREVIEW
//
// Returns the time since it was last confirmed. If there is an error, the
// Watcher is no longer valid, and should be destroyed with the Delete() method.
//
// Implements:
//  int rados_watch_check(rados_ioctx_t io, uint64_t cookie)
func (w *Watcher) Check() (time.Duration, error) {
	ret := C.rados_watch_check(w.ioctx.ioctx, C.uint64_t(w.id))
	if ret < 0 {
		return 0, getError(ret)
	}
	return time.Millisecond * time.Duration(ret), nil
}
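
// Example: a minimal liveness-check sketch (not part of the vendored file)
// for the Check/Delete contract described above; watcher as returned by
// ioctx.Watch, oid as before.
//
//	if _, err := watcher.Check(); err != nil {
//		// the watch is broken; tear it down and re-establish it
//		_ = watcher.Delete()
//		watcher, err = ioctx.Watch(oid)
//	}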

// Delete the watcher. This closes both the event and error channel.
//  PREVIEW
//
// Implements:
//  int rados_unwatch2(rados_ioctx_t io, uint64_t cookie)
func (w *Watcher) Delete() error {
	watchersMtx.Lock()
	_, ok := watchers[w.id]
	if ok {
		delete(watchers, w.id)
	}
	watchersMtx.Unlock()
	if !ok {
		return nil
	}
	ret := C.rados_unwatch2(w.ioctx.ioctx, C.uint64_t(w.id))
	if ret != 0 {
		return getError(ret)
	}
	close(w.done) // unblock blocked callbacks
	close(w.events)
	close(w.errors)
	return nil
}

// Notify sends a notification with the provided data to all Watchers of the
// specified object.
//  PREVIEW
//
// CAUTION: even if the error is not nil, the returned slices
// might still contain data.
func (ioctx *IOContext) Notify(obj string, data []byte) ([]NotifyAck, []NotifyTimeout, error) {
	return ioctx.NotifyWithTimeout(obj, data, 0)
}
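
// Example: a minimal notifier-side sketch (not part of the vendored file),
// assuming ioctx as above; pairs with the watcher example in the Watch()
// documentation.
//
//	acks, timeouts, err := ioctx.Notify("myobject", []byte("ping"))
//	if err != nil {
//		// acks/timeouts may still carry partial data, see CAUTION above
//	}
//	for _, ack := range acks {
//		fmt.Printf("ack from notifier %v: %q\n", ack.NotifierID, ack.Response)
//	}
//	for _, t := range timeouts {
//		fmt.Printf("no ack from notifier %v\n", t.NotifierID)
//	}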

// NotifyWithTimeout is like Notify() but with a different timeout than the
// default.
//  PREVIEW
//
// Implements:
//  int rados_notify2(rados_ioctx_t io, const char* o, const char* buf, int buf_len,
//    uint64_t timeout_ms, char** reply_buffer, size_t* reply_buffer_len)
func (ioctx *IOContext) NotifyWithTimeout(obj string, data []byte, timeout time.Duration) ([]NotifyAck,
	[]NotifyTimeout, error) {
	cObj := C.CString(obj)
	defer C.free(unsafe.Pointer(cObj))
	var cResponse *C.char
	defer C.rados_buffer_free(cResponse)
	var responseLen C.size_t
	var dataPtr *C.char
	if len(data) > 0 {
		dataPtr = (*C.char)(unsafe.Pointer(&data[0]))
	}
	ret := C.rados_notify2(
		ioctx.ioctx,
		cObj,
		dataPtr,
		C.int(len(data)),
		C.uint64_t(timeout.Milliseconds()),
		&cResponse,
		&responseLen,
	)
	// cResponse has been set even if an error is returned, so we decode it anyway
	acks, timeouts := decodeNotifyResponse(cResponse, responseLen)
	return acks, timeouts, getError(ret)
}

// Ack sends an acknowledgement with the specified response data to the notifier
// of the NotifyEvent. If a notify is not ack'ed, the originating Notify() call
// blocks and eventually times out.
//  PREVIEW
//
// Implements:
//  int rados_notify_ack(rados_ioctx_t io, const char *o, uint64_t notify_id,
//    uint64_t cookie, const char *buf, int buf_len)
func (ne *NotifyEvent) Ack(response []byte) error {
	watchersMtx.RLock()
	w, ok := watchers[ne.WatcherID]
	watchersMtx.RUnlock()
	if !ok {
		return fmt.Errorf("can't ack on deleted watcher %v", ne.WatcherID)
	}
	cOID := C.CString(w.oid)
	defer C.free(unsafe.Pointer(cOID))
	var respPtr *C.char
	if len(response) > 0 {
		respPtr = (*C.char)(unsafe.Pointer(&response[0]))
	}
	ret := C.rados_notify_ack(
		w.ioctx.ioctx,
		cOID,
		C.uint64_t(ne.ID),
		C.uint64_t(ne.WatcherID),
		respPtr,
		C.int(len(response)),
	)
	return getError(ret)
}

// WatcherFlush flushes all pending notifications of the cluster.
//  PREVIEW
//
// Implements:
//  int rados_watch_flush(rados_t cluster)
func (c *Conn) WatcherFlush() error {
	if !c.connected {
		return ErrNotConnected
	}
	ret := C.rados_watch_flush(c.cluster)
	return getError(ret)
}

// decoder for this notify response format:
//    le32 num_acks
//    {
//      le64 gid     global id for the client (for client.1234 that's 1234)
//      le64 cookie  cookie for the client
//      le32 buflen  length of reply message buffer
//      u8 * buflen  payload
//    } * num_acks
//    le32 num_timeouts
//    {
//      le64 gid     global id for the client
//      le64 cookie  cookie for the client
//    } * num_timeouts
//
// NOTE: starting with pacific this is implemented as a C function and this can
// be replaced later
func decodeNotifyResponse(response *C.char, len C.size_t) ([]NotifyAck, []NotifyTimeout) {
	if len == 0 || response == nil {
		return nil, nil
	}
	b := (*[math.MaxInt32]byte)(unsafe.Pointer(response))[:len:len]
	pos := 0

	num := binary.LittleEndian.Uint32(b[pos:])
	pos += 4
	acks := make([]NotifyAck, num)
	for i := range acks {
		acks[i].NotifierID = NotifierID(binary.LittleEndian.Uint64(b[pos:]))
		pos += 8
		acks[i].WatcherID = WatcherID(binary.LittleEndian.Uint64(b[pos:]))
		pos += 8
		dataLen := binary.LittleEndian.Uint32(b[pos:])
		pos += 4
		if dataLen > 0 {
			acks[i].Response = C.GoBytes(unsafe.Pointer(&b[pos]), C.int(dataLen))
			pos += int(dataLen)
		}
	}

	num = binary.LittleEndian.Uint32(b[pos:])
	pos += 4
	timeouts := make([]NotifyTimeout, num)
	for i := range timeouts {
		timeouts[i].NotifierID = NotifierID(binary.LittleEndian.Uint64(b[pos:]))
		pos += 8
		timeouts[i].WatcherID = WatcherID(binary.LittleEndian.Uint64(b[pos:]))
		pos += 8
	}
	return acks, timeouts
}

//export watchNotifyCb
func watchNotifyCb(_ unsafe.Pointer, notifyID C.uint64_t, id C.uint64_t,
	notifierID C.uint64_t, cData unsafe.Pointer, dataLen C.size_t) {
	watchersMtx.RLock()
	w, ok := watchers[WatcherID(id)]
	watchersMtx.RUnlock()
	if !ok {
		// usually this should not happen, but who knows
		// TODO: some log message (once we have logging)
		return
	}
	ev := NotifyEvent{
		ID:         NotifyID(notifyID),
		WatcherID:  WatcherID(id),
		NotifierID: NotifierID(notifierID),
	}
	if dataLen > 0 {
		ev.Data = C.GoBytes(cData, C.int(dataLen))
	}
	select {
	case <-w.done: // unblock when deleted
	case w.events <- ev:
	}
}

//export watchErrorCb
func watchErrorCb(_ unsafe.Pointer, id C.uint64_t, err C.int) {
	watchersMtx.RLock()
	w, ok := watchers[WatcherID(id)]
	watchersMtx.RUnlock()
	if !ok {
		// usually this should not happen, but who knows
		// TODO: some log message (once we have logging)
		return
	}
	select {
	case <-w.done: // unblock when deleted
	case w.errors <- getError(err):
	}
}

@@ -1,6 +1,3 @@
-//go:build ceph_preview
-// +build ceph_preview
-
 package rados

 // #cgo LDFLAGS: -lrados
@@ -40,7 +37,6 @@ func newWriteOpCmpExtStep() *WriteOpCmpExtStep {
 }

 // CmpExt ensures that given object range (extent) satisfies comparison.
-//  PREVIEW
 //
 // Implements:
 //  void rados_write_op_cmpext(rados_write_op_t write_op,
vendor/github.com/ceph/go-ceph/rbd/rbd.go (generated, vendored; 24 changed lines)

@@ -706,15 +706,13 @@ func (image *Image) BreakLock(client string, cookie string) error {
 	return getError(C.rbd_break_lock(image.image, cClient, cCookie))
 }

-// ssize_t rbd_read(rbd_image_t image, uint64_t ofs, size_t len, char *buf);
-// TODO: int64_t rbd_read_iterate(rbd_image_t image, uint64_t ofs, size_t len,
-//       int (*cb)(uint64_t, size_t, const char *, void *), void *arg);
-// TODO: int rbd_read_iterate2(rbd_image_t image, uint64_t ofs, uint64_t len,
-//       int (*cb)(uint64_t, size_t, const char *, void *), void *arg);
-// TODO: int rbd_diff_iterate(rbd_image_t image,
-//       const char *fromsnapname,
-//       uint64_t ofs, uint64_t len,
-//       int (*cb)(uint64_t, size_t, int, void *), void *arg);
+// Read data from the image. The length of the read is determined by the length
+// of the buffer slice. The position of the read is determined by an internal
+// offset which is not safe in concurrent code. Prefer ReadAt when possible.
+//
+// Implements:
+//  ssize_t rbd_read(rbd_image_t image, uint64_t ofs, size_t len,
+//                   char *buf);
 func (image *Image) Read(data []byte) (int, error) {
 	if err := image.validate(imageIsOpen); err != nil {
 		return 0, err
@@ -742,7 +740,13 @@ func (image *Image) Read(data []byte) (int, error) {
 	return ret, nil
 }

-// ssize_t rbd_write(rbd_image_t image, uint64_t ofs, size_t len, const char *buf);
+// Write data to an image. The length of the write is determined by the length of
+// the buffer slice. The position of the write is determined by an internal
+// offset which is not safe in concurrent code. Prefer WriteAt when possible.
+//
+// Implements:
+//  ssize_t rbd_write(rbd_image_t image, uint64_t ofs, size_t len,
+//                    const char *buf);
 func (image *Image) Write(data []byte) (n int, err error) {
 	if err := image.validate(imageIsOpen); err != nil {
 		return 0, err
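
// Example: a minimal sketch (not part of the diff) illustrating the
// internal-offset behavior the new doc comments describe; the image name is a
// placeholder and error handling is elided.
//
//	img, err := rbd.OpenImage(ioctx, "myimage", rbd.NoSnapshot)
//	defer img.Close()
//	n, _ := img.Write([]byte("hello")) // advances the internal offset
//	_, _ = img.Seek(0, rbd.SeekSet)    // rewind before reading back
//	buf := make([]byte, n)
//	_, _ = img.Read(buf)               // prefer ReadAt/WriteAt in concurrent code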
vendor/modules.txt (vendored; 77 changed lines)

@@ -8,7 +8,7 @@ github.com/armon/go-metrics
 # github.com/armon/go-radix v1.0.0
 ## explicit
 github.com/armon/go-radix
-# github.com/aws/aws-sdk-go v1.42.48
+# github.com/aws/aws-sdk-go v1.43.3
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/awserr
@@ -69,9 +69,10 @@ github.com/cenkalti/backoff/v3
 ## explicit; go 1.16
 github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd
 github.com/ceph/ceph-csi/api/deploy/ocp
-# github.com/ceph/go-ceph v0.13.0
+# github.com/ceph/go-ceph v0.14.0
 ## explicit; go 1.12
 github.com/ceph/go-ceph/cephfs/admin
+github.com/ceph/go-ceph/common/admin/manager
 github.com/ceph/go-ceph/common/commands
 github.com/ceph/go-ceph/internal/callbacks
 github.com/ceph/go-ceph/internal/commands
@@ -696,7 +697,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.23.3 => k8s.io/api v0.23.3
+# k8s.io/api v0.23.4 => k8s.io/api v0.23.4
 ## explicit; go 1.16
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -745,7 +746,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apimachinery v0.23.3 => k8s.io/apimachinery v0.23.3
+# k8s.io/apimachinery v0.23.4 => k8s.io/apimachinery v0.23.4
 ## explicit; go 1.16
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -801,7 +802,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.23.3 => k8s.io/apiserver v0.23.3
+# k8s.io/apiserver v0.23.4 => k8s.io/apiserver v0.23.4
 ## explicit; go 1.16
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
@@ -842,7 +843,7 @@ k8s.io/apiserver/pkg/util/feature
 k8s.io/apiserver/pkg/util/webhook
 k8s.io/apiserver/pkg/util/x509metrics
 k8s.io/apiserver/pkg/warning
-# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.23.3
+# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.23.4
 ## explicit; go 1.16
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -1085,12 +1086,12 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.23.3 => k8s.io/cloud-provider v0.23.3
+# k8s.io/cloud-provider v0.23.4 => k8s.io/cloud-provider v0.23.4
 ## explicit; go 1.16
 k8s.io/cloud-provider
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/component-base v0.23.3 => k8s.io/component-base v0.23.3
+# k8s.io/component-base v0.23.4 => k8s.io/component-base v0.23.4
 ## explicit; go 1.16
 k8s.io/component-base/cli/flag
 k8s.io/component-base/config
@@ -1101,7 +1102,7 @@ k8s.io/component-base/metrics/legacyregistry
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/traces
 k8s.io/component-base/version
-# k8s.io/component-helpers v0.23.3 => k8s.io/component-helpers v0.23.3
+# k8s.io/component-helpers v0.23.4 => k8s.io/component-helpers v0.23.4
 ## explicit; go 1.16
 k8s.io/component-helpers/node/util/sysctl
 k8s.io/component-helpers/scheduling/corev1
@@ -1113,14 +1114,14 @@ k8s.io/klog/v2
 ## explicit; go 1.16
 k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util/proto
-# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.23.3
+# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.23.4
 ## explicit; go 1.16
 k8s.io/kubectl/pkg/scale
 k8s.io/kubectl/pkg/util/podutils
-# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.23.3
+# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.23.4
 ## explicit; go 1.16
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.23.3
+# k8s.io/kubernetes v1.23.4
 ## explicit; go 1.16
 k8s.io/kubernetes/pkg/api/legacyscheme
 k8s.io/kubernetes/pkg/api/service
@@ -1183,7 +1184,7 @@ k8s.io/kubernetes/test/e2e/storage/podlogs
 k8s.io/kubernetes/test/e2e/storage/utils
 k8s.io/kubernetes/test/utils
 k8s.io/kubernetes/test/utils/image
-# k8s.io/mount-utils v0.23.3 => k8s.io/mount-utils v0.23.3
+# k8s.io/mount-utils v0.23.4 => k8s.io/mount-utils v0.23.4
 ## explicit; go 1.16
 k8s.io/mount-utils
 # k8s.io/utils v0.0.0-20211116205334-6203023598ed
@@ -1262,29 +1263,29 @@ sigs.k8s.io/yaml
 # github.com/golang/protobuf => github.com/golang/protobuf v1.4.3
 # github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
 # gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
-# k8s.io/api => k8s.io/api v0.23.3
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
-# k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
-# k8s.io/apiserver => k8s.io/apiserver v0.23.3
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
-# k8s.io/client-go => k8s.io/client-go v0.23.3
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
-# k8s.io/code-generator => k8s.io/code-generator v0.23.3
-# k8s.io/component-base => k8s.io/component-base v0.23.3
-# k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
-# k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
-# k8s.io/cri-api => k8s.io/cri-api v0.23.3
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
-# k8s.io/kubectl => k8s.io/kubectl v0.23.3
-# k8s.io/kubelet => k8s.io/kubelet v0.23.3
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
-# k8s.io/metrics => k8s.io/metrics v0.23.3
-# k8s.io/mount-utils => k8s.io/mount-utils v0.23.3
-# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
+# k8s.io/api => k8s.io/api v0.23.4
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.4
+# k8s.io/apimachinery => k8s.io/apimachinery v0.23.4
+# k8s.io/apiserver => k8s.io/apiserver v0.23.4
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.4
+# k8s.io/client-go => k8s.io/client-go v0.23.4
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.4
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.4
+# k8s.io/code-generator => k8s.io/code-generator v0.23.4
+# k8s.io/component-base => k8s.io/component-base v0.23.4
+# k8s.io/component-helpers => k8s.io/component-helpers v0.23.4
+# k8s.io/controller-manager => k8s.io/controller-manager v0.23.4
+# k8s.io/cri-api => k8s.io/cri-api v0.23.4
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.4
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.4
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.4
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.4
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.4
+# k8s.io/kubectl => k8s.io/kubectl v0.23.4
+# k8s.io/kubelet => k8s.io/kubelet v0.23.4
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.4
+# k8s.io/metrics => k8s.io/metrics v0.23.4
+# k8s.io/mount-utils => k8s.io/mount-utils v0.23.4
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.4
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.4
 # layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917