Merge pull request #66 from ceph/devel

Sync rhs/ceph-csi:devel with ceph/ceph-csi:devel
This commit is contained in:
OpenShift Merge Robot 2022-01-12 15:46:21 +01:00 committed by GitHub
commit 4a2a648a0a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
79 changed files with 3960 additions and 344 deletions

View File

@ -4,16 +4,63 @@ defaults:
# mergify.io has removed bot_account from its free open source plan.
comment:
# bot_account: ceph-csi-bot # mergify[bot] will be commenting.
merge:
queue:
# merge_bot_account: ceph-csi-bot #mergify[bot] will be merging prs.
# update_bot_account: ceph-csi-bot #mergify will randomly pick and use
# credentials of users with write access to repo to rebase prs.
name: default
method: rebase
rebase_fallback: merge
strict: smart
strict_method: rebase
rebase:
# bot_account: ceph-csi-bot # same as update_bot_account.
update_method: rebase
queue_rules:
- name: default
conditions:
# Conditions to get out of the queue (= merged)
- or:
- and:
- base~=^(devel)|(release-.+)$
- "status-success=codespell"
- "status-success=multi-arch-build"
- "status-success=go-test"
- "status-success=golangci-lint"
- "status-success=mod-check"
- "status-success=lint-extras"
- "status-success=ci/centos/k8s-e2e-external-storage/1.21"
- "status-success=ci/centos/k8s-e2e-external-storage/1.22"
- "status-success=ci/centos/k8s-e2e-external-storage/1.23"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.21"
- "status-success=ci/centos/mini-e2e/k8s-1.22"
- "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
- and:
- base=release-v3.4
- "status-success=codespell"
- "status-success=multi-arch-build"
- "status-success=go-test"
- "status-success=commitlint"
- "status-success=golangci-lint"
- "status-success=mod-check"
- "status-success=lint-extras"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
- "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
- "status-success=ci/centos/mini-e2e/k8s-1.21"
- "status-success=ci/centos/mini-e2e/k8s-1.22"
- "status-success=ci/centos/mini-e2e/k8s-1.23"
- "status-success=ci/centos/upgrade-tests-cephfs"
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
- and:
- base=ci/centos
- "status-success=ci/centos/job-validation"
- "status-success=ci/centos/jjb-validate"
- "status-success=DCO"
pull_request_rules:
- name: remove outdated approvals
@ -61,7 +108,8 @@ pull_request_rules:
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
merge: {}
queue:
name: default
dismiss_reviews: {}
delete_head_branch: {}
- name: automatic merge
@ -92,7 +140,8 @@ pull_request_rules:
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
merge: {}
queue:
name: default
dismiss_reviews: {}
delete_head_branch: {}
- name: automatic merge PR having ready-to-merge label
@ -122,7 +171,8 @@ pull_request_rules:
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
merge: {}
queue:
name: default
dismiss_reviews: {}
delete_head_branch: {}
- name: backport patches to release-v3.4 branch
@ -160,7 +210,8 @@ pull_request_rules:
- "status-success=ci/centos/upgrade-tests-rbd"
- "status-success=DCO"
actions:
merge: {}
queue:
name: default
dismiss_reviews: {}
delete_head_branch: {}
- name: remove outdated approvals on ci/centos
@ -182,7 +233,8 @@ pull_request_rules:
- "status-success=ci/centos/jjb-validate"
- "status-success=DCO"
actions:
merge: {}
queue:
name: default
dismiss_reviews: {}
delete_head_branch: {}
- name: automatic merge PR having ready-to-merge label on ci/centos
@ -196,7 +248,8 @@ pull_request_rules:
- "status-success=ci/centos/jjb-validate"
- "status-success=DCO"
actions:
merge: {}
queue:
name: default
dismiss_reviews: {}
delete_head_branch: {}
##

View File

@ -81,7 +81,9 @@ for its support details.
| ------ | --------------------------------------------------------- | -------------- | ------------------ | ---------------- | -------------------- | ------------------ |
| RBD | Dynamically provision, de-provision Block mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.14.0 |
| | Dynamically provision, de-provision Block mode RWX volume | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.14.0 |
| | Dynamically provision, de-provision Block mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Nautilus (>=14.0.0) | >= v1.22.0 |
| | Dynamically provision, de-provision File mode RWO volume | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Nautilus (>=14.0.0) | >= v1.22.0 |
| | Provision File Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.17.0 |
| | Provision File Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.16.0 |
| | Provision Block Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.17.0 |
@ -96,6 +98,7 @@ for its support details.
| CephFS | Dynamically provision, de-provision File mode RWO volume | Beta | >= v1.1.0 | >= v1.0.0 | Nautilus (>=14.2.2) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWX volume | Beta | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode ROX volume | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Nautilus (>=14.0.0) | >= v1.22.0 |
| | Creating and deleting snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
| | Provision volume from snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
| | Provision volume from another volume | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.16.0 |

View File

@ -13,7 +13,7 @@ CSI_IMAGE_VERSION=canary
# Ceph version to use
BASE_IMAGE=docker.io/ceph/ceph:v16
CEPH_VERSION=octopus
CEPH_VERSION=pacific
# standard Golang options
GOLANG_VERSION=1.17.5

View File

@ -12,20 +12,26 @@ Below parameters/values can be used to establish the connection to the HPCS
service from the CSI driver and to make use of the encryption operations:
```text
* KMS_SERVICE_NAME=[kms_service_name]
* IBM_KP_BASE_URL
The Key Protect/HPCS connection URL.
* IBM_KP_TOKEN_URL
The Token Authentication URL of the Key Protect/HPCS service.
* KMS_SERVICE_NAME
A unique name for the key management service within the project.
* KP_SERVICE_INSTANCE_ID=[service_instance_id]
* IBM_KP_SERVICE_INSTANCE_ID
The Instance ID of the IBM HPCS service, ex: crn:v1:bluemix:public:hs-crypto:us-south:a/5d19cf8b82874c2dab37e397426fbc42:e2ae65ff-954b-453f-b0d7-fc5064c203ce::
* KP_SERVICE_API_KEY=[service_api_key]
* IBM_KP_SERVICE_API_KEY
Ex: 06x6DbTkVQ-qCRmq9cK-p9xOQpU2UwJMcdjnIDdr0g2R
* KP_CUSTOMER_ROOT_KEY=[customer_root_key]
* IBM_KP_CUSTOMER_ROOT_KEY
Ex: c7a9aa91-5cb5-48da-a821-e85c27b99d92
* KP_REGION = [region of the key protect service]
Ex: us-south-2
* IBM_KP_REGION
Region of the Key Protect service, ex: us-south-2
```
### Values provided in the connection Secret

View File

@ -290,13 +290,14 @@ var _ = Describe("cephfs", func() {
It("Test CephFS CSI", func() {
pvcPath := cephFSExamplePath + "pvc.yaml"
appPath := cephFSExamplePath + "pod.yaml"
appRWOPPath := cephFSExamplePath + "pod-rwop.yaml"
pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
appClonePath := cephFSExamplePath + "pod-restore.yaml"
appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"
appEphemeralPath := cephFSExamplePath + "pod-ephemeral.yaml"
pvcRWOPPath := cephFSExamplePath + "pvc-rwop.yaml"
By("checking provisioner deployment is running", func() {
err := waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
if err != nil {
@ -360,6 +361,48 @@ var _ = Describe("cephfs", func() {
}
})
By("verify RWOP volume support", func() {
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
err := createCephfsStorageClass(f.ClientSet, f, true, nil)
if err != nil {
e2elog.Failf("failed to create CephFS storageclass: %v", err)
}
pvc, err := loadPVC(pvcRWOPPath)
if err != nil {
e2elog.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName
// create application
app, err := loadApp(appRWOPPath)
if err != nil {
e2elog.Failf("failed to load application: %v", err)
}
app.Namespace = f.UniqueName
baseAppName := app.Name
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC: %v", err)
}
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
e2elog.Failf("failed to create application: %v", err)
}
validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
if err != nil {
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
}
}
})
By("check static PVC", func() {
scPath := cephFSExamplePath + "secret.yaml"
err := validateCephFsStaticPV(f, appPath, scPath)
@ -1218,6 +1261,34 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete PVC: %v", err)
}
})
By("restore snapshot to a bigger size PVC", func() {
err := validateBiggerPVCFromSnapshot(f,
pvcPath,
appPath,
snapshotPath,
pvcClonePath,
appClonePath)
if err != nil {
e2elog.Failf("failed to validate restore bigger size clone: %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
})
By("clone PVC to a bigger size PVC", func() {
err := validateBiggerCloneFromPVC(f,
pvcPath,
appPath,
pvcSmartClonePath,
appSmartClonePath)
if err != nil {
e2elog.Failf("failed to validate bigger size clone: %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
})
// Make sure this is the last testcase in this file, because
// it deletes the pool.
By("Create a PVC and delete PVC when backend pool deleted", func() {

View File

@ -62,7 +62,7 @@ func generateClusterIDConfigMapForMigration(f *framework.Framework, c kubernetes
return fmt.Errorf("failed to create configmap: %w", err)
}
// restart csi pods for the configmap to take effect.
err = recreateCSIRBDPods(f)
err = recreateCSIPods(f, rbdPodLabels, rbdDaemonsetName, rbdDeploymentName)
if err != nil {
return fmt.Errorf("failed to recreate rbd csi pods: %w", err)
}

View File

@ -33,6 +33,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
const errRWOPConflict = "node has pod using PersistentVolumeClaim with the same name and ReadWriteOncePod access mode."
// getDaemonSetLabelSelector dynamically returns the labels of the daemonset with the given name and namespace,
// needed since the labels are not the same for helm and non-helm deployments.
func getDaemonSetLabelSelector(f *framework.Framework, ns, daemonSetName string) (string, error) {
@ -395,3 +397,52 @@ func getKernelVersionFromDaemonset(f *framework.Framework, ns, dsn, cn string) (
return kernelRelease, nil
}
// recreateCSIPods deletes the daemonset and deployment pods based on the selectors passed in.
func recreateCSIPods(f *framework.Framework, podLabels, daemonsetName, deploymentName string) error {
err := deletePodWithLabel(podLabels, cephCSINamespace, false)
if err != nil {
return fmt.Errorf("failed to delete pods with labels (%s): %w", podLabels, err)
}
// wait for csi pods to come up
err = waitForDaemonSets(daemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
return fmt.Errorf("timeout waiting for daemonset pods: %w", err)
}
err = waitForDeploymentComplete(f.ClientSet, deploymentName, cephCSINamespace, deployTimeout)
if err != nil {
return fmt.Errorf("timeout waiting for deployment to be in running state: %w", err)
}
return nil
}
// validateRWOPPodCreation validates the second pod creation failure scenario with an RWOP PVC.
func validateRWOPPodCreation(
f *framework.Framework,
pvc *v1.PersistentVolumeClaim,
app *v1.Pod,
baseAppName string) error {
var err error
// create one more app with same PVC
name := fmt.Sprintf("%s%d", f.UniqueName, deployTimeout)
app.Name = name
err = createAppErr(f.ClientSet, app, deployTimeout, errRWOPConflict)
if err != nil {
return fmt.Errorf("application should not go to running state due to RWOP access mode: %w", err)
}
err = deletePod(name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
return fmt.Errorf("failed to delete application: %w", err)
}
app.Name = baseAppName
err = deletePVCAndApp("", f, pvc, app)
if err != nil {
return fmt.Errorf("failed to delete PVC and application: %w", err)
}
return nil
}

View File

@ -70,9 +70,13 @@ var (
appPath = rbdExamplePath + "pod.yaml"
rawPvcPath = rbdExamplePath + "raw-block-pvc.yaml"
rawAppPath = rbdExamplePath + "raw-block-pod.yaml"
rawAppRWOPPath = rbdExamplePath + "raw-block-pod-rwop.yaml"
rawPVCRWOPPath = rbdExamplePath + "raw-block-pvc-rwop.yaml"
pvcClonePath = rbdExamplePath + "pvc-restore.yaml"
pvcSmartClonePath = rbdExamplePath + "pvc-clone.yaml"
pvcBlockSmartClonePath = rbdExamplePath + "pvc-block-clone.yaml"
pvcRWOPPath = rbdExamplePath + "pvc-rwop.yaml"
appRWOPPath = rbdExamplePath + "pod-rwop.yaml"
appClonePath = rbdExamplePath + "pod-restore.yaml"
appSmartClonePath = rbdExamplePath + "pod-clone.yaml"
appBlockSmartClonePath = rbdExamplePath + "block-pod-clone.yaml"
@ -506,6 +510,74 @@ var _ = Describe("RBD", func() {
validateRBDImageCount(f, 0, defaultRBDPool)
})
By("create a Block mode RWOP PVC and bind it to more than one app", func() {
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
pvc, err := loadPVC(rawPVCRWOPPath)
if err != nil {
e2elog.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName
app, err := loadApp(rawAppRWOPPath)
if err != nil {
e2elog.Failf("failed to load application: %v", err)
}
app.Namespace = f.UniqueName
baseAppName := app.Name
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
e2elog.Failf("failed to create application: %v", err)
}
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
if err != nil {
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)
}
})
By("create a RWOP PVC and bind it to more than one app", func() {
if k8sVersionGreaterEquals(f.ClientSet, 1, 22) {
pvc, err := loadPVC(pvcRWOPPath)
if err != nil {
e2elog.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName
app, err := loadApp(appRWOPPath)
if err != nil {
e2elog.Failf("failed to load application: %v", err)
}
app.Namespace = f.UniqueName
baseAppName := app.Name
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 1, defaultRBDPool)
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
e2elog.Failf("failed to create application: %v", err)
}
err = validateRWOPPodCreation(f, pvc, app, baseAppName)
if err != nil {
e2elog.Failf("failed to validate RWOP pod creation: %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)
}
})
By("create an erasure coded PVC and bind it to an app", func() {
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {

View File

@ -1013,22 +1013,3 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)
return err
}
func recreateCSIRBDPods(f *framework.Framework) error {
err := deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)",
cephCSINamespace, false)
if err != nil {
return fmt.Errorf("failed to delete pods with labels: %w", err)
}
// wait for csi pods to come up
err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
return fmt.Errorf("timeout waiting for daemonset pods: %w", err)
}
err = waitForDeploymentComplete(f.ClientSet, rbdDeploymentName, cephCSINamespace, deployTimeout)
if err != nil {
return fmt.Errorf("timeout waiting for deployment to be in running state: %w", err)
}
return nil
}

View File

@ -60,6 +60,8 @@ const (
appLabel = "write-data-in-pod"
noError = ""
// labels/selector used to list/delete rbd pods.
rbdPodLabels = "app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)"
)
var (

View File

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Pod
metadata:
name: csi-cephfs-demo-rwop-pod
spec:
containers:
- name: web-server
image: docker.io/library/nginx:latest
volumeMounts:
- name: mypvc
mountPath: /var/lib/www
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: csi-cephfs-rwop-pvc
readOnly: false

View File

@ -0,0 +1,12 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: csi-cephfs-rwop-pvc
spec:
accessModes:
- ReadWriteOncePod
resources:
requests:
storage: 1Gi
storageClassName: csi-cephfs-sc

View File

@ -57,15 +57,17 @@ data:
aws-metadata-test: |-
{
"KMS_PROVIDER": "aws-metadata",
"KMS_SECRET_NAME": "ceph-csi-aws-credentials",
"IBM_KP_SECRET_NAME": "ceph-csi-aws-credentials",
"AWS_REGION": "us-west-2"
}
kp-metadata-test: |-
{
"KMS_PROVIDER": "kp-metadata",
"KMS_SECRET_NAME": "ceph-csi-kp-credentials",
"KP_SERVICE_INSTANCE_ID": "7abef064-01dd-4237-9ea5-8b3890970be3",
"KP_REGION": "us-south-2",
"IBM_KP_SECRET_NAME": "ceph-csi-kp-credentials",
"IBM_KP_SERVICE_INSTANCE_ID": "7abef064-01dd-4237-9ea5-8b3890970be3",
"IBM_KP_BASE_URL": "https://us-south.kms.cloud.ibm.com",
"IBM_KP_TOKEN_URL": ""https://iam.cloud.ibm.com/oidc/token",
"IBM_KP_REGION": "us-south-2",
}
metadata:
name: csi-kms-connection-details

View File

@ -7,7 +7,7 @@ kind: Secret
metadata:
name: ceph-csi-kp-credentials
stringData:
KP_SERVICE_API_KEY: "UhMN3Jko1pCpDPpFV65N8dYANBv5vF97QuNHqXVHmKa0"
KP_CUSTOMER_ROOT_KEY: "c7a9aa91-5cb5-48da-a821-e85c27b99d92"
KP_SESSION_TOKEN: ""
KP_CRK_ARN: ""
IBM_KP_SERVICE_API_KEY: "UhMN3Jko1pCpDPpFV65N8dYANBv5vF97QuNHqXVHmKa0"
IBM_KP_CUSTOMER_ROOT_KEY: "c7a9aa91-5cb5-48da-a821-e85c27b99d92"
IBM_KP_SESSION_TOKEN: ""
IBM_KP_CRK_ARN: ""

View File

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Pod
metadata:
name: csi-rbd-demo-fs-rwop-pod
spec:
containers:
- name: web-server
image: docker.io/library/nginx:latest
volumeMounts:
- name: mypvc
mountPath: /var/lib/www/html
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: rbd-rwop-pvc
readOnly: false

View File

@ -0,0 +1,12 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rbd-rwop-pvc
spec:
accessModes:
- ReadWriteOncePod
resources:
requests:
storage: 1Gi
storageClassName: csi-rbd-sc

View File

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Pod
metadata:
name: csi-rbd-demo-rwop-pod
spec:
containers:
- name: centos
image: quay.io/centos/centos:latest
command: ["/bin/sleep", "infinity"]
volumeDevices:
- name: data
devicePath: /dev/xvda
volumes:
- name: data
persistentVolumeClaim:
claimName: raw-block-rwop-pvc

View File

@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: raw-block-rwop-pvc
spec:
accessModes:
- ReadWriteOncePod
volumeMode: Block
resources:
requests:
storage: 1Gi
storageClassName: csi-rbd-sc

go.mod
View File

@ -6,10 +6,10 @@ require (
github.com/IBM/keyprotect-go-client v0.7.0
github.com/aws/aws-sdk-go v1.42.7
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.12.0
github.com/ceph/go-ceph v0.13.0
github.com/container-storage-interface/spec v1.5.0
github.com/csi-addons/replication-lib-utils v0.2.0
github.com/csi-addons/spec v0.1.2-0.20211123125058-fd968c478af7
github.com/csi-addons/spec v0.1.2-0.20211220115741-32fa508dadbe
github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0

go.sum
View File

@ -168,8 +168,8 @@ github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
github.com/ceph/go-ceph v0.12.0 h1:nlFgKQZXOFR4oMnzXsKwTr79Y6EYDwqTrpigICGy/Tw=
github.com/ceph/go-ceph v0.12.0/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY=
github.com/ceph/go-ceph v0.13.0 h1:69dgIPlNHD2OCz98T0benI4++vcnShGcpQK4RIALjw4=
github.com/ceph/go-ceph v0.13.0/go.mod h1:mafFpf5Vg8Ai8Bd+FAMvKBHLmtdpTXdRP/TNq8XWegY=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
@ -243,8 +243,8 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ
github.com/csi-addons/replication-lib-utils v0.2.0 h1:tGs42wfjkObbBo/98a3uxTFWEJ1dq5PIMqPWtdLd040=
github.com/csi-addons/replication-lib-utils v0.2.0/go.mod h1:ROQlEsc2EerVtc/K/C+6Hx8pqaQ9MVy9xFFpyKfI9lc=
github.com/csi-addons/spec v0.1.0/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/csi-addons/spec v0.1.2-0.20211123125058-fd968c478af7 h1:MW8Xb+AXbPndsaZpjsQxZOzB47DB+CNUo8XzrN0zRdQ=
github.com/csi-addons/spec v0.1.2-0.20211123125058-fd968c478af7/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/csi-addons/spec v0.1.2-0.20211220115741-32fa508dadbe h1:Q2sxgtdRV4Je1R2eLCUPrR/KQZxkSbesGrpCjl0/mU4=
github.com/csi-addons/spec v0.1.2-0.20211220115741-32fa508dadbe/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=

View File

@ -211,11 +211,7 @@ func (cs *ControllerServer) CreateVolume(
if vID != nil {
if sID != nil || pvID != nil {
// while cloning the volume the size is not populated properly to the new volume now.
// it will be fixed in cephfs soon with the parentvolume size. Till then by below
// resize we are making sure we return or satisfy the requested size by setting the size
// explicitly
err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
err = volOptions.ExpandVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
if err != nil {
purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false)
if purgeErr != nil {
@ -235,6 +231,7 @@ func (cs *ControllerServer) CreateVolume(
return nil, status.Error(codes.Internal, err.Error())
}
}
volumeContext := req.GetParameters()
volumeContext["subvolumeName"] = vID.FsSubvolName
volumeContext["subvolumePath"] = volOptions.RootPath

View File

@ -131,13 +131,14 @@ func CreateCloneFromSubvolume(
return cloneState.toError()
}
// This is a workaround to fix the sizing issue for cloned images
err = volOpt.ResizeVolume(ctx, cloneID, volOpt.Size)
err = volOpt.ExpandVolume(ctx, cloneID, volOpt.Size)
if err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
return err
}
// As we completed clone, remove the intermediate snap
if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
// In case the snap is already unprotected we get ErrSnapProtectionExist error code
@ -227,10 +228,8 @@ func CreateCloneFromSnapshot(
if cloneState != cephFSCloneComplete {
return cloneState.toError()
}
// The clonedvolume currently does not reflect the proper size due to an issue in cephfs
// however this is getting addressed in cephfs and the parentvolume size will be reflected
// in the new cloned volume too. Till then we are explicitly making the size set
err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
err = volOptions.ExpandVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
if err != nil {
log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)

View File

@ -191,6 +191,23 @@ func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.V
return nil
}
// ExpandVolume will expand the volume if the requested size is greater than
// the subvolume size.
func (vo *VolumeOptions) ExpandVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
// get the subvolume size for comparison with the requested size.
info, err := vo.GetSubVolumeInfo(ctx, volID)
if err != nil {
return err
}
// resize if the requested size is greater than the current size.
if vo.Size > info.BytesQuota {
log.DebugLog(ctx, "clone %s size %d is greater than requested size %d", volID, info.BytesQuota, bytesQuota)
err = vo.ResizeVolume(ctx, volID, bytesQuota)
}
return err
}
// ResizeVolume will try to use ceph fs subvolume resize command to resize the
// subvolume. If the command is not available as a fallback it will use
// CreateVolume to resize the subvolume.

View File

@ -361,6 +361,7 @@ func NewVolumeOptionsFromVolID(
if err == nil {
volOptions.RootPath = info.Path
volOptions.Features = info.Features
volOptions.Size = info.BytesQuota
}
if errors.Is(err, cerrors.ErrInvalidCommand) {
@ -580,6 +581,7 @@ func NewSnapshotOptionsFromID(
return &volOptions, nil, &sid, err
}
volOptions.Features = subvolInfo.Features
volOptions.Size = subvolInfo.BytesQuota
info, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
if err != nil {

View File

@ -103,10 +103,14 @@ func (fs *Driver) Run(conf *util.Config) {
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
})
fs.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
})
}
// Create gRPC servers

View File

@ -388,6 +388,13 @@ func (ns *NodeServer) NodeGetCapabilities(
},
},
},
{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
},
},
},
},
}, nil
}

View File

@ -0,0 +1,194 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networkfence
import (
"context"
"errors"
"fmt"
"net"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/csi-addons/spec/lib/go/fence"
)
const blocklistTime = "157784760"
// NetworkFence contains the CIDR blocks to be blocked.
type NetworkFence struct {
Cidr []string
Monitors string
cr *util.Credentials
}
// NewNetworkFence returns a NetworkFence struct populated from the network fence/unfence request.
func NewNetworkFence(
ctx context.Context,
cr *util.Credentials,
cidrs []*fence.CIDR,
fenceOptions map[string]string) (*NetworkFence, error) {
var err error
nwFence := &NetworkFence{}
nwFence.Cidr, err = GetCIDR(cidrs)
if err != nil {
return nil, fmt.Errorf("failed to get list of CIDRs: %w", err)
}
clusterID, err := util.GetClusterID(fenceOptions)
if err != nil {
return nil, fmt.Errorf("failed to fetch clusterID: %w", err)
}
nwFence.Monitors, _, err = util.GetMonsAndClusterID(ctx, clusterID, false)
if err != nil {
return nil, fmt.Errorf("failed to get monitors for clusterID %q: %w", clusterID, err)
}
nwFence.cr = cr
return nwFence, nil
}
// addCephBlocklist adds an IP to the Ceph OSD blocklist.
func (nf *NetworkFence) addCephBlocklist(ctx context.Context, ip string) error {
arg := []string{
"--id", nf.cr.ID,
"--keyfile=" + nf.cr.KeyFile,
"-m", nf.Monitors,
}
// TODO: add blocklist till infinity.
// Currently, ceph does not provide the functionality to blocklist IPs
// for infinite time. As a workaround, add a blocklist for 5 YEARS to
// represent infinity from ceph-csi side.
// At any point in this time, the IPs can be unblocked by an UnfenceClusterReq.
// This needs to be updated once ceph provides functionality for the same.
cmd := []string{"osd", "blocklist", "add", ip, blocklistTime}
cmd = append(cmd, arg...)
_, _, err := util.ExecCommand(ctx, "ceph", cmd...)
if err != nil {
return fmt.Errorf("failed to blocklist IP %q: %w", ip, err)
}
log.DebugLog(ctx, "blocklisted IP %q successfully", ip)
return nil
}
// AddNetworkFence blocks access for all the IPs in the IP range mentioned via the CIDR block
// using a network fence.
func (nf *NetworkFence) AddNetworkFence(ctx context.Context) error {
// for each CIDR block, convert it into a range of IPs so as to perform the blocklisting operation.
for _, cidr := range nf.Cidr {
// fetch the list of IPs from a CIDR block
hosts, err := getIPRange(cidr)
if err != nil {
return fmt.Errorf("failed to convert CIDR block %s to corresponding IP range: %w", cidr, err)
}
// add ceph blocklist for each IP in the range mentioned by the CIDR
for _, host := range hosts {
err = nf.addCephBlocklist(ctx, host)
if err != nil {
return err
}
}
}
return nil
}
// getIPRange returns a list of IPs from the IP range
// corresponding to a CIDR block.
func getIPRange(cidr string) ([]string, error) {
var hosts []string
netIP, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return nil, err
}
for ip := netIP.Mask(ipnet.Mask); ipnet.Contains(ip); incIP(ip) {
hosts = append(hosts, ip.String())
}
return hosts, nil
}
// incIP is a helper function for getIPRange(), incrementing
// IP values to return all IPs in a range.
func incIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
// Cidrs is a list of CIDR structs.
type Cidrs []*fence.CIDR
// GetCIDR converts a list of CIDR structs to a list of CIDR strings.
func GetCIDR(cidrs Cidrs) ([]string, error) {
var cidrList []string
for _, cidr := range cidrs {
cidrList = append(cidrList, cidr.Cidr)
}
if len(cidrList) < 1 {
return nil, errors.New("the CIDR cannot be empty")
}
return cidrList, nil
}
// removeCephBlocklist removes an IP from the Ceph OSD blocklist.
func (nf *NetworkFence) removeCephBlocklist(ctx context.Context, ip string) error {
arg := []string{
"--id", nf.cr.ID,
"--keyfile=" + nf.cr.KeyFile,
"-m", nf.Monitors,
}
cmd := []string{"osd", "blocklist", "rm", ip}
cmd = append(cmd, arg...)
_, stdErr, err := util.ExecCommand(ctx, "ceph", cmd...)
if err != nil {
return fmt.Errorf("failed to unblock IP %q: %v %w", ip, stdErr, err)
}
log.DebugLog(ctx, "unblocked IP %q successfully", ip)
return nil
}
// RemoveNetworkFence unblocks access for all the IPs in the IP range mentioned via the CIDR block
// using a network fence.
func (nf *NetworkFence) RemoveNetworkFence(ctx context.Context) error {
// for each CIDR block, convert it into a range of IPs so as to undo the blocklisting operation.
for _, cidr := range nf.Cidr {
// fetch the list of IPs from a CIDR block
hosts, err := getIPRange(cidr)
if err != nil {
return fmt.Errorf("failed to convert CIDR block %s to corresponding IP range", cidr)
}
// remove ceph blocklist for each IP in the range mentioned by the CIDR
for _, host := range hosts {
err := nf.removeCephBlocklist(ctx, host)
if err != nil {
return err
}
}
}
return nil
}
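
To make the fencing flow concrete, here is a small standalone sketch (not part of the ceph-csi sources): it expands a CIDR block the same way getIPRange does and prints the `ceph osd blocklist add` invocation that addCephBlocklist would issue for each address. The CIDR value is made up, and the monitor, ID, and keyfile arguments are omitted for brevity.

package main

import (
	"fmt"
	"net"
)

// blocklistSeconds mirrors the 5-year duration used above.
const blocklistSeconds = "157784760"

func main() {
	const cidr = "192.168.1.0/30" // illustrative block, covers four addresses

	ip, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		panic(err)
	}
	// one blocklist entry per IP in the range, as AddNetworkFence does
	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); incIP(ip) {
		fmt.Printf("ceph osd blocklist add %s %s\n", ip, blocklistSeconds)
	}
}

// incIP increments an IP address in place, mirroring the helper above.
func incIP(ip net.IP) {
	for j := len(ip) - 1; j >= 0; j-- {
		ip[j]++
		if ip[j] > 0 {
			break
		}
	}
}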

View File

@ -0,0 +1,53 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networkfence
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetIPRange(t *testing.T) {
t.Parallel()
tests := []struct {
cidr string
expectedIPs []string
}{
{
cidr: "192.168.1.0/31",
expectedIPs: []string{"192.168.1.0", "192.168.1.1"},
},
{
cidr: "10.0.0.0/30",
expectedIPs: []string{"10.0.0.0", "10.0.0.1", "10.0.0.2", "10.0.0.3"},
},
{
cidr: "fd4a:ecbc:cafd:4e49::/127",
expectedIPs: []string{"fd4a:ecbc:cafd:4e49::", "fd4a:ecbc:cafd:4e49::1"},
},
}
for _, tt := range tests {
ts := tt
t.Run(ts.cidr, func(t *testing.T) {
t.Parallel()
got, err := getIPRange(ts.cidr)
assert.NoError(t, err)
// validate that the IPs in the range match the expected list; fail otherwise.
assert.ElementsMatch(t, ts.expectedIPs, got)
})
}
}

View File

@ -75,6 +75,19 @@ func (is *IdentityServer) GetCapabilities(
Type: identity.Capability_Service_CONTROLLER_SERVICE,
},
},
},
&identity.Capability{
Type: &identity.Capability_ReclaimSpace_{
ReclaimSpace: &identity.Capability_ReclaimSpace{
Type: identity.Capability_ReclaimSpace_OFFLINE,
},
},
}, &identity.Capability{
Type: &identity.Capability_NetworkFence_{
NetworkFence: &identity.Capability_NetworkFence{
Type: identity.Capability_NetworkFence_NETWORK_FENCE,
},
},
})
}
@ -87,6 +100,13 @@ func (is *IdentityServer) GetCapabilities(
Type: identity.Capability_Service_NODE_SERVICE,
},
},
},
&identity.Capability{
Type: &identity.Capability_ReclaimSpace_{
ReclaimSpace: &identity.Capability_ReclaimSpace{
Type: identity.Capability_ReclaimSpace_ONLINE,
},
},
})
}

View File

@ -0,0 +1,114 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"context"
"errors"
nf "github.com/ceph/ceph-csi/internal/csi-addons/networkfence"
"github.com/ceph/ceph-csi/internal/util"
"github.com/csi-addons/spec/lib/go/fence"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// FenceControllerServer struct of rbd CSI driver with supported methods
// of CSI-addons networkfence controller service spec.
type FenceControllerServer struct {
*fence.UnimplementedFenceControllerServer
}
// NewFenceControllerServer creates a new FenceControllerServer which handles
// the Fence Controller Service requests from the CSI-Addons specification.
func NewFenceControllerServer() *FenceControllerServer {
return &FenceControllerServer{}
}
func (fcs *FenceControllerServer) RegisterService(server grpc.ServiceRegistrar) {
fence.RegisterFenceControllerServer(server, fcs)
}
// validateNetworkFenceReq checks the sanity of a fence/unfence cluster network request.
func validateNetworkFenceReq(fenceClients []*fence.CIDR, options map[string]string) error {
if len(fenceClients) == 0 {
return errors.New("CIDR block cannot be empty")
}
if value, ok := options["clusterID"]; !ok || value == "" {
return errors.New("missing or empty clusterID")
}
return nil
}
// FenceClusterNetwork blocks access to a CIDR block by creating a network fence.
// It adds the range of IPs to the OSD blocklist, which lets Ceph deny access
// to malicious clients and prevents data corruption.
func (fcs *FenceControllerServer) FenceClusterNetwork(
ctx context.Context,
req *fence.FenceClusterNetworkRequest) (*fence.FenceClusterNetworkResponse, error) {
err := validateNetworkFenceReq(req.GetCidrs(), req.Parameters)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
nwFence, err := nf.NewNetworkFence(ctx, cr, req.Cidrs, req.GetParameters())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
err = nwFence.AddNetworkFence(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to fence CIDR block %q: %s", nwFence.Cidr, err.Error())
}
return &fence.FenceClusterNetworkResponse{}, nil
}
// UnfenceClusterNetwork unblocks the access to a CIDR block by removing the network fence.
func (fcs *FenceControllerServer) UnfenceClusterNetwork(
ctx context.Context,
req *fence.UnfenceClusterNetworkRequest) (*fence.UnfenceClusterNetworkResponse, error) {
err := validateNetworkFenceReq(req.GetCidrs(), req.Parameters)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
nwFence, err := nf.NewNetworkFence(ctx, cr, req.Cidrs, req.GetParameters())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
err = nwFence.RemoveNetworkFence(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to unfence CIDR block %q: %s", nwFence.Cidr, err.Error())
}
return &fence.UnfenceClusterNetworkResponse{}, nil
}
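
For reference, a hedged sketch of the request shape a CSI-Addons client might send to FenceClusterNetwork. Every value below is a placeholder (the clusterID must resolve to monitors in the ceph-csi configuration, and the secret keys depend on how the driver resolves user credentials), and a reachable Ceph cluster is still required for the call to do anything.

package main

import (
	"fmt"

	"github.com/csi-addons/spec/lib/go/fence"
)

func main() {
	req := &fence.FenceClusterNetworkRequest{
		Parameters: map[string]string{
			"clusterID": "example-cluster-id", // placeholder, must exist in the ceph-csi config
		},
		// placeholder secret material; the exact keys depend on the driver's
		// credential handling.
		Secrets: map[string]string{"userID": "admin", "userKey": "***"},
		Cidrs: []*fence.CIDR{
			{Cidr: "10.90.89.0/28"}, // network to fence
		},
	}
	fmt.Printf("fencing %d CIDR block(s) for cluster %s\n",
		len(req.Cidrs), req.Parameters["clusterID"])
}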

View File

@ -0,0 +1,57 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
fence "github.com/csi-addons/spec/lib/go/fence"
)
// TestFenceClusterNetwork is a minimal test for the FenceClusterNetwork()
// procedure. During unit-testing, there is no Ceph cluster available, so
// actual operations can not be performed.
func TestFenceClusterNetwork(t *testing.T) {
t.Parallel()
controller := NewFenceControllerServer()
req := &fence.FenceClusterNetworkRequest{
Parameters: map[string]string{},
Secrets: nil,
Cidrs: nil,
}
_, err := controller.FenceClusterNetwork(context.TODO(), req)
assert.Error(t, err)
}
// TestUnfenceClusterNetwork is a minimal test for the UnfenceClusterNetwork()
// procedure. During unit-testing, there is no Ceph cluster available, so actual
// operations can not be performed.
func TestUnfenceClusterNetwork(t *testing.T) {
t.Parallel()
controller := NewFenceControllerServer()
req := &fence.UnfenceClusterNetworkRequest{
Parameters: map[string]string{},
Secrets: nil,
Cidrs: nil,
}
_, err := controller.UnfenceClusterNetwork(context.TODO(), req)
assert.Error(t, err)
}

View File

@ -0,0 +1,147 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"context"
"fmt"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
rbdutil "github.com/ceph/ceph-csi/internal/rbd"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
rs "github.com/csi-addons/spec/lib/go/reclaimspace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ReclaimSpaceControllerServer struct of rbd CSI driver with supported methods
// of CSI-addons reclaimspace controller service spec.
type ReclaimSpaceControllerServer struct {
*rs.UnimplementedReclaimSpaceControllerServer
}
// NewReclaimSpaceControllerServer creates a new ReclaimSpaceControllerServer which
// handles the ReclaimSpace Controller Service requests from the CSI-Addons specification.
func NewReclaimSpaceControllerServer() *ReclaimSpaceControllerServer {
return &ReclaimSpaceControllerServer{}
}
func (rscs *ReclaimSpaceControllerServer) RegisterService(server grpc.ServiceRegistrar) {
rs.RegisterReclaimSpaceControllerServer(server, rscs)
}
func (rscs *ReclaimSpaceControllerServer) ControllerReclaimSpace(
ctx context.Context,
req *rs.ControllerReclaimSpaceRequest) (*rs.ControllerReclaimSpaceResponse, error) {
volumeID := req.GetVolumeId()
if volumeID == "" {
return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
}
cr, err := util.NewUserCredentials(req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
rbdVol, err := rbdutil.GenVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
if err != nil {
return nil, status.Errorf(codes.Aborted, "failed to find volume with ID %q: %s", volumeID, err.Error())
}
defer rbdVol.Destroy()
err = rbdVol.Sparsify()
if err != nil {
// TODO: check for different error codes?
return nil, status.Errorf(codes.Internal, "failed to sparsify volume %q: %s", rbdVol, err.Error())
}
return &rs.ControllerReclaimSpaceResponse{}, nil
}
// ReclaimSpaceNodeServer struct of rbd CSI driver with supported methods
// of CSI-addons reclaimspace node service spec.
type ReclaimSpaceNodeServer struct {
*rs.UnimplementedReclaimSpaceNodeServer
}
// NewReclaimSpaceNodeServer creates a new ReclaimSpaceNodeServer which handles the
// ReclaimSpace Node Service requests from the CSI-Addons specification.
func NewReclaimSpaceNodeServer() *ReclaimSpaceNodeServer {
return &ReclaimSpaceNodeServer{}
}
func (rsns *ReclaimSpaceNodeServer) RegisterService(server grpc.ServiceRegistrar) {
rs.RegisterReclaimSpaceNodeServer(server, rsns)
}
// NodeReclaimSpace runs fstrim or blkdiscard on the path where the volume is
// mounted or attached. When a volume with multi-node permissions is detected,
// an error is returned to prevent potential data corruption.
func (rsns *ReclaimSpaceNodeServer) NodeReclaimSpace(
ctx context.Context,
req *rs.NodeReclaimSpaceRequest) (*rs.NodeReclaimSpaceResponse, error) {
// volumeID is a required attribute; it is part of the path on which the
// space-reducing command is run
// nolint:ifshort // volumeID is incorrectly assumed to be used only once
volumeID := req.GetVolumeId()
if volumeID == "" {
return nil, status.Error(codes.InvalidArgument, "empty volume ID in request")
}
// path can either be the staging path on the node, or the volume path
// inside an application container
path := req.GetStagingTargetPath()
if path == "" {
path = req.GetVolumePath()
if path == "" {
return nil, status.Error(
codes.InvalidArgument,
"required parameter staging_target_path or volume_path is not set")
}
} else {
// append the right staging location used by this CSI-driver
path = fmt.Sprintf("%s/%s", path, volumeID)
}
// do not allow RWX block-mode volumes, danger of data corruption
isBlock, isMultiNode := csicommon.IsBlockMultiNode([]*csi.VolumeCapability{req.GetVolumeCapability()})
if isMultiNode {
return nil, status.Error(codes.Unimplemented, "multi-node space reclaim is not supported")
}
cmd := "fstrim"
if isBlock {
cmd = "blkdiscard"
}
_, stderr, err := util.ExecCommand(ctx, cmd, path)
if err != nil {
return nil, status.Errorf(
codes.Internal,
"failed to execute %q on %q (%s): %s",
cmd,
path,
err.Error(),
stderr)
}
return &rs.NodeReclaimSpaceResponse{}, nil
}

View File

@ -0,0 +1,62 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
rs "github.com/csi-addons/spec/lib/go/reclaimspace"
)
// TestControllerReclaimSpace is a minimal test for the
// ControllerReclaimSpace() procedure. During unit-testing, there is no Ceph
// cluster available, so actual operations can not be performed.
func TestControllerReclaimSpace(t *testing.T) {
t.Parallel()
controller := NewReclaimSpaceControllerServer()
req := &rs.ControllerReclaimSpaceRequest{
VolumeId: "",
Secrets: nil,
}
_, err := controller.ControllerReclaimSpace(context.TODO(), req)
assert.Error(t, err)
}
// TestNodeReclaimSpace is a minimal test for the NodeReclaimSpace() procedure.
// During unit-testing, there is no Ceph cluster available, so actual
// operations can not be performed.
func TestNodeReclaimSpace(t *testing.T) {
t.Parallel()
node := NewReclaimSpaceNodeServer()
req := &rs.NodeReclaimSpaceRequest{
VolumeId: "",
VolumePath: "",
VolumeCapability: nil,
Secrets: nil,
}
_, err := node.NodeReclaimSpace(context.TODO(), req)
assert.Error(t, err)
}

View File

@ -25,6 +25,7 @@ import (
"google.golang.org/grpc"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util/log"
)
@ -86,7 +87,7 @@ func (cas *CSIAddonsServer) RegisterService(svc CSIAddonsService) {
// returned.
func (cas *CSIAddonsServer) Start() error {
// create the gRPC server and register services
cas.server = grpc.NewServer()
cas.server = grpc.NewServer(csicommon.NewMiddlewareServerOption(false))
for _, svc := range cas.services {
svc.RegisterService(cas.server)

View File

@ -27,7 +27,6 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/csi-addons/spec/lib/go/replication"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
@ -104,12 +103,8 @@ func (s *nonBlockingGRPCServer) serve(endpoint, hstOptions string, srv Servers,
klog.Fatalf("Failed to listen: %v", err)
}
middleWare := []grpc.UnaryServerInterceptor{contextIDInjector, logGRPC, panicHandler}
if metrics {
middleWare = append(middleWare, grpc_prometheus.UnaryServerInterceptor)
}
opts := []grpc.ServerOption{
grpc_middleware.WithUnaryServerChain(middleWare...),
NewMiddlewareServerOption(metrics),
}
server := grpc.NewServer(opts...)

View File

@ -30,6 +30,8 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
rp "github.com/csi-addons/replication-lib-utils/protosanitizer"
"github.com/csi-addons/spec/lib/go/replication"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -105,6 +107,18 @@ func isReplicationRequest(req interface{}) bool {
return isReplicationRequest
}
// NewMiddlewareServerOption creates a new grpc.ServerOption that configures a
// common format for log messages and other gRPC related handlers.
func NewMiddlewareServerOption(withMetrics bool) grpc.ServerOption {
middleWare := []grpc.UnaryServerInterceptor{contextIDInjector, logGRPC, panicHandler}
if withMetrics {
middleWare = append(middleWare, grpc_prometheus.UnaryServerInterceptor)
}
return grpc_middleware.WithUnaryServerChain(middleWare...)
}
func getReqID(req interface{}) string {
// if req is nil empty string will be returned
reqID := ""
@ -306,3 +320,72 @@ func IsBlockMultiNode(caps []*csi.VolumeCapability) (bool, bool) {
return isBlock, isMultiNode
}
// IsFileRWO returns true when the passed-in capabilities contain a file-mode (mount)
// capability with a single-node access mode (RWO or RWOP).
func IsFileRWO(caps []*csi.VolumeCapability) bool {
// The return value is true when the volume is file mode and the capabilities are of an RWO
// kind, i.e. single node but flexible to have one or more writers. Callers also use this as a
// validation to preserve the backward compatibility we had with file-mode RWO volumes.
// Ideally a SINGLE_NODE_WRITER check would be good enough, however the more granular checks
// could help us in the future, so they are kept here as an additional measure.
for _, cap := range caps {
if cap.AccessMode != nil {
if cap.GetMount() != nil {
switch cap.AccessMode.Mode { //nolint:exhaustive // only check what we want
case csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER:
return true
}
}
}
}
return false
}
// IsReaderOnly returns true only when the access mode is reader-only, regardless of file
// or block mode.
func IsReaderOnly(caps []*csi.VolumeCapability) bool {
for _, cap := range caps {
if cap.AccessMode != nil {
switch cap.AccessMode.Mode { //nolint:exhaustive // only check what we want
case csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:
return true
}
}
}
return false
}
// IsBlockMultiWriter validates the volume capability slice against the access modes and access type.
// The first return value is set to true when a capability allows multiple writers, and the
// second is set to true when the request is for a block-mode volume.
func IsBlockMultiWriter(caps []*csi.VolumeCapability) (bool, bool) {
// multiWriter has been set and returned after validating multi writer caps regardless of
// single or multi node access mode. The caps check is agnostic to whether it is a filesystem or block
// mode volume.
var multiWriter bool
// block has been set and returned if the passed in capability is of block volume mode.
var block bool
for _, cap := range caps {
if cap.AccessMode != nil {
switch cap.AccessMode.Mode { //nolint:exhaustive // only check what we want
case csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER:
multiWriter = true
}
}
if cap.GetBlock() != nil {
block = true
}
}
return multiWriter, block
}
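
As a usage sketch (hypothetical, not taken from the driver sources; it assumes the same package and csi import as the file above), the three helpers can be combined when validating requested capabilities:

// isSingleNodeSafe is an illustrative helper, not part of ceph-csi: multi-writer
// block volumes stay allowed, while a multi-writer filesystem volume is only
// accepted when it is confined to a single node (RWO/RWOP) or is read-only.
func isSingleNodeSafe(caps []*csi.VolumeCapability) bool {
	multiWriter, block := IsBlockMultiWriter(caps)
	if multiWriter && !block {
		return IsFileRWO(caps) || IsReaderOnly(caps)
	}
	return true
}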

View File

@ -177,3 +177,421 @@ func TestIsBlockMultiNode(t *testing.T) {
assert.Equal(t, isMultiNode, test.isMultiNode, test.name)
}
}
func TestIsFileRWO(t *testing.T) {
t.Parallel()
tests := []struct {
name string
caps []*csi.VolumeCapability
rwoFile bool
}{
{
name: "non valid",
caps: []*csi.VolumeCapability{
{
AccessMode: nil,
AccessType: nil,
},
},
rwoFile: false,
},
{
name: "single writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
},
},
},
rwoFile: true,
},
{
name: "single node multi writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
},
},
},
rwoFile: true,
},
{
name: "multi node multi writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
rwoFile: false,
},
{
name: "multi node multi reader FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
},
},
},
rwoFile: false,
},
{
name: "single node reader FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
},
},
rwoFile: false,
},
}
for _, tt := range tests {
newtt := tt
t.Run(newtt.name, func(t *testing.T) {
t.Parallel()
rwoFile := IsFileRWO(newtt.caps)
if rwoFile != newtt.rwoFile {
t.Errorf("IsFileRWO() rwofile = %v, want %v", rwoFile, newtt.rwoFile)
}
})
}
}
func TestIsBlockMultiWriter(t *testing.T) {
t.Parallel()
tests := []struct {
name string
caps []*csi.VolumeCapability
multiWriter bool
block bool
}{
{
name: "non valid",
caps: []*csi.VolumeCapability{
{
AccessMode: nil,
AccessType: nil,
},
},
multiWriter: false,
block: false,
},
{
name: "multi node multi writer block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
multiWriter: true,
block: true,
},
{
name: "single node multi writer block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
},
},
},
multiWriter: true,
block: true,
},
{
name: "single writer block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
},
},
},
multiWriter: false,
block: true,
},
{
name: "single writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
},
},
},
multiWriter: false,
block: false,
},
{
name: "single node multi writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
},
},
},
multiWriter: true,
block: false,
},
{
name: "multi node multi writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
multiWriter: true,
block: false,
},
{
name: "multi node multi reader FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
},
},
},
multiWriter: false,
block: false,
},
{
name: "single node reader FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
},
},
multiWriter: false,
block: false,
},
{
name: "multi node reader block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
},
},
},
multiWriter: false,
block: true,
},
{
name: "single node reader block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
},
},
multiWriter: false,
block: true,
},
}
for _, tt := range tests {
newtt := tt
t.Run(newtt.name, func(t *testing.T) {
t.Parallel()
multiWriter, block := IsBlockMultiWriter(newtt.caps)
if multiWriter != newtt.multiWriter {
t.Errorf("IsBlockMultiWriter() multiWriter = %v, want %v", multiWriter, newtt.multiWriter)
}
if block != newtt.block {
t.Errorf("IsBlockMultiWriter block = %v, want %v", block, newtt.block)
}
})
}
}
func TestIsReaderOnly(t *testing.T) {
t.Parallel()
tests := []struct {
name string
caps []*csi.VolumeCapability
roOnly bool
}{
{
name: "non valid",
caps: []*csi.VolumeCapability{
{
AccessMode: nil,
AccessType: nil,
},
},
roOnly: false,
},
{
name: "single writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
},
},
},
roOnly: false,
},
{
name: "single node multi writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
},
},
},
roOnly: false,
},
{
name: "multi node multi writer FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
roOnly: false,
},
{
name: "multi node multi reader FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
},
},
},
roOnly: true,
},
{
name: "single node reader FS mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
},
},
roOnly: true,
},
{
name: "multi node reader block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
},
},
},
roOnly: true,
},
{
name: "single node reader block mode",
caps: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
},
},
roOnly: true,
},
}
for _, tt := range tests {
newtt := tt
t.Run(newtt.name, func(t *testing.T) {
t.Parallel()
roOnly := IsReaderOnly(newtt.caps)
if roOnly != newtt.roOnly {
t.Errorf("isReadOnly() roOnly = %v, want %v", roOnly, newtt.roOnly)
}
})
}
}
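For orientation, a minimal sketch of how a controller-side caller combines these three helpers, mirroring the parseVolCreateRequest change further down in this diff. It assumes in-tree compilation (the csicommon package is internal, and its import path is taken from this repository); validateCaps itself is a hypothetical wrapper, not an existing function.
package rbd
import (
	"errors"
	"github.com/container-storage-interface/spec/lib/go/csi"
	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
)
// validateCaps is a hypothetical wrapper showing how the helpers tested above
// are combined: reject requests that are neither block, RWO-file, nor
// read-only, and remember whether in-use checks can be skipped.
func validateCaps(caps []*csi.VolumeCapability) (skipInUseCheck bool, err error) {
	isMultiWriter, isBlock := csicommon.IsBlockMultiWriter(caps)
	isRWOFile := csicommon.IsFileRWO(caps)
	isROOnly := csicommon.IsReaderOnly(caps)
	if !isRWOFile && !isBlock && !isROOnly {
		return false, errors.New("multi node access modes are only supported on rbd `block` type volumes")
	}
	// multi-writer block volumes skip the in-use checks, as in parseVolCreateRequest
	return isMultiWriter && isBlock, nil
}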

View File

@ -33,7 +33,7 @@ const (
// keyProtectMetadataDefaultSecretsName is the default name of the Kubernetes Secret
// that contains the credentials to access the Key Protect KMS. The name of
// the Secret can be configured by setting the `KMS_SECRET_NAME`
// the Secret can be configured by setting the `IBM_KP_SECRET_NAME`
// option.
//
// #nosec:G101, value not credential, just references token.
@ -43,17 +43,18 @@ const (
// the credentials to access the Key Protect KMS.
//
// #nosec:G101, no hardcoded secret, this is a configuration key.
keyProtectSecretNameKey = "KMS_SECRET_NAME"
keyProtectRegionKey = "KP_REGION"
keyProtectSecretNameKey = "IBM_KP_SECRET_NAME"
keyProtectRegionKey = "IBM_KP_REGION"
keyProtectServiceInstanceID = "KP_SERVICE_INSTANCE_ID"
keyProtectServiceInstanceID = "IBM_KP_SERVICE_INSTANCE_ID"
keyProtectServiceBaseURL = "IBM_KP_BASE_URL"
keyProtectServiceTokenURL = "IBM_KP_TOKEN_URL" //nolint:gosec // only configuration key
// The following options are part of the Kubernetes Secrets.
// #nosec:G101, no hardcoded secrets, only configuration keys.
keyProtectServiceAPIKey = "KP_SERVICE_API_KEY"
KeyProtectCustomerRootKey = "KP_CUSTOMER_ROOT_KEY"
keyProtectSessionToken = "KP_SESSION_TOKEN"
keyProtectCRK = "KP_CRK_ARN"
keyProtectServiceAPIKey = "IBM_KP_SERVICE_API_KEY"
KeyProtectCustomerRootKey = "IBM_KP_CUSTOMER_ROOT_KEY"
keyProtectSessionToken = "IBM_KP_SESSION_TOKEN" //nolint:gosec // only configuration key
keyProtectCRK = "IBM_KP_CRK_ARN"
)
var _ = RegisterProvider(Provider{
@ -72,6 +73,8 @@ type KeyProtectKMS struct {
serviceAPIKey string
customerRootKey string
serviceInstanceID string
baseURL string
tokenURL string
region string
sessionToken string
crk string
@ -94,6 +97,20 @@ func initKeyProtectKMS(args ProviderInitArgs) (EncryptionKMS, error) {
return nil, err
}
err = setConfigString(&kms.baseURL, args.Config, keyProtectServiceBaseURL)
if errors.Is(err, errConfigOptionInvalid) {
return nil, err
} else if errors.Is(err, errConfigOptionMissing) {
kms.baseURL = kp.DefaultBaseURL
}
err = setConfigString(&kms.tokenURL, args.Config, keyProtectServiceTokenURL)
if errors.Is(err, errConfigOptionInvalid) {
return nil, err
} else if errors.Is(err, errConfigOptionMissing) {
kms.tokenURL = kp.DefaultTokenURL
}
// read the Kubernetes Secret with credentials
secrets, err := kms.getSecrets()
if err != nil {
@ -169,9 +186,10 @@ func (kms *KeyProtectKMS) RequiresDEKStore() DEKStoreType {
}
func (kms *KeyProtectKMS) getService() error {
// Use your Service API Key and your KeyProtect Service Instance ID to create a ClientConfig
// Use Service API Key and KeyProtect Service Instance ID to create a ClientConfig
cc := kp.ClientConfig{
BaseURL: kp.DefaultBaseURL,
BaseURL: kms.baseURL,
TokenURL: kms.tokenURL,
APIKey: kms.serviceAPIKey,
InstanceID: kms.serviceInstanceID,
}
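For context, a hedged sketch of how the two new options feed the Key Protect client configuration; the API key and instance ID values are placeholders, and kp.New/kp.DefaultTransport are used as provided by the IBM/keyprotect-go-client library vendored here.
package main
import (
	"fmt"
	kp "github.com/IBM/keyprotect-go-client"
)
func main() {
	// IBM_KP_BASE_URL / IBM_KP_TOKEN_URL map to BaseURL / TokenURL below;
	// when those config options are missing, the code above falls back to the
	// library defaults, which is what this sketch does as well.
	cc := kp.ClientConfig{
		BaseURL:    kp.DefaultBaseURL,
		TokenURL:   kp.DefaultTokenURL,
		APIKey:     "example-service-api-key",     // placeholder, normally from the Kubernetes Secret
		InstanceID: "example-service-instance-id", // placeholder, normally from the KMS ConfigMap
	}
	client, err := kp.New(cc, kp.DefaultTransport())
	if err != nil {
		panic(err)
	}
	fmt.Printf("Key Protect client ready: %T\n", client)
}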

View File

@ -58,18 +58,18 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
case errors.Is(err, ErrSnapNotFound):
// check temporary image needs flatten, if yes add task to flatten the
// temporary clone
err = tempClone.flattenRbdImage(ctx, rv.conn.Creds, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = tempClone.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return false, err
}
// as the snapshot is not present, create new snapshot, clone and
// delete the temporary snapshot
err = createRBDClone(ctx, tempClone, rv, snap, rv.conn.Creds)
err = createRBDClone(ctx, tempClone, rv, snap)
if err != nil {
return false, err
}
// check image needs flatten, if yes add task to flatten the clone
err = rv.flattenRbdImage(ctx, rv.conn.Creds, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = rv.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return false, err
}
@ -115,7 +115,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
return false, err
}
// check image needs flatten, if yes add task to flatten the clone
err = rv.flattenRbdImage(ctx, rv.conn.Creds, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = rv.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return false, err
}
@ -212,14 +212,14 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
cloneSnap.Pool = rv.Pool
// create snapshot and temporary clone and delete snapshot
err := createRBDClone(ctx, parentVol, tempClone, tempSnap, rv.conn.Creds)
err := createRBDClone(ctx, parentVol, tempClone, tempSnap)
if err != nil {
return err
}
defer func() {
if err != nil || errClone != nil {
cErr := cleanUpSnapshot(ctx, tempClone, cloneSnap, rv, rv.conn.Creds)
cErr := cleanUpSnapshot(ctx, tempClone, cloneSnap, rv)
if cErr != nil {
log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", cloneSnap, tempClone, cErr)
}
@ -228,7 +228,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
if err != nil || errFlatten != nil {
if !errors.Is(errFlatten, ErrFlattenInProgress) {
// cleanup snapshot
cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone, rv.conn.Creds)
cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone)
if cErr != nil {
log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempSnap, tempClone, cErr)
}
@ -243,7 +243,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
}
} else {
// flatten clone
errFlatten = tempClone.flattenRbdImage(ctx, rv.conn.Creds, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
errFlatten = tempClone.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if errFlatten != nil {
return errFlatten
}
@ -251,7 +251,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
// create snap of temp clone from temporary cloned image
// create final clone
// delete snap of temp clone
errClone = createRBDClone(ctx, tempClone, rv, cloneSnap, rv.conn.Creds)
errClone = createRBDClone(ctx, tempClone, rv, cloneSnap)
if errClone != nil {
// set errFlatten error to cleanup temporary snapshot and temporary clone
errFlatten = errors.New("failed to create user requested cloned image")
@ -288,11 +288,11 @@ func (rv *rbdVolume) flattenCloneImage(ctx context.Context) error {
}
err := tempClone.getImageInfo()
if err == nil {
return tempClone.flattenRbdImage(ctx, tempClone.conn.Creds, false, hardLimit, softLimit)
return tempClone.flattenRbdImage(ctx, false, hardLimit, softLimit)
}
if !errors.Is(err, ErrImageNotFound) {
return err
}
return rv.flattenRbdImage(ctx, rv.conn.Creds, false, hardLimit, softLimit)
return rv.flattenRbdImage(ctx, false, hardLimit, softLimit)
}

View File

@ -101,11 +101,17 @@ func (cs *ControllerServer) parseVolCreateRequest(
req *csi.CreateVolumeRequest) (*rbdVolume, error) {
// TODO (sbezverk) Last check for not exceeding total storage capacity
// RO modes need to be handled independently (ie right now even if access mode is RO, they'll be RW upon attach)
isBlock, isMultiNode := csicommon.IsBlockMultiNode(req.VolumeCapabilities)
// the capability check below verifies whether the request asks for a {SINGLE_NODE or MULTI_NODE} writer;
// the `isMultiWriter` flag is set accordingly.
isMultiWriter, isBlock := csicommon.IsBlockMultiWriter(req.VolumeCapabilities)
// isRWOFile is set when the capabilities request an RWO (single-writer) filesystem PVC.
isRWOFile := csicommon.IsFileRWO(req.VolumeCapabilities)
// isROOnly is set when the capabilities request read-only access.
isROOnly := csicommon.IsReaderOnly(req.VolumeCapabilities)
// We want to fail early if the user is trying to create a RWX on a non-block type device
if isMultiNode && !isBlock {
if !isRWOFile && !isBlock && !isROOnly {
return nil, status.Error(
codes.InvalidArgument,
"multi node access modes are only supported on rbd `block` type volumes")
@ -115,11 +121,13 @@ func (cs *ControllerServer) parseVolCreateRequest(
return nil, status.Error(codes.InvalidArgument, "missing required parameter imageFeatures")
}
// if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks
// if it's NOT SINGLE_NODE_WRITER, and it's BLOCK we'll set the parameter to ignore the in-use checks
rbdVol, err := genVolFromVolumeOptions(
ctx,
req.GetParameters(), req.GetSecrets(),
(isMultiNode && isBlock), false)
req.GetParameters(),
req.GetSecrets(),
isMultiWriter && isBlock,
false)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
@ -363,7 +371,7 @@ func (cs *ControllerServer) repairExistingVolume(ctx context.Context, req *csi.C
rbdVol,
err)
} else if !thick {
err = deleteImage(ctx, rbdVol, cr)
err = rbdVol.deleteImage(ctx)
if err != nil {
return nil, status.Errorf(codes.Aborted, "failed to remove partially cloned volume %q: %s", rbdVol, err)
}
@ -434,7 +442,7 @@ func cleanupThickClone(ctx context.Context,
parentVol *rbdVolume,
rbdSnap *rbdSnapshot,
cr *util.Credentials) error {
err := cleanUpSnapshot(ctx, parentVol, rbdSnap, rbdVol, cr)
err := cleanUpSnapshot(ctx, parentVol, rbdSnap, rbdVol)
if err != nil {
return status.Errorf(codes.Internal, "failed to remove partially cloned volume %q: %s", rbdVol, err)
}
@ -521,12 +529,12 @@ func flattenTemporaryClonedImages(ctx context.Context, rbdVol *rbdVolume, cr *ut
// return success; if the hardlimit is reached it starts a task to flatten the
// image and returns Aborted.
func checkFlatten(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
err := rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err := rbdVol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
if errors.Is(err, ErrFlattenInProgress) {
return status.Error(codes.Aborted, err.Error())
}
if errDefer := deleteImage(ctx, rbdVol, cr); errDefer != nil {
if errDefer := rbdVol.deleteImage(ctx); errDefer != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, errDefer)
return status.Error(codes.Internal, err.Error())
@ -660,7 +668,7 @@ func (cs *ControllerServer) createBackingImage(
defer func() {
if err != nil {
if !errors.Is(err, ErrFlattenInProgress) {
if deleteErr := deleteImage(ctx, rbdVol, cr); deleteErr != nil {
if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
}
}
@ -672,7 +680,7 @@ func (cs *ControllerServer) createBackingImage(
}
if rbdSnap != nil {
err = rbdVol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = rbdVol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
log.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err)
@ -919,7 +927,7 @@ func cleanupRBDImage(ctx context.Context,
// delete the temporary rbd image created as part of volume clone during
// create volume
tempClone := rbdVol.generateTempClone()
err = deleteImage(ctx, tempClone, cr)
err = tempClone.deleteImage(ctx)
if err != nil {
if errors.Is(err, ErrImageNotFound) {
err = tempClone.ensureImageCleanup(ctx)
@ -937,7 +945,7 @@ func cleanupRBDImage(ctx context.Context,
// Deleting rbd image
log.DebugLog(ctx, "deleting image %s", rbdVol.RbdImageName)
if err = deleteImage(ctx, rbdVol, cr); err != nil {
if err = rbdVol.deleteImage(ctx); err != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v",
rbdVol, err)
@ -1142,7 +1150,7 @@ func cloneFromSnapshot(
}
}
err = vol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = vol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if errors.Is(err, ErrFlattenInProgress) {
// if flattening is in progress, return error and do not cleanup
return nil, status.Errorf(codes.Internal, err.Error())
@ -1210,7 +1218,7 @@ func (cs *ControllerServer) doSnapshotClone(
return cloneRbd, err
}
err = createRBDClone(ctx, parentVol, cloneRbd, rbdSnap, cr)
err = createRBDClone(ctx, parentVol, cloneRbd, rbdSnap)
if err != nil {
log.ErrorLog(ctx, "failed to create snapshot: %v", err)
@ -1221,7 +1229,7 @@ func (cs *ControllerServer) doSnapshotClone(
if err != nil {
if !errors.Is(err, ErrFlattenInProgress) {
// cleanup clone and snapshot
errCleanUp := cleanUpSnapshot(ctx, cloneRbd, rbdSnap, cloneRbd, cr)
errCleanUp := cleanUpSnapshot(ctx, cloneRbd, rbdSnap, cloneRbd)
if errCleanUp != nil {
log.ErrorLog(ctx, "failed to cleanup snapshot and clone: %v", errCleanUp)
}
@ -1287,7 +1295,7 @@ func (cs *ControllerServer) doSnapshotClone(
return cloneRbd, err
}
err = cloneRbd.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = cloneRbd.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return cloneRbd, err
}
@ -1387,7 +1395,7 @@ func (cs *ControllerServer) DeleteSnapshot(
rbdVol.ImageID = rbdSnap.ImageID
// update parent name to delete the snapshot
rbdSnap.RbdImageName = rbdVol.RbdImageName
err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol, cr)
err = cleanUpSnapshot(ctx, rbdVol, rbdSnap, rbdVol)
if err != nil {
log.ErrorLog(ctx, "failed to delete image: %v", err)

44
internal/rbd/diskusage.go Normal file
View File

@ -0,0 +1,44 @@
/*
Copyright 2021 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
)
// Sparsify checks the size of the objects in the RBD image and calls
// rbd_sparsify() to free zero-filled blocks and reduce the storage consumption
// of the image.
func (ri *rbdImage) Sparsify() error {
image, err := ri.open()
if err != nil {
return err
}
defer image.Close()
imageInfo, err := image.Stat()
if err != nil {
return err
}
err = image.Sparsify(1 << imageInfo.Order)
if err != nil {
return fmt.Errorf("failed to sparsify image: %w", err)
}
return nil
}
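As an illustration of the same call pattern outside the driver, a standalone sketch using go-ceph directly; the pool and image names are placeholders, and depending on the vendored go-ceph version Image.Sparsify may require the ceph_preview build tag.
package main
import (
	"fmt"
	"github.com/ceph/go-ceph/rados"
	librbd "github.com/ceph/go-ceph/rbd"
)
func main() {
	conn, err := rados.NewConn()
	if err != nil {
		panic(err)
	}
	if err = conn.ReadDefaultConfigFile(); err != nil {
		panic(err)
	}
	if err = conn.Connect(); err != nil {
		panic(err)
	}
	defer conn.Shutdown()
	ioctx, err := conn.OpenIOContext("replicapool") // placeholder pool name
	if err != nil {
		panic(err)
	}
	defer ioctx.Destroy()
	image, err := librbd.OpenImage(ioctx, "csi-vol-example", librbd.NoSnapshot) // placeholder image name
	if err != nil {
		panic(err)
	}
	defer image.Close()
	info, err := image.Stat()
	if err != nil {
		panic(err)
	}
	// same sparse-size choice as Sparsify() above: one RBD object
	if err = image.Sparsify(1 << info.Order); err != nil {
		panic(err)
	}
	fmt.Println("image sparsified")
}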

View File

@ -125,6 +125,8 @@ func (r *Driver) Run(conf *util.Config) {
[]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
})
}
@ -213,6 +215,19 @@ func (r *Driver) setupCSIAddonsServer(conf *util.Config) error {
is := casrbd.NewIdentityServer(conf)
r.cas.RegisterService(is)
if conf.IsControllerServer {
rs := casrbd.NewReclaimSpaceControllerServer()
r.cas.RegisterService(rs)
fcs := casrbd.NewFenceControllerServer()
r.cas.RegisterService(fcs)
}
if conf.IsNodeServer {
rs := casrbd.NewReclaimSpaceNodeServer()
r.cas.RegisterService(rs)
}
// start the server, this does not block, it runs a new go-routine
err = r.cas.Start()
if err != nil {

View File

@ -82,7 +82,7 @@ func deleteMigratedVolume(ctx context.Context, parsedMigHandle *migrationVolID,
return err
}
defer rv.Destroy()
err = deleteImage(ctx, rv, cr)
err = rv.deleteImage(ctx)
if err != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s, err: %v", rv, err)
}

View File

@ -375,7 +375,7 @@ func (ns *NodeServer) stageTransaction(
volOptions.readOnly = true
}
err = flattenImageBeforeMapping(ctx, volOptions, cr)
err = flattenImageBeforeMapping(ctx, volOptions)
if err != nil {
return transaction, err
}
@ -527,8 +527,7 @@ func resizeEncryptedDevice(ctx context.Context, volID, stagingTargetPath, device
func flattenImageBeforeMapping(
ctx context.Context,
volOptions *rbdVolume,
cr *util.Credentials) error {
volOptions *rbdVolume) error {
var err error
var feature bool
var depth uint
@ -550,7 +549,7 @@ func flattenImageBeforeMapping(
return err
}
if feature || depth != 0 {
err = volOptions.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = volOptions.flattenRbdImage(ctx, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return err
}
@ -1087,6 +1086,13 @@ func (ns *NodeServer) NodeGetCapabilities(
},
},
},
{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
},
},
},
},
}, nil
}

View File

@ -56,9 +56,8 @@ const (
// Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when
// command is not supported by ceph manager. Used to check errors and recover when the command
// is unsupported.
rbdTaskRemoveCmdInvalidString1 = "no valid command found"
rbdTaskRemoveCmdInvalidString2 = "Error EINVAL: invalid command"
rbdTaskRemoveCmdAccessDeniedMessage = "Error EACCES:"
rbdTaskRemoveCmdInvalidString = "No handler found"
rbdTaskRemoveCmdAccessDeniedMessage = "access denied:"
// image metadata key for thick-provisioning.
// As image metadata key starting with '.rbd' will not be copied when we do
@ -388,7 +387,7 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
if err != nil {
// nolint:errcheck // deleteImage() will log errors in
// case it fails, no need to log them here again
_ = deleteImage(ctx, pOpts, cr)
_ = pOpts.deleteImage(ctx)
return fmt.Errorf("failed to thick provision image: %w", err)
}
@ -397,7 +396,7 @@ func createImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
if err != nil {
// nolint:errcheck // deleteImage() will log errors in
// case it fails, no need to log them here again
_ = deleteImage(ctx, pOpts, cr)
_ = pOpts.deleteImage(ctx)
return fmt.Errorf("failed to mark image as thick-provisioned: %w", err)
}
@ -595,53 +594,27 @@ func isNotMountPoint(mounter mount.Interface, stagingTargetPath string) (bool, e
return isNotMnt, err
}
// addRbdManagerTask adds a ceph manager task to execute command
// asynchronously. If command is not found returns a bool set to false
// example arg ["trash", "remove","pool/image"].
func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (bool, error) {
args := []string{"rbd", "task", "add"}
args = append(args, arg...)
log.DebugLog(
ctx,
"executing %v for image (%s) using mon %s, pool %s",
args,
pOpts.RbdImageName,
pOpts.Monitors,
pOpts.Pool)
supported := true
_, stderr, err := util.ExecCommand(ctx, "ceph", args...)
if err != nil {
// isCephMgrSupported determines if the cluster has support for MGR based operation
// depending on the error.
func isCephMgrSupported(ctx context.Context, clusterID string, err error) bool {
switch {
case strings.Contains(stderr, rbdTaskRemoveCmdInvalidString1) &&
strings.Contains(stderr, rbdTaskRemoveCmdInvalidString2):
case err == nil:
return true
case strings.Contains(err.Error(), rbdTaskRemoveCmdInvalidString):
log.WarningLog(
ctx,
"cluster with cluster ID (%s) does not support Ceph manager based rbd commands"+
"(minimum ceph version required is v14.2.3)",
pOpts.ClusterID)
supported = false
case strings.HasPrefix(stderr, rbdTaskRemoveCmdAccessDeniedMessage):
log.WarningLog(ctx, "access denied to Ceph MGR-based rbd commands on cluster ID (%s)", pOpts.ClusterID)
supported = false
default:
log.WarningLog(ctx, "uncaught error while scheduling a task (%v): %s", err, stderr)
}
}
if err != nil {
err = fmt.Errorf("%w. stdError:%s", err, stderr)
clusterID)
return false
case strings.Contains(err.Error(), rbdTaskRemoveCmdAccessDeniedMessage):
log.WarningLog(ctx, "access denied to Ceph MGR-based rbd commands on cluster ID (%s)", clusterID)
return false
}
return supported, err
}
// getTrashPath returns the image path for trash operation.
func (rv *rbdVolume) getTrashPath() string {
trashPath := rv.Pool
if rv.RadosNamespace != "" {
trashPath = trashPath + "/" + rv.RadosNamespace
}
return trashPath + "/" + rv.ImageID
return true
}
// ensureImageCleanup finds image in trash and if found removes it
@ -657,7 +630,7 @@ func (rv *rbdVolume) ensureImageCleanup(ctx context.Context) error {
if val.Name == rv.RbdImageName {
rv.ImageID = val.Id
return trashRemoveImage(ctx, rv, rv.conn.Creds)
return rv.trashRemoveImage(ctx)
}
}
@ -665,66 +638,69 @@ func (rv *rbdVolume) ensureImageCleanup(ctx context.Context) error {
}
// deleteImage deletes a ceph image with provision and volume options.
func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
image := pOpts.RbdImageName
func (rv *rbdVolume) deleteImage(ctx context.Context) error {
image := rv.RbdImageName
log.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, pOpts.Monitors, pOpts.Pool)
log.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, rv.Monitors, rv.Pool)
// Support deleting the older rbd images whose imageID is not stored in omap
err := pOpts.getImageID()
err := rv.getImageID()
if err != nil {
return err
}
if pOpts.isEncrypted() {
log.DebugLog(ctx, "rbd: going to remove DEK for %q", pOpts)
if err = pOpts.encryption.RemoveDEK(pOpts.VolID); err != nil {
log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", pOpts.VolID, err)
if rv.isEncrypted() {
log.DebugLog(ctx, "rbd: going to remove DEK for %q", rv)
if err = rv.encryption.RemoveDEK(rv.VolID); err != nil {
log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", rv.VolID, err)
}
}
err = pOpts.openIoctx()
err = rv.openIoctx()
if err != nil {
return err
}
rbdImage := librbd.GetImage(pOpts.ioctx, image)
rbdImage := librbd.GetImage(rv.ioctx, image)
err = rbdImage.Trash(0)
if err != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", pOpts, err)
log.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", rv, err)
return err
}
return trashRemoveImage(ctx, pOpts, cr)
return rv.trashRemoveImage(ctx)
}
// trashRemoveImage adds a task to trash remove an image using ceph manager if supported,
// otherwise removes the image from trash.
func trashRemoveImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) error {
func (rv *rbdVolume) trashRemoveImage(ctx context.Context) error {
// attempt to use Ceph manager based deletion support if available
log.DebugLog(ctx, "rbd: adding task to remove image %q with id %q from trash", rv, rv.ImageID)
args := []string{
"trash", "remove",
pOpts.getTrashPath(),
"--id", cr.ID,
"--keyfile=" + cr.KeyFile,
"-m", pOpts.Monitors,
ta, err := rv.conn.GetTaskAdmin()
if err != nil {
return err
}
rbdCephMgrSupported, err := addRbdManagerTask(ctx, pOpts, args)
_, err = ta.AddTrashRemove(admin.NewImageSpec(rv.Pool, rv.RadosNamespace, rv.ImageID))
rbdCephMgrSupported := isCephMgrSupported(ctx, rv.ClusterID, err)
if rbdCephMgrSupported && err != nil {
log.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", pOpts, err)
log.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", rv, err)
return err
}
if !rbdCephMgrSupported {
err = librbd.TrashRemove(pOpts.ioctx, pOpts.ImageID, true)
err = librbd.TrashRemove(rv.ioctx, rv.ImageID, true)
if err != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s, %v", pOpts, err)
log.ErrorLog(ctx, "failed to delete rbd image: %s, %v", rv, err)
return err
}
} else {
log.DebugLog(ctx, "rbd: successfully added task to move image %q with id %q to trash", rv, rv.ImageID)
}
return nil
@ -813,7 +789,7 @@ func flattenClonedRbdImages(
for _, snapName := range origNameList {
rv.RbdImageName = snapName.origSnapName
err = rv.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
err = rv.flattenRbdImage(ctx, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
log.ErrorLog(ctx, "failed to flatten %s; err %v", rv, err)
@ -826,7 +802,6 @@ func flattenClonedRbdImages(
func (rv *rbdVolume) flattenRbdImage(
ctx context.Context,
cr *util.Credentials,
forceFlatten bool,
hardlimit, softlimit uint) error {
var depth uint
@ -850,9 +825,17 @@ func (rv *rbdVolume) flattenRbdImage(
if !forceFlatten && (depth < hardlimit) && (depth < softlimit) {
return nil
}
args := []string{"flatten", rv.String(), "--id", cr.ID, "--keyfile=" + cr.KeyFile, "-m", rv.Monitors}
supported, err := addRbdManagerTask(ctx, rv, args)
if supported {
log.DebugLog(ctx, "rbd: adding task to flatten image %q", rv)
ta, err := rv.conn.GetTaskAdmin()
if err != nil {
return err
}
_, err = ta.AddFlatten(admin.NewImageSpec(rv.Pool, rv.RadosNamespace, rv.RbdImageName))
rbdCephMgrSupported := isCephMgrSupported(ctx, rv.ClusterID, err)
if rbdCephMgrSupported {
if err != nil {
// discard flattening error if the image does not have any parent
rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
@ -866,17 +849,14 @@ func (rv *rbdVolume) flattenRbdImage(
if forceFlatten || depth >= hardlimit {
return fmt.Errorf("%w: flatten is in progress for image %s", ErrFlattenInProgress, rv.RbdImageName)
}
log.DebugLog(ctx, "successfully added task to flatten image %q", rv)
}
if !supported {
if !rbdCephMgrSupported {
log.ErrorLog(
ctx,
"task manager does not support flatten,image will be flattened once hardlimit is reached: %v",
err)
if forceFlatten || depth >= hardlimit {
err = rv.Connect(cr)
if err != nil {
return err
}
err := rv.flatten()
if err != nil {
log.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)

View File

@ -349,7 +349,7 @@ func repairDummyImage(ctx context.Context, dummyVol *rbdVolume) error {
// deleting and recreating the dummy image will not impact anything as it's
// a workaround to fix the scheduling problem.
err := deleteImage(ctx, dummyVol, dummyVol.conn.Creds)
err := dummyVol.deleteImage(ctx)
if err != nil {
return err
}

View File

@ -27,8 +27,7 @@ import (
func createRBDClone(
ctx context.Context,
parentVol, cloneRbdVol *rbdVolume,
snap *rbdSnapshot,
cr *util.Credentials) error {
snap *rbdSnapshot) error {
// create snapshot
err := parentVol.createSnapshot(ctx, snap)
if err != nil {
@ -56,7 +55,7 @@ func createRBDClone(
errSnap := parentVol.deleteSnapshot(ctx, snap)
if errSnap != nil {
log.ErrorLog(ctx, "failed to delete snapshot: %v", errSnap)
delErr := deleteImage(ctx, cloneRbdVol, cr)
delErr := cloneRbdVol.deleteImage(ctx)
if delErr != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", cloneRbdVol, delErr)
}
@ -73,8 +72,7 @@ func cleanUpSnapshot(
ctx context.Context,
parentVol *rbdVolume,
rbdSnap *rbdSnapshot,
rbdVol *rbdVolume,
cr *util.Credentials) error {
rbdVol *rbdVolume) error {
err := parentVol.deleteSnapshot(ctx, rbdSnap)
if err != nil {
if !errors.Is(err, ErrSnapNotFound) {
@ -85,7 +83,7 @@ func cleanUpSnapshot(
}
if rbdVol != nil {
err := deleteImage(ctx, rbdVol, cr)
err := rbdVol.deleteImage(ctx)
if err != nil {
if !errors.Is(err, ErrImageNotFound) {
log.ErrorLog(ctx, "failed to delete rbd image %q with error: %v", rbdVol, err)
@ -122,7 +120,7 @@ func undoSnapshotCloning(
rbdSnap *rbdSnapshot,
cloneVol *rbdVolume,
cr *util.Credentials) error {
err := cleanUpSnapshot(ctx, parentVol, rbdSnap, cloneVol, cr)
err := cleanUpSnapshot(ctx, parentVol, rbdSnap, cloneVol)
if err != nil {
log.ErrorLog(ctx, "failed to clean up %s or %s: %v", cloneVol, rbdSnap, err)

View File

@ -131,6 +131,16 @@ func (cc *ClusterConnection) GetRBDAdmin() (*ra.RBDAdmin, error) {
return ra.NewFromConn(cc.conn), nil
}
// GetTaskAdmin returns TaskAdmin to add tasks on rbd images.
func (cc *ClusterConnection) GetTaskAdmin() (*ra.TaskAdmin, error) {
rbdAdmin, err := cc.GetRBDAdmin()
if err != nil {
return nil, err
}
return rbdAdmin.Task(), nil
}
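A hedged usage sketch of the new helper, equivalent to what trashRemoveImage and flattenRbdImage do in the rbd package; pool, namespace and image names are placeholders, and this standalone program builds the TaskAdmin directly from a rados connection rather than going through ClusterConnection.
package main
import (
	"fmt"
	"github.com/ceph/go-ceph/rados"
	rbdadmin "github.com/ceph/go-ceph/rbd/admin"
)
func main() {
	conn, err := rados.NewConn()
	if err != nil {
		panic(err)
	}
	if err = conn.ReadDefaultConfigFile(); err != nil {
		panic(err)
	}
	if err = conn.Connect(); err != nil {
		panic(err)
	}
	defer conn.Shutdown()
	// equivalent of ClusterConnection.GetTaskAdmin() above
	ta := rbdadmin.NewFromConn(conn).Task()
	// queue a background flatten for a cloned image; pool/namespace/image are placeholders
	resp, err := ta.AddFlatten(rbdadmin.NewImageSpec("replicapool", "", "csi-vol-clone"))
	if err != nil {
		panic(err)
	}
	fmt.Println("queued rbd task:", resp.ID)
}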
// DisableDiscardOnZeroedWriteSame enables the
// `rbd_discard_on_zeroed_write_same` option in the cluster connection, so that
// writing zero blocks of data are actual writes on the OSDs (doing

View File

@ -43,6 +43,15 @@ minikube_version() {
echo "${MINIKUBE_VERSION}" | sed 's/^v//' | cut -d'.' -f"${1}"
}
# parse the kubernetes version, return the component at the position passed as argument
# v1.21.0 -> kube_version 1 -> 1
# v1.21.0 -> kube_version 2 -> 21
# v1.21.0 -> kube_version 3 -> 0
kube_version() {
echo "${KUBE_VERSION}" | sed 's/^v//' | cut -d'.' -f"${1}"
}
# detect if there is a minikube executable available already. If there is none,
# fallback to using /usr/local/bin/minikube, as that is where
# install_minikube() will place it too.
@ -203,6 +212,13 @@ up)
disable_storage_addons
# get kubernetes version we are operating on and accordingly enable feature gates
KUBE_MAJOR=$(kube_version 1)
KUBE_MINOR=$(kube_version 2)
if [ "${KUBE_MAJOR}" -eq 1 ] && [ "${KUBE_MINOR}" -ge 22 ];then
# if kubernetes version is 1.22 or higher, enable the ReadWriteOncePod (RWOP) feature gate
K8S_FEATURE_GATES="${K8S_FEATURE_GATES},ReadWriteOncePod=true"
fi
# shellcheck disable=SC2086
${minikube} start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}" --cni="${CNI}" ${EXTRA_CONFIG} ${EXTRA_CONFIG_PSP} --wait-timeout="${MINIKUBE_WAIT_TIMEOUT}" --wait="${MINIKUBE_WAIT}" --delete-on-failure ${DISK_CONFIG}

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
import (

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
import (

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
// For APIs that accept extra sets of "boolean" flags we may end up wanting

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
import (

View File

@ -52,3 +52,32 @@ func (fsa *FSAdmin) EnableMirroringModule(force bool) error {
func (fsa *FSAdmin) DisableMirroringModule() error {
return fsa.DisableModule(mirroring)
}
type moduleInfo struct {
EnabledModules []string `json:"enabled_modules"`
//DisabledModules []string `json:"disabled_modules"`
// DisabledModules is documented in ceph as a list of strings
// but that's not what comes back from the server (on pacific).
// Since we don't need this today, we're just going to ignore
// it, but if we ever want to support this for external consumers
// we'll need to figure out the real structure of this.
}
func parseModuleInfo(res response) (*moduleInfo, error) {
m := &moduleInfo{}
if err := res.NoStatus().Unmarshal(m).End(); err != nil {
return nil, err
}
return m, nil
}
// listModules returns moduleInfo or an error. It is not exported because
// this is not really a cephfs-specific thing, but we needed it
// for cephfs tests. Maybe lift it somewhere else someday.
func (fsa *FSAdmin) listModules() (*moduleInfo, error) {
m := map[string]string{
"prefix": "mgr module ls",
"format": "json",
}
return parseModuleInfo(commands.MarshalMonCommand(fsa.conn, m))
}
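A small in-package sketch of how a test could build on this helper (listModules is unexported, so this only compiles inside the admin package); requireModuleEnabled is a hypothetical helper, not part of the library.
package admin
import "testing"
// requireModuleEnabled is a hypothetical test helper built on listModules.
func requireModuleEnabled(t *testing.T, fsa *FSAdmin, module string) {
	t.Helper()
	mi, err := fsa.listModules()
	if err != nil {
		t.Fatalf("failed to list mgr modules: %v", err)
	}
	for _, m := range mi.EnabledModules {
		if m == module {
			return
		}
	}
	t.Fatalf("mgr module %q is not enabled (enabled: %v)", module, mi.EnabledModules)
}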

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
import (

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
// this is the internal type used to create JSON for ceph.

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
// this is the internal type used to create JSON for ceph.

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
import (

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package admin
import (

View File

@ -23,6 +23,9 @@ const (
SizeTSize = C.sizeof_size_t
)
// Compile-time assertion ensuring that Go's `int` is at least as large as C's.
const _ = unsafe.Sizeof(int(0)) - C.sizeof_int
// SizeT wraps size_t from C.
type SizeT C.size_t
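The assertion works because unsafe.Sizeof of a fixed-size expression is a compile-time constant of type uintptr, so a negative difference overflows and fails the build. A standalone illustration of the same trick, with the compared types chosen purely for demonstration:
package main
import (
	"fmt"
	"unsafe"
)
// builds only when int is at least as large as int32; if it were smaller, the
// constant difference would be negative, overflow uintptr and fail the build
const _ = unsafe.Sizeof(int(0)) - unsafe.Sizeof(int32(0))
func main() {
	fmt.Println("size check passed at compile time")
}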

View File

@ -99,8 +99,8 @@ type GetOmapStep struct {
// C returned data:
iter C.rados_omap_iter_t
more C.uchar
rval C.int
more *C.uchar
rval *C.int
// internal state:
@ -116,6 +116,8 @@ func newGetOmapStep(startAfter, filterPrefix string, maxReturn uint64) *GetOmapS
maxReturn: maxReturn,
cStartAfter: C.CString(startAfter),
cFilterPrefix: C.CString(filterPrefix),
more: (*C.uchar)(C.malloc(C.sizeof_uchar)),
rval: (*C.int)(C.malloc(C.sizeof_int)),
}
runtime.SetFinalizer(gos, opStepFinalizer)
return gos
@ -127,8 +129,10 @@ func (gos *GetOmapStep) free() {
C.rados_omap_get_end(gos.iter)
}
gos.iter = nil
gos.more = 0
gos.rval = 0
C.free(unsafe.Pointer(gos.more))
gos.more = nil
C.free(unsafe.Pointer(gos.rval))
gos.rval = nil
C.free(unsafe.Pointer(gos.cStartAfter))
gos.cStartAfter = nil
C.free(unsafe.Pointer(gos.cFilterPrefix))
@ -136,7 +140,7 @@ func (gos *GetOmapStep) free() {
}
func (gos *GetOmapStep) update() error {
err := getError(gos.rval)
err := getError(*gos.rval)
gos.canIterate = (err == nil)
return err
}
@ -168,7 +172,7 @@ func (gos *GetOmapStep) Next() (*OmapKeyValue, error) {
func (gos *GetOmapStep) More() bool {
// tad bit hacky, but go can't automatically convert from
// unsigned char to bool
return gos.more != 0
return *gos.more != 0
}
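The switch from plain struct fields to C-allocated pointers follows the usual cgo pattern for out-parameters that the C side fills in later (here, when Operate() runs): the memory must stay valid and outside Go's control. A generic sketch of that pattern; fill_result is a stand-in C function, not a librados call.
package main
/*
#include <stdlib.h>
// stand-in for an asynchronous C API that writes its result through a pointer
static void fill_result(int *out) { *out = 42; }
*/
import "C"
import (
	"fmt"
	"unsafe"
)
func main() {
	// allocate the out-parameter on the C heap so it stays valid for as long
	// as the C side may write to it, then release it explicitly
	rval := (*C.int)(C.malloc(C.sizeof_int))
	defer C.free(unsafe.Pointer(rval))
	C.fill_result(rval)
	fmt.Println("result:", int(*rval))
}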
// removeOmapKeysStep is a write operation step used to track state, especially

View File

@ -77,8 +77,8 @@ func (r *ReadOp) GetOmapValues(startAfter, filterPrefix string, maxReturn uint64
gos.cFilterPrefix,
C.uint64_t(gos.maxReturn),
&gos.iter,
&gos.more,
&gos.rval,
gos.more,
gos.rval,
)
return gos
}

View File

@ -0,0 +1,63 @@
//go:build ceph_preview
// +build ceph_preview
package rados
// #cgo LDFLAGS: -lrados
// #include <rados/librados.h>
// #include <stdlib.h>
//
import "C"
import (
"unsafe"
)
// WriteOpCmpExtStep holds result of the CmpExt write operation.
// Result is valid only after Operate() was called.
type WriteOpCmpExtStep struct {
// C returned data:
prval *C.int
// Result of the CmpExt write operation.
Result int
}
func (s *WriteOpCmpExtStep) update() error {
s.Result = int(*s.prval)
return nil
}
func (s *WriteOpCmpExtStep) free() {
C.free(unsafe.Pointer(s.prval))
s.prval = nil
}
func newWriteOpCmpExtStep() *WriteOpCmpExtStep {
return &WriteOpCmpExtStep{
prval: (*C.int)(C.malloc(C.sizeof_int)),
}
}
// CmpExt ensures that the given object range (extent) satisfies the comparison.
// PREVIEW
//
// Implements:
// void rados_write_op_cmpext(rados_write_op_t write_op,
// const char * cmp_buf,
// size_t cmp_len,
// uint64_t off,
// int * prval);
func (w *WriteOp) CmpExt(b []byte, offset uint64) *WriteOpCmpExtStep {
oe := newWriteStep(b, 0, offset)
cmpExtStep := newWriteOpCmpExtStep()
w.steps = append(w.steps, oe, cmpExtStep)
C.rados_write_op_cmpext(
w.op,
oe.cBuffer,
oe.cDataLen,
oe.cOffset,
cmpExtStep.prval)
return cmpExtStep
}
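A hedged sketch of a compare-and-write built from this step: the update is applied only when the object's current contents match the expected bytes. Pool and object names are placeholders, and WriteOp.Write, Operate and OperationNoFlag are assumed to be available in this vendored go-ceph; CmpExt itself sits behind the ceph_preview build tag.
//go:build ceph_preview
// +build ceph_preview
package main
import (
	"fmt"
	"github.com/ceph/go-ceph/rados"
)
func main() {
	conn, err := rados.NewConn()
	if err != nil {
		panic(err)
	}
	if err = conn.ReadDefaultConfigFile(); err != nil {
		panic(err)
	}
	if err = conn.Connect(); err != nil {
		panic(err)
	}
	defer conn.Shutdown()
	ioctx, err := conn.OpenIOContext("replicapool") // placeholder pool name
	if err != nil {
		panic(err)
	}
	defer ioctx.Destroy()
	expected := []byte("old-value")
	update := []byte("new-value")
	op := rados.CreateWriteOp()
	defer op.Release()
	// fail the whole operation unless the object currently starts with `expected`
	cmp := op.CmpExt(expected, 0)
	op.Write(update, 0)
	if err = op.Operate(ioctx, "example-object", rados.OperationNoFlag); err != nil {
		// a content mismatch surfaces as an error from Operate()
		fmt.Println("compare-and-write failed:", err)
		return
	}
	fmt.Println("object updated, cmpext result:", cmp.Result) // 0 on a match
}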

View File

@ -25,7 +25,7 @@ type writeStep struct {
func newWriteStep(b []byte, writeLen, offset uint64) *writeStep {
return &writeStep{
b: b,
cBuffer: (*C.char)(unsafe.Pointer(&b[0])),
cBuffer: (*C.char)(unsafe.Pointer(&b[0])), // TODO: must be pinned
cDataLen: C.size_t(len(b)),
cWriteLen: C.size_t(writeLen),
cOffset: C.uint64_t(offset),

View File

@ -1,5 +1,5 @@
//go:build !nautilus && ceph_preview
// +build !nautilus,ceph_preview
//go:build !nautilus
// +build !nautilus
package admin
@ -10,14 +10,12 @@ import (
// ImageSpec values are used to identify an RBD image wherever Ceph APIs
// require an image_spec/image_id_spec using image name/id and optional
// pool and namespace.
// PREVIEW
type ImageSpec struct {
spec string
}
// NewImageSpec is used to construct an ImageSpec given an image name/id
// and optional namespace and pool names.
// PREVIEW
//
// NewImageSpec constructs an ImageSpec to identify an RBD image and thus
// requires image name/id, whereas NewLevelSpec constructs LevelSpec to
@ -37,7 +35,6 @@ func NewImageSpec(pool, namespace, image string) ImageSpec {
// NewRawImageSpec returns a ImageSpec directly based on the spec string
// argument without constructing it from component values.
// PREVIEW
//
// This should only be used if NewImageSpec can not create the imagespec value
// you want to pass to ceph.

View File

@ -1,5 +1,5 @@
//go:build !nautilus && ceph_preview
// +build !nautilus,ceph_preview
//go:build !nautilus
// +build !nautilus
package admin
@ -9,19 +9,16 @@ import (
)
// TaskAdmin encapsulates management functions for ceph rbd task operations.
// PREVIEW
type TaskAdmin struct {
conn ccom.MgrCommander
}
// Task returns a TaskAdmin type for managing ceph rbd task operations.
// PREVIEW
func (ra *RBDAdmin) Task() *TaskAdmin {
return &TaskAdmin{conn: ra.conn}
}
// TaskRefs contains the action name and information about the image.
// PREVIEW
type TaskRefs struct {
Action string `json:"action"`
PoolName string `json:"pool_name"`
@ -31,7 +28,6 @@ type TaskRefs struct {
}
// TaskResponse contains the information about the task added on an image.
// PREVIEW
type TaskResponse struct {
Sequence int `json:"sequence"`
ID string `json:"id"`
@ -58,7 +54,6 @@ func parseTaskResponseList(res commands.Response) ([]TaskResponse, error) {
// AddFlatten adds a background task to flatten a cloned image based on the
// supplied image spec.
// PREVIEW
//
// Similar To:
// rbd task add flatten <image_spec>
@ -73,7 +68,6 @@ func (ta *TaskAdmin) AddFlatten(img ImageSpec) (TaskResponse, error) {
// AddRemove adds a background task to remove an image based on the supplied
// image spec.
// PREVIEW
//
// Similar To:
// rbd task add remove <image_spec>
@ -88,7 +82,6 @@ func (ta *TaskAdmin) AddRemove(img ImageSpec) (TaskResponse, error) {
// AddTrashRemove adds a background task to remove an image from the trash based
// on the supplied image id spec.
// PREVIEW
//
// Similar To:
// rbd task add trash remove <image_id_spec>
@ -102,7 +95,6 @@ func (ta *TaskAdmin) AddTrashRemove(img ImageSpec) (TaskResponse, error) {
}
// List pending or running asynchronous tasks.
// PREVIEW
//
// Similar To:
// rbd task list
@ -115,7 +107,6 @@ func (ta *TaskAdmin) List() ([]TaskResponse, error) {
}
// GetTaskByID returns pending or running asynchronous task using id.
// PREVIEW
//
// Similar To:
// rbd task list <task_id>
@ -129,7 +120,6 @@ func (ta *TaskAdmin) GetTaskByID(taskID string) (TaskResponse, error) {
}
// Cancel a pending or running asynchronous task.
// PREVIEW
//
// Similar To:
// rbd task cancel <task_id>

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
package rbd
// #include <rbd/librbd.h>

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
//
// Ceph Nautilus is the first release that includes rbd_namespace_create(),
// rbd_namespace_remove(), rbd_namespace_exists() and rbd_namespace_list().

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
//
// Ceph Nautilus is the first release that includes rbd_pool_metadata_get(),
// rbd_pool_metadata_set() and rbd_pool_metadata_remove().

View File

@ -951,6 +951,11 @@ func (image *Image) GetId() (string, error) {
}
// GetName returns the image name.
func (image *Image) GetName() string {
return image.name
}
// SetSnapshot updates the rbd image (not the Snapshot) such that the snapshot
// is the source of readable data.
//

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
//
// Ceph Nautilus is the first release that includes rbd_list2() and
// rbd_get_create_timestamp().

View File

@ -1,6 +1,3 @@
//go:build !luminous
// +build !luminous
//
// Ceph Mimic introduced rbd_snap_get_namespace_type().

View File

@ -1,6 +1,3 @@
//go:build !luminous && !mimic
// +build !luminous,!mimic
//
// Ceph Nautilus introduced rbd_get_parent() and deprecated rbd_get_parent_info().
// Ceph Nautilus introduced rbd_list_children3() and deprecated rbd_list_children().

View File

@ -0,0 +1,679 @@
// Code generated by make; DO NOT EDIT.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.14.0
// source: fence/fence.proto
package fence
import (
_ "github.com/container-storage-interface/spec/lib/go/csi"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
_ "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// FenceClusterNetworkRequest contains the information needed to identify
// the storage cluster so that the appropriate fencing operation can be
// performed.
type FenceClusterNetworkRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Plugin specific parameters passed in as opaque key-value pairs.
Parameters map[string]string `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Secrets required by the plugin to complete the request.
Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// list of CIDR blocks on which the fencing operation is expected to be
// performed.
Cidrs []*CIDR `protobuf:"bytes,3,rep,name=cidrs,proto3" json:"cidrs,omitempty"`
}
func (x *FenceClusterNetworkRequest) Reset() {
*x = FenceClusterNetworkRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FenceClusterNetworkRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FenceClusterNetworkRequest) ProtoMessage() {}
func (x *FenceClusterNetworkRequest) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FenceClusterNetworkRequest.ProtoReflect.Descriptor instead.
func (*FenceClusterNetworkRequest) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{0}
}
func (x *FenceClusterNetworkRequest) GetParameters() map[string]string {
if x != nil {
return x.Parameters
}
return nil
}
func (x *FenceClusterNetworkRequest) GetSecrets() map[string]string {
if x != nil {
return x.Secrets
}
return nil
}
func (x *FenceClusterNetworkRequest) GetCidrs() []*CIDR {
if x != nil {
return x.Cidrs
}
return nil
}
// FenceClusterNetworkResponse is returned by the CSI-driver as a result of
// the FenceClusterNetworkRequest call.
type FenceClusterNetworkResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *FenceClusterNetworkResponse) Reset() {
*x = FenceClusterNetworkResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FenceClusterNetworkResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FenceClusterNetworkResponse) ProtoMessage() {}
func (x *FenceClusterNetworkResponse) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FenceClusterNetworkResponse.ProtoReflect.Descriptor instead.
func (*FenceClusterNetworkResponse) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{1}
}
// UnfenceClusterNetworkRequest contains the information needed to identify
// the cluster so that the appropriate fence operation can be
// disabled.
type UnfenceClusterNetworkRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Plugin specific parameters passed in as opaque key-value pairs.
Parameters map[string]string `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Secrets required by the plugin to complete the request.
Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// list of CIDR blocks on which the fencing operation is expected to be
// performed.
Cidrs []*CIDR `protobuf:"bytes,3,rep,name=cidrs,proto3" json:"cidrs,omitempty"`
}
func (x *UnfenceClusterNetworkRequest) Reset() {
*x = UnfenceClusterNetworkRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UnfenceClusterNetworkRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnfenceClusterNetworkRequest) ProtoMessage() {}
func (x *UnfenceClusterNetworkRequest) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnfenceClusterNetworkRequest.ProtoReflect.Descriptor instead.
func (*UnfenceClusterNetworkRequest) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{2}
}
func (x *UnfenceClusterNetworkRequest) GetParameters() map[string]string {
if x != nil {
return x.Parameters
}
return nil
}
func (x *UnfenceClusterNetworkRequest) GetSecrets() map[string]string {
if x != nil {
return x.Secrets
}
return nil
}
func (x *UnfenceClusterNetworkRequest) GetCidrs() []*CIDR {
if x != nil {
return x.Cidrs
}
return nil
}
// UnfenceClusterNetworkResponse is returned by the CSI-driver as a result of
// the UnfenceClusterNetworkRequest call.
type UnfenceClusterNetworkResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *UnfenceClusterNetworkResponse) Reset() {
*x = UnfenceClusterNetworkResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UnfenceClusterNetworkResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnfenceClusterNetworkResponse) ProtoMessage() {}
func (x *UnfenceClusterNetworkResponse) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnfenceClusterNetworkResponse.ProtoReflect.Descriptor instead.
func (*UnfenceClusterNetworkResponse) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{3}
}
// ListClusterFenceRequest contains the information needed to identify
// the cluster so that the appropriate fenced clients can be listed.
type ListClusterFenceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Plugin specific parameters passed in as opaque key-value pairs.
Parameters map[string]string `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Secrets required by the plugin to complete the request.
Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *ListClusterFenceRequest) Reset() {
*x = ListClusterFenceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListClusterFenceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListClusterFenceRequest) ProtoMessage() {}
func (x *ListClusterFenceRequest) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListClusterFenceRequest.ProtoReflect.Descriptor instead.
func (*ListClusterFenceRequest) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{4}
}
func (x *ListClusterFenceRequest) GetParameters() map[string]string {
if x != nil {
return x.Parameters
}
return nil
}
func (x *ListClusterFenceRequest) GetSecrets() map[string]string {
if x != nil {
return x.Secrets
}
return nil
}
// ListClusterFenceResponse holds the information about the result of the
// ListClusterFenceResponse call.
type ListClusterFenceResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// list of IPs that are blocklisted by the SP.
Cidrs []*CIDR `protobuf:"bytes,1,rep,name=cidrs,proto3" json:"cidrs,omitempty"`
}
func (x *ListClusterFenceResponse) Reset() {
*x = ListClusterFenceResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListClusterFenceResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListClusterFenceResponse) ProtoMessage() {}
func (x *ListClusterFenceResponse) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListClusterFenceResponse.ProtoReflect.Descriptor instead.
func (*ListClusterFenceResponse) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{5}
}
func (x *ListClusterFenceResponse) GetCidrs() []*CIDR {
if x != nil {
return x.Cidrs
}
return nil
}
// CIDR holds a CIDR block.
type CIDR struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// CIDR block
Cidr string `protobuf:"bytes,1,opt,name=cidr,proto3" json:"cidr,omitempty"`
}
func (x *CIDR) Reset() {
*x = CIDR{}
if protoimpl.UnsafeEnabled {
mi := &file_fence_fence_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CIDR) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CIDR) ProtoMessage() {}
func (x *CIDR) ProtoReflect() protoreflect.Message {
mi := &file_fence_fence_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CIDR.ProtoReflect.Descriptor instead.
func (*CIDR) Descriptor() ([]byte, []int) {
return file_fence_fence_proto_rawDescGZIP(), []int{6}
}
func (x *CIDR) GetCidr() string {
if x != nil {
return x.Cidr
}
return ""
}
var File_fence_fence_proto protoreflect.FileDescriptor
var file_fence_fence_proto_rawDesc = []byte{
0x0a, 0x11, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x05, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x2d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61,
0x63, 0x65, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x67, 0x6f, 0x2f, 0x63,
0x73, 0x69, 0x2f, 0x63, 0x73, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65,
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdc,
0x02, 0x0a, 0x1a, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e,
0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a,
0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x31, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
0x12, 0x4d, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x2e, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12,
0x21, 0x0a, 0x05, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b,
0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x43, 0x49, 0x44, 0x52, 0x52, 0x05, 0x63, 0x69, 0x64,
0x72, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1d, 0x0a,
0x1b, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74,
0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe2, 0x02, 0x0a,
0x1c, 0x55, 0x6e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e,
0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a,
0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x33, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x55, 0x6e, 0x66, 0x65, 0x6e, 0x63,
0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
0x72, 0x73, 0x12, 0x4f, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x55, 0x6e, 0x66, 0x65,
0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72,
0x65, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x43, 0x49, 0x44, 0x52, 0x52,
0x05, 0x63, 0x69, 0x64, 0x72, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x6e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0xb0, 0x02, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74,
0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e,
0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4a,
0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x2b, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42,
0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61,
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63,
0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3d, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x0b, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x43, 0x49, 0x44, 0x52, 0x52, 0x05, 0x63,
0x69, 0x64, 0x72, 0x73, 0x22, 0x1a, 0x0a, 0x04, 0x43, 0x49, 0x44, 0x52, 0x12, 0x12, 0x0a, 0x04,
0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72,
0x32, 0xae, 0x02, 0x0a, 0x0f, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
0x6c, 0x6c, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x13, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x21, 0x2e, 0x66, 0x65,
0x6e, 0x63, 0x65, 0x2e, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x15, 0x55, 0x6e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x43,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x23, 0x2e,
0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x55, 0x6e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x24, 0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x55, 0x6e, 0x66, 0x65, 0x6e,
0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x10, 0x4c, 0x69,
0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1e,
0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74,
0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f,
0x2e, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74,
0x65, 0x72, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x63, 0x73, 0x69, 0x2d, 0x61, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x2f,
0x6c, 0x69, 0x62, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x65, 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_fence_fence_proto_rawDescOnce sync.Once
file_fence_fence_proto_rawDescData = file_fence_fence_proto_rawDesc
)
func file_fence_fence_proto_rawDescGZIP() []byte {
file_fence_fence_proto_rawDescOnce.Do(func() {
file_fence_fence_proto_rawDescData = protoimpl.X.CompressGZIP(file_fence_fence_proto_rawDescData)
})
return file_fence_fence_proto_rawDescData
}
var file_fence_fence_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_fence_fence_proto_goTypes = []interface{}{
(*FenceClusterNetworkRequest)(nil), // 0: fence.FenceClusterNetworkRequest
(*FenceClusterNetworkResponse)(nil), // 1: fence.FenceClusterNetworkResponse
(*UnfenceClusterNetworkRequest)(nil), // 2: fence.UnfenceClusterNetworkRequest
(*UnfenceClusterNetworkResponse)(nil), // 3: fence.UnfenceClusterNetworkResponse
(*ListClusterFenceRequest)(nil), // 4: fence.ListClusterFenceRequest
(*ListClusterFenceResponse)(nil), // 5: fence.ListClusterFenceResponse
(*CIDR)(nil), // 6: fence.CIDR
nil, // 7: fence.FenceClusterNetworkRequest.ParametersEntry
nil, // 8: fence.FenceClusterNetworkRequest.SecretsEntry
nil, // 9: fence.UnfenceClusterNetworkRequest.ParametersEntry
nil, // 10: fence.UnfenceClusterNetworkRequest.SecretsEntry
nil, // 11: fence.ListClusterFenceRequest.ParametersEntry
nil, // 12: fence.ListClusterFenceRequest.SecretsEntry
}
var file_fence_fence_proto_depIdxs = []int32{
7, // 0: fence.FenceClusterNetworkRequest.parameters:type_name -> fence.FenceClusterNetworkRequest.ParametersEntry
8, // 1: fence.FenceClusterNetworkRequest.secrets:type_name -> fence.FenceClusterNetworkRequest.SecretsEntry
6, // 2: fence.FenceClusterNetworkRequest.cidrs:type_name -> fence.CIDR
9, // 3: fence.UnfenceClusterNetworkRequest.parameters:type_name -> fence.UnfenceClusterNetworkRequest.ParametersEntry
10, // 4: fence.UnfenceClusterNetworkRequest.secrets:type_name -> fence.UnfenceClusterNetworkRequest.SecretsEntry
6, // 5: fence.UnfenceClusterNetworkRequest.cidrs:type_name -> fence.CIDR
11, // 6: fence.ListClusterFenceRequest.parameters:type_name -> fence.ListClusterFenceRequest.ParametersEntry
12, // 7: fence.ListClusterFenceRequest.secrets:type_name -> fence.ListClusterFenceRequest.SecretsEntry
6, // 8: fence.ListClusterFenceResponse.cidrs:type_name -> fence.CIDR
0, // 9: fence.FenceController.FenceClusterNetwork:input_type -> fence.FenceClusterNetworkRequest
2, // 10: fence.FenceController.UnfenceClusterNetwork:input_type -> fence.UnfenceClusterNetworkRequest
4, // 11: fence.FenceController.ListClusterFence:input_type -> fence.ListClusterFenceRequest
1, // 12: fence.FenceController.FenceClusterNetwork:output_type -> fence.FenceClusterNetworkResponse
3, // 13: fence.FenceController.UnfenceClusterNetwork:output_type -> fence.UnfenceClusterNetworkResponse
5, // 14: fence.FenceController.ListClusterFence:output_type -> fence.ListClusterFenceResponse
12, // [12:15] is the sub-list for method output_type
9, // [9:12] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
}
func init() { file_fence_fence_proto_init() }
func file_fence_fence_proto_init() {
if File_fence_fence_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_fence_fence_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FenceClusterNetworkRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_fence_fence_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FenceClusterNetworkResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_fence_fence_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UnfenceClusterNetworkRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_fence_fence_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UnfenceClusterNetworkResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_fence_fence_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListClusterFenceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_fence_fence_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListClusterFenceResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_fence_fence_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CIDR); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_fence_fence_proto_rawDesc,
NumEnums: 0,
NumMessages: 13,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_fence_fence_proto_goTypes,
DependencyIndexes: file_fence_fence_proto_depIdxs,
MessageInfos: file_fence_fence_proto_msgTypes,
}.Build()
File_fence_fence_proto = out.File
file_fence_fence_proto_rawDesc = nil
file_fence_fence_proto_goTypes = nil
file_fence_fence_proto_depIdxs = nil
}

View File

@ -0,0 +1,181 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package fence
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// FenceControllerClient is the client API for FenceController service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type FenceControllerClient interface {
// FenceClusterNetwork RPC call to perform a fencing operation.
FenceClusterNetwork(ctx context.Context, in *FenceClusterNetworkRequest, opts ...grpc.CallOption) (*FenceClusterNetworkResponse, error)
// UnfenceClusterNetwork RPC call to remove a list of CIDR blocks from the
// list of blocklisted/fenced clients.
UnfenceClusterNetwork(ctx context.Context, in *UnfenceClusterNetworkRequest, opts ...grpc.CallOption) (*UnfenceClusterNetworkResponse, error)
// ListClusterFence RPC call to provide a list of blocklisted/fenced clients.
ListClusterFence(ctx context.Context, in *ListClusterFenceRequest, opts ...grpc.CallOption) (*ListClusterFenceResponse, error)
}
type fenceControllerClient struct {
cc grpc.ClientConnInterface
}
func NewFenceControllerClient(cc grpc.ClientConnInterface) FenceControllerClient {
return &fenceControllerClient{cc}
}
func (c *fenceControllerClient) FenceClusterNetwork(ctx context.Context, in *FenceClusterNetworkRequest, opts ...grpc.CallOption) (*FenceClusterNetworkResponse, error) {
out := new(FenceClusterNetworkResponse)
err := c.cc.Invoke(ctx, "/fence.FenceController/FenceClusterNetwork", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *fenceControllerClient) UnfenceClusterNetwork(ctx context.Context, in *UnfenceClusterNetworkRequest, opts ...grpc.CallOption) (*UnfenceClusterNetworkResponse, error) {
out := new(UnfenceClusterNetworkResponse)
err := c.cc.Invoke(ctx, "/fence.FenceController/UnfenceClusterNetwork", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *fenceControllerClient) ListClusterFence(ctx context.Context, in *ListClusterFenceRequest, opts ...grpc.CallOption) (*ListClusterFenceResponse, error) {
out := new(ListClusterFenceResponse)
err := c.cc.Invoke(ctx, "/fence.FenceController/ListClusterFence", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
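// Illustrative sketch, not part of the generated code: it shows how a
// CSI-Addons CO plugin might invoke FenceClusterNetwork once it holds a gRPC
// connection to the CSI-driver sidecar. The parameters, secrets and CIDR
// values are hypothetical placeholders.
func exampleFenceCall(ctx context.Context, conn grpc.ClientConnInterface) error {
	client := NewFenceControllerClient(conn)
	req := &FenceClusterNetworkRequest{
		Parameters: map[string]string{"clusterID": "example-cluster"}, // hypothetical, SP specific
		Secrets:    map[string]string{"userKey": "***"},               // hypothetical
		Cidrs:      []*CIDR{{Cidr: "10.90.89.0/24"}},
	}
	_, err := client.FenceClusterNetwork(ctx, req)
	return err
}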
// FenceControllerServer is the server API for FenceController service.
// All implementations must embed UnimplementedFenceControllerServer
// for forward compatibility
type FenceControllerServer interface {
// FenceClusterNetwork RPC call to perform a fencing operation.
FenceClusterNetwork(context.Context, *FenceClusterNetworkRequest) (*FenceClusterNetworkResponse, error)
// UnfenceClusterNetwork RPC call to remove a list of CIDR blocks from the
// list of blocklisted/fenced clients.
UnfenceClusterNetwork(context.Context, *UnfenceClusterNetworkRequest) (*UnfenceClusterNetworkResponse, error)
// ListClusterFence RPC call to provide a list of blocklisted/fenced clients.
ListClusterFence(context.Context, *ListClusterFenceRequest) (*ListClusterFenceResponse, error)
mustEmbedUnimplementedFenceControllerServer()
}
// UnimplementedFenceControllerServer must be embedded to have forward compatible implementations.
type UnimplementedFenceControllerServer struct {
}
func (UnimplementedFenceControllerServer) FenceClusterNetwork(context.Context, *FenceClusterNetworkRequest) (*FenceClusterNetworkResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FenceClusterNetwork not implemented")
}
func (UnimplementedFenceControllerServer) UnfenceClusterNetwork(context.Context, *UnfenceClusterNetworkRequest) (*UnfenceClusterNetworkResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UnfenceClusterNetwork not implemented")
}
func (UnimplementedFenceControllerServer) ListClusterFence(context.Context, *ListClusterFenceRequest) (*ListClusterFenceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListClusterFence not implemented")
}
func (UnimplementedFenceControllerServer) mustEmbedUnimplementedFenceControllerServer() {}
// UnsafeFenceControllerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to FenceControllerServer will
// result in compilation errors.
type UnsafeFenceControllerServer interface {
mustEmbedUnimplementedFenceControllerServer()
}
func RegisterFenceControllerServer(s grpc.ServiceRegistrar, srv FenceControllerServer) {
s.RegisterService(&FenceController_ServiceDesc, srv)
}
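// Illustrative sketch, not part of the generated code: a CSI-driver embeds
// UnimplementedFenceControllerServer for forward compatibility, overrides only
// the RPCs it supports, and registers the implementation with a grpc.Server.
type exampleFenceServer struct {
	UnimplementedFenceControllerServer
}

func (s *exampleFenceServer) FenceClusterNetwork(ctx context.Context, req *FenceClusterNetworkRequest) (*FenceClusterNetworkResponse, error) {
	// A real driver would blocklist the requested CIDR blocks on the storage
	// backend here before returning.
	return &FenceClusterNetworkResponse{}, nil
}

// registerExampleFenceServer shows the registration step; srv is any
// grpc.ServiceRegistrar, typically a *grpc.Server.
func registerExampleFenceServer(srv grpc.ServiceRegistrar) {
	RegisterFenceControllerServer(srv, &exampleFenceServer{})
}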
func _FenceController_FenceClusterNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FenceClusterNetworkRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(FenceControllerServer).FenceClusterNetwork(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fence.FenceController/FenceClusterNetwork",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(FenceControllerServer).FenceClusterNetwork(ctx, req.(*FenceClusterNetworkRequest))
}
return interceptor(ctx, in, info, handler)
}
func _FenceController_UnfenceClusterNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UnfenceClusterNetworkRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(FenceControllerServer).UnfenceClusterNetwork(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fence.FenceController/UnfenceClusterNetwork",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(FenceControllerServer).UnfenceClusterNetwork(ctx, req.(*UnfenceClusterNetworkRequest))
}
return interceptor(ctx, in, info, handler)
}
func _FenceController_ListClusterFence_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListClusterFenceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(FenceControllerServer).ListClusterFence(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/fence.FenceController/ListClusterFence",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(FenceControllerServer).ListClusterFence(ctx, req.(*ListClusterFenceRequest))
}
return interceptor(ctx, in, info, handler)
}
// FenceController_ServiceDesc is the grpc.ServiceDesc for FenceController service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var FenceController_ServiceDesc = grpc.ServiceDesc{
ServiceName: "fence.FenceController",
HandlerType: (*FenceControllerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "FenceClusterNetwork",
Handler: _FenceController_FenceClusterNetwork_Handler,
},
{
MethodName: "UnfenceClusterNetwork",
Handler: _FenceController_UnfenceClusterNetwork_Handler,
},
{
MethodName: "ListClusterFence",
Handler: _FenceController_ListClusterFence_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "fence/fence.proto",
}

View File

@ -91,6 +91,126 @@ func (Capability_Service_Type) EnumDescriptor() ([]byte, []int) {
return file_identity_identity_proto_rawDescGZIP(), []int{4, 0, 0}
}
// Type describes a mode of the ReclaimSpace operation that CSI-drivers can support.
type Capability_ReclaimSpace_Type int32
const (
// UNKNOWN indicates that the CSI-driver does not support the ReclaimSpace
// operation in the current mode. The CSI-driver may be able to support
// the operation when it is configured differently. The CSI-Addons CO
// plugin will most likely ignore this node for the ReclaimSpace
// operation.
Capability_ReclaimSpace_UNKNOWN Capability_ReclaimSpace_Type = 0
// OFFLINE indicates that the CSI-driver provides RPCs for an offline
// ReclaimSpace operation.
// The presence of this capability determines whether the CSI-Addons CO
// plugin can invoke RPCs that require access to the storage system,
// similar to the CSI Controller (provisioner).
Capability_ReclaimSpace_OFFLINE Capability_ReclaimSpace_Type = 1
// ONLINE indicates that the CSI-driver provides RPCs for an online
// ReclaimSpace operation.
// The presence of this capability determines whether the CSI-Addons CO
// plugin can invoke RPCs that require a volume to be staged/attached to
// the node.
Capability_ReclaimSpace_ONLINE Capability_ReclaimSpace_Type = 2
)
// Enum value maps for Capability_ReclaimSpace_Type.
var (
Capability_ReclaimSpace_Type_name = map[int32]string{
0: "UNKNOWN",
1: "OFFLINE",
2: "ONLINE",
}
Capability_ReclaimSpace_Type_value = map[string]int32{
"UNKNOWN": 0,
"OFFLINE": 1,
"ONLINE": 2,
}
)
func (x Capability_ReclaimSpace_Type) Enum() *Capability_ReclaimSpace_Type {
p := new(Capability_ReclaimSpace_Type)
*p = x
return p
}
func (x Capability_ReclaimSpace_Type) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Capability_ReclaimSpace_Type) Descriptor() protoreflect.EnumDescriptor {
return file_identity_identity_proto_enumTypes[1].Descriptor()
}
func (Capability_ReclaimSpace_Type) Type() protoreflect.EnumType {
return &file_identity_identity_proto_enumTypes[1]
}
func (x Capability_ReclaimSpace_Type) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Capability_ReclaimSpace_Type.Descriptor instead.
func (Capability_ReclaimSpace_Type) EnumDescriptor() ([]byte, []int) {
return file_identity_identity_proto_rawDescGZIP(), []int{4, 1, 0}
}
// Type describes a NetworkFence operation that CSI-drivers can support.
type Capability_NetworkFence_Type int32
const (
// UNKNOWN indicates that the CSI-driver does not support the NetworkFence
// operation in the current mode. The CSI-Addons CO plugin will most
// likely ignore this node for the NetworkFence operation.
Capability_NetworkFence_UNKNOWN Capability_NetworkFence_Type = 0
// NETWORK_FENCE indicates that the CSI-driver provides RPCs for a
// NetworkFence operation.
// The presence of this capability determines whether the CSI-Addons CO
// plugin can invoke RPCs that require access to the storage system,
// similar to the CSI Controller (provisioner).
Capability_NetworkFence_NETWORK_FENCE Capability_NetworkFence_Type = 1
)
// Enum value maps for Capability_NetworkFence_Type.
var (
Capability_NetworkFence_Type_name = map[int32]string{
0: "UNKNOWN",
1: "NETWORK_FENCE",
}
Capability_NetworkFence_Type_value = map[string]int32{
"UNKNOWN": 0,
"NETWORK_FENCE": 1,
}
)
func (x Capability_NetworkFence_Type) Enum() *Capability_NetworkFence_Type {
p := new(Capability_NetworkFence_Type)
*p = x
return p
}
func (x Capability_NetworkFence_Type) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Capability_NetworkFence_Type) Descriptor() protoreflect.EnumDescriptor {
return file_identity_identity_proto_enumTypes[2].Descriptor()
}
func (Capability_NetworkFence_Type) Type() protoreflect.EnumType {
return &file_identity_identity_proto_enumTypes[2]
}
func (x Capability_NetworkFence_Type) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Capability_NetworkFence_Type.Descriptor instead.
func (Capability_NetworkFence_Type) EnumDescriptor() ([]byte, []int) {
return file_identity_identity_proto_rawDescGZIP(), []int{4, 2, 0}
}
// GetIdentityRequest is sent by the CSI-Addons CO plugin to obtain the
// drivername, version and optional details from the CSI-driver.
type GetIdentityRequest struct {
@ -303,6 +423,8 @@ type Capability struct {
// Types that are assignable to Type:
// *Capability_Service_
// *Capability_ReclaimSpace_
// *Capability_NetworkFence_
Type isCapability_Type `protobuf_oneof:"type"`
}
@ -352,6 +474,20 @@ func (x *Capability) GetService() *Capability_Service {
return nil
}
func (x *Capability) GetReclaimSpace() *Capability_ReclaimSpace {
if x, ok := x.GetType().(*Capability_ReclaimSpace_); ok {
return x.ReclaimSpace
}
return nil
}
func (x *Capability) GetNetworkFence() *Capability_NetworkFence {
if x, ok := x.GetType().(*Capability_NetworkFence_); ok {
return x.NetworkFence
}
return nil
}
type isCapability_Type interface {
isCapability_Type()
}
@ -361,8 +497,22 @@ type Capability_Service_ struct {
Service *Capability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"`
}
type Capability_ReclaimSpace_ struct {
// ReclaimSpace operation capabilities.
ReclaimSpace *Capability_ReclaimSpace `protobuf:"bytes,2,opt,name=reclaim_space,json=reclaimSpace,proto3,oneof"`
}
type Capability_NetworkFence_ struct {
// NetworkFence operation capabilities
NetworkFence *Capability_NetworkFence `protobuf:"bytes,3,opt,name=network_fence,json=networkFence,proto3,oneof"`
}
func (*Capability_Service_) isCapability_Type() {}
func (*Capability_ReclaimSpace_) isCapability_Type() {}
func (*Capability_NetworkFence_) isCapability_Type() {}
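// Illustrative sketch, not part of the generated code: it shows how a
// CSI-Addons CO plugin might inspect the `type` oneof of a Capability returned
// by GetCapabilities, using only the generated accessors.
func describeCapability(c *Capability) string {
	switch {
	case c.GetService() != nil:
		return "service: " + c.GetService().GetType().String()
	case c.GetReclaimSpace() != nil:
		return "reclaim_space: " + c.GetReclaimSpace().GetType().String()
	case c.GetNetworkFence() != nil:
		return "network_fence: " + c.GetNetworkFence().GetType().String()
	default:
		return "unknown capability"
	}
}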
// ProbeRequest is sent to the CSI-driver to confirm that it can respond to
// requests from the CSI-Addons CO plugin.
type ProbeRequest struct {
@ -520,6 +670,106 @@ func (x *Capability_Service) GetType() Capability_Service_Type {
return Capability_Service_UNKNOWN
}
// ReclaimSpace contains the features of the ReclaimSpace operation that the
// CSI-driver supports.
type Capability_ReclaimSpace struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// type contains the Type of the ReclaimSpace operation that the CSI-driver supports.
Type Capability_ReclaimSpace_Type `protobuf:"varint,1,opt,name=type,proto3,enum=identity.Capability_ReclaimSpace_Type" json:"type,omitempty"`
}
func (x *Capability_ReclaimSpace) Reset() {
*x = Capability_ReclaimSpace{}
if protoimpl.UnsafeEnabled {
mi := &file_identity_identity_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Capability_ReclaimSpace) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Capability_ReclaimSpace) ProtoMessage() {}
func (x *Capability_ReclaimSpace) ProtoReflect() protoreflect.Message {
mi := &file_identity_identity_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Capability_ReclaimSpace.ProtoReflect.Descriptor instead.
func (*Capability_ReclaimSpace) Descriptor() ([]byte, []int) {
return file_identity_identity_proto_rawDescGZIP(), []int{4, 1}
}
func (x *Capability_ReclaimSpace) GetType() Capability_ReclaimSpace_Type {
if x != nil {
return x.Type
}
return Capability_ReclaimSpace_UNKNOWN
}
// NetworkFence contains the features of the NetworkFence operation that the
// CSI-driver supports.
type Capability_NetworkFence struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// type contains the Type of the NetworkFence operation that the CSI-driver supports.
Type Capability_NetworkFence_Type `protobuf:"varint,1,opt,name=type,proto3,enum=identity.Capability_NetworkFence_Type" json:"type,omitempty"`
}
func (x *Capability_NetworkFence) Reset() {
*x = Capability_NetworkFence{}
if protoimpl.UnsafeEnabled {
mi := &file_identity_identity_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Capability_NetworkFence) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Capability_NetworkFence) ProtoMessage() {}
func (x *Capability_NetworkFence) ProtoReflect() protoreflect.Message {
mi := &file_identity_identity_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Capability_NetworkFence.ProtoReflect.Descriptor instead.
func (*Capability_NetworkFence) Descriptor() ([]byte, []int) {
return file_identity_identity_proto_rawDescGZIP(), []int{4, 2}
}
func (x *Capability_NetworkFence) GetType() Capability_NetworkFence_Type {
if x != nil {
return x.Type
}
return Capability_NetworkFence_UNKNOWN
}
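// Illustrative sketch, not part of the generated code: a CSI-driver that
// supports online space reclamation and network fencing could advertise both
// in its GetCapabilitiesResponse roughly like this.
func exampleCapabilities() *GetCapabilitiesResponse {
	return &GetCapabilitiesResponse{
		Capabilities: []*Capability{
			{Type: &Capability_ReclaimSpace_{
				ReclaimSpace: &Capability_ReclaimSpace{Type: Capability_ReclaimSpace_ONLINE},
			}},
			{Type: &Capability_NetworkFence_{
				NetworkFence: &Capability_NetworkFence{Type: Capability_NetworkFence_NETWORK_FENCE},
			}},
		},
	}
}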
var File_identity_identity_proto protoreflect.FileDescriptor
var file_identity_identity_proto_rawDesc = []byte{
@ -549,41 +799,65 @@ var file_identity_identity_proto_rawDesc = []byte{
0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c,
0x69, 0x74, 0x79, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65,
0x73, 0x22, 0xcf, 0x01, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
0x73, 0x22, 0xd1, 0x04, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
0x12, 0x38, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x43, 0x61, 0x70,
0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48,
0x00, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x7f, 0x0a, 0x07, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x43,
0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3d, 0x0a, 0x04,
0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
0x00, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x4f, 0x4c, 0x4c, 0x45, 0x52, 0x5f,
0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4e, 0x4f, 0x44,
0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x02, 0x42, 0x06, 0x0a, 0x04, 0x74,
0x79, 0x70, 0x65, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x22, 0x41, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x32, 0xee, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74,
0x69, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x74, 0x79, 0x12, 0x1c, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x47, 0x65,
0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1d, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x49,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69,
0x74, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e,
0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
0x00, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x72, 0x65,
0x63, 0x6c, 0x61, 0x69, 0x6d, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x21, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x43, 0x61, 0x70,
0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53,
0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53,
0x70, 0x61, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f,
0x66, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x64,
0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
0x79, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00,
0x52, 0x0c, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x7f,
0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x74, 0x79, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
0x22, 0x3d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x4f, 0x4c,
0x4c, 0x45, 0x52, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x01, 0x12, 0x10, 0x0a,
0x0c, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x02, 0x1a,
0x78, 0x0a, 0x0c, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12,
0x3a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e,
0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c,
0x69, 0x74, 0x79, 0x2e, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65,
0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x2c, 0x0a, 0x04, 0x54,
0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x46, 0x46, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a,
0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x1a, 0x72, 0x0a, 0x0c, 0x4e, 0x65, 0x74,
0x77, 0x6f, 0x72, 0x6b, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x79, 0x70,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x74, 0x79, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x4e, 0x65,
0x74, 0x77, 0x6f, 0x72, 0x6b, 0x46, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x45,
0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x46, 0x45, 0x4e, 0x43, 0x45, 0x10, 0x01, 0x42, 0x06, 0x0a,
0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x41, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x32, 0xee, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e,
0x74, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e,
0x47, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x47, 0x65,
0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69,
0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
0x79, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x50,
0x72, 0x6f, 0x62, 0x65, 0x12, 0x16, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e,
0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x69, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74,
0x69, 0x74, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a,
0x05, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x12, 0x16, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
0x79, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17,
0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x3b, 0x69,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -598,38 +872,46 @@ func file_identity_identity_proto_rawDescGZIP() []byte {
return file_identity_identity_proto_rawDescData
}
var file_identity_identity_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_identity_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_identity_identity_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_identity_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_identity_identity_proto_goTypes = []interface{}{
(Capability_Service_Type)(0), // 0: identity.Capability.Service.Type
(*GetIdentityRequest)(nil), // 1: identity.GetIdentityRequest
(*GetIdentityResponse)(nil), // 2: identity.GetIdentityResponse
(*GetCapabilitiesRequest)(nil), // 3: identity.GetCapabilitiesRequest
(*GetCapabilitiesResponse)(nil), // 4: identity.GetCapabilitiesResponse
(*Capability)(nil), // 5: identity.Capability
(*ProbeRequest)(nil), // 6: identity.ProbeRequest
(*ProbeResponse)(nil), // 7: identity.ProbeResponse
nil, // 8: identity.GetIdentityResponse.ManifestEntry
(*Capability_Service)(nil), // 9: identity.Capability.Service
(*wrapperspb.BoolValue)(nil), // 10: google.protobuf.BoolValue
(Capability_ReclaimSpace_Type)(0), // 1: identity.Capability.ReclaimSpace.Type
(Capability_NetworkFence_Type)(0), // 2: identity.Capability.NetworkFence.Type
(*GetIdentityRequest)(nil), // 3: identity.GetIdentityRequest
(*GetIdentityResponse)(nil), // 4: identity.GetIdentityResponse
(*GetCapabilitiesRequest)(nil), // 5: identity.GetCapabilitiesRequest
(*GetCapabilitiesResponse)(nil), // 6: identity.GetCapabilitiesResponse
(*Capability)(nil), // 7: identity.Capability
(*ProbeRequest)(nil), // 8: identity.ProbeRequest
(*ProbeResponse)(nil), // 9: identity.ProbeResponse
nil, // 10: identity.GetIdentityResponse.ManifestEntry
(*Capability_Service)(nil), // 11: identity.Capability.Service
(*Capability_ReclaimSpace)(nil), // 12: identity.Capability.ReclaimSpace
(*Capability_NetworkFence)(nil), // 13: identity.Capability.NetworkFence
(*wrapperspb.BoolValue)(nil), // 14: google.protobuf.BoolValue
}
var file_identity_identity_proto_depIdxs = []int32{
8, // 0: identity.GetIdentityResponse.manifest:type_name -> identity.GetIdentityResponse.ManifestEntry
5, // 1: identity.GetCapabilitiesResponse.capabilities:type_name -> identity.Capability
9, // 2: identity.Capability.service:type_name -> identity.Capability.Service
10, // 3: identity.ProbeResponse.ready:type_name -> google.protobuf.BoolValue
0, // 4: identity.Capability.Service.type:type_name -> identity.Capability.Service.Type
1, // 5: identity.Identity.GetIdentity:input_type -> identity.GetIdentityRequest
3, // 6: identity.Identity.GetCapabilities:input_type -> identity.GetCapabilitiesRequest
6, // 7: identity.Identity.Probe:input_type -> identity.ProbeRequest
2, // 8: identity.Identity.GetIdentity:output_type -> identity.GetIdentityResponse
4, // 9: identity.Identity.GetCapabilities:output_type -> identity.GetCapabilitiesResponse
7, // 10: identity.Identity.Probe:output_type -> identity.ProbeResponse
8, // [8:11] is the sub-list for method output_type
5, // [5:8] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
10, // 0: identity.GetIdentityResponse.manifest:type_name -> identity.GetIdentityResponse.ManifestEntry
7, // 1: identity.GetCapabilitiesResponse.capabilities:type_name -> identity.Capability
11, // 2: identity.Capability.service:type_name -> identity.Capability.Service
12, // 3: identity.Capability.reclaim_space:type_name -> identity.Capability.ReclaimSpace
13, // 4: identity.Capability.network_fence:type_name -> identity.Capability.NetworkFence
14, // 5: identity.ProbeResponse.ready:type_name -> google.protobuf.BoolValue
0, // 6: identity.Capability.Service.type:type_name -> identity.Capability.Service.Type
1, // 7: identity.Capability.ReclaimSpace.type:type_name -> identity.Capability.ReclaimSpace.Type
2, // 8: identity.Capability.NetworkFence.type:type_name -> identity.Capability.NetworkFence.Type
3, // 9: identity.Identity.GetIdentity:input_type -> identity.GetIdentityRequest
5, // 10: identity.Identity.GetCapabilities:input_type -> identity.GetCapabilitiesRequest
8, // 11: identity.Identity.Probe:input_type -> identity.ProbeRequest
4, // 12: identity.Identity.GetIdentity:output_type -> identity.GetIdentityResponse
6, // 13: identity.Identity.GetCapabilities:output_type -> identity.GetCapabilitiesResponse
9, // 14: identity.Identity.Probe:output_type -> identity.ProbeResponse
12, // [12:15] is the sub-list for method output_type
9, // [9:12] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
}
func init() { file_identity_identity_proto_init() }
@ -734,17 +1016,43 @@ func file_identity_identity_proto_init() {
return nil
}
}
file_identity_identity_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Capability_ReclaimSpace); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_identity_identity_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Capability_NetworkFence); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_identity_identity_proto_msgTypes[4].OneofWrappers = []interface{}{
(*Capability_Service_)(nil),
(*Capability_ReclaimSpace_)(nil),
(*Capability_NetworkFence_)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_identity_identity_proto_rawDesc,
NumEnums: 1,
NumMessages: 9,
NumEnums: 3,
NumMessages: 11,
NumExtensions: 0,
NumServices: 1,
},

View File

@ -0,0 +1,612 @@
// Code generated by make; DO NOT EDIT.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.14.0
// source: reclaimspace/reclaimspace.proto
package reclaimspace
import (
csi "github.com/container-storage-interface/spec/lib/go/csi"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
_ "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// ControllerReclaimSpaceRequest contains the information needed to identify
// the volume by the SP and access any backend services so that space can be
// reclaimed.
type ControllerReclaimSpaceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The ID of the volume. This field is REQUIRED.
VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
// Plugin-specific parameters passed in as opaque key-value pairs.
Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Secrets required by the plugin to complete the request.
Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *ControllerReclaimSpaceRequest) Reset() {
*x = ControllerReclaimSpaceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ControllerReclaimSpaceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ControllerReclaimSpaceRequest) ProtoMessage() {}
func (x *ControllerReclaimSpaceRequest) ProtoReflect() protoreflect.Message {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ControllerReclaimSpaceRequest.ProtoReflect.Descriptor instead.
func (*ControllerReclaimSpaceRequest) Descriptor() ([]byte, []int) {
return file_reclaimspace_reclaimspace_proto_rawDescGZIP(), []int{0}
}
func (x *ControllerReclaimSpaceRequest) GetVolumeId() string {
if x != nil {
return x.VolumeId
}
return ""
}
func (x *ControllerReclaimSpaceRequest) GetParameters() map[string]string {
if x != nil {
return x.Parameters
}
return nil
}
func (x *ControllerReclaimSpaceRequest) GetSecrets() map[string]string {
if x != nil {
return x.Secrets
}
return nil
}
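// Illustrative sketch, not part of the generated code: it shows how a
// CSI-Addons CO plugin might fill a ControllerReclaimSpaceRequest. The volume
// ID, parameters and secrets are hypothetical placeholders.
func exampleControllerRequest() *ControllerReclaimSpaceRequest {
	return &ControllerReclaimSpaceRequest{
		VolumeId:   "0001-0009-example-volume-handle",           // hypothetical CSI volume ID
		Parameters: map[string]string{"clusterID": "example"},   // hypothetical, SP specific
		Secrets:    map[string]string{"userID": "admin"},        // hypothetical
	}
}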
// ControllerReclaimSpaceResponse holds the information about the result of the
// ControllerReclaimSpaceRequest call.
type ControllerReclaimSpaceResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// This field is OPTIONAL. This allows the SP to inform the CO about the
// storage consumption before the ReclaimSpace operation was executed.
PreUsage *StorageConsumption `protobuf:"bytes,1,opt,name=pre_usage,json=preUsage,proto3" json:"pre_usage,omitempty"`
// This field is OPTIONAL. This allows the SP to inform the CO about the
// storage consumption after the ReclaimSpace operation was executed.
PostUsage *StorageConsumption `protobuf:"bytes,2,opt,name=post_usage,json=postUsage,proto3" json:"post_usage,omitempty"`
}
func (x *ControllerReclaimSpaceResponse) Reset() {
*x = ControllerReclaimSpaceResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ControllerReclaimSpaceResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ControllerReclaimSpaceResponse) ProtoMessage() {}
func (x *ControllerReclaimSpaceResponse) ProtoReflect() protoreflect.Message {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ControllerReclaimSpaceResponse.ProtoReflect.Descriptor instead.
func (*ControllerReclaimSpaceResponse) Descriptor() ([]byte, []int) {
return file_reclaimspace_reclaimspace_proto_rawDescGZIP(), []int{1}
}
func (x *ControllerReclaimSpaceResponse) GetPreUsage() *StorageConsumption {
if x != nil {
return x.PreUsage
}
return nil
}
func (x *ControllerReclaimSpaceResponse) GetPostUsage() *StorageConsumption {
if x != nil {
return x.PostUsage
}
return nil
}
// StorageConsumption contains the usage in bytes.
type StorageConsumption struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// This field is REQUIRED. usage_bytes contains the consumed storage in
// bytes.
UsageBytes int64 `protobuf:"varint,1,opt,name=usage_bytes,json=usageBytes,proto3" json:"usage_bytes,omitempty"`
}
func (x *StorageConsumption) Reset() {
*x = StorageConsumption{}
if protoimpl.UnsafeEnabled {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StorageConsumption) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StorageConsumption) ProtoMessage() {}
func (x *StorageConsumption) ProtoReflect() protoreflect.Message {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StorageConsumption.ProtoReflect.Descriptor instead.
func (*StorageConsumption) Descriptor() ([]byte, []int) {
return file_reclaimspace_reclaimspace_proto_rawDescGZIP(), []int{2}
}
func (x *StorageConsumption) GetUsageBytes() int64 {
if x != nil {
return x.UsageBytes
}
return 0
}
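// Illustrative sketch, not part of the generated code: it derives the amount
// of space freed from the OPTIONAL pre_usage/post_usage fields of a
// ControllerReclaimSpaceResponse, returning false when either field was
// omitted by the SP.
func reclaimedBytes(resp *ControllerReclaimSpaceResponse) (int64, bool) {
	pre, post := resp.GetPreUsage(), resp.GetPostUsage()
	if pre == nil || post == nil {
		return 0, false
	}
	return pre.GetUsageBytes() - post.GetUsageBytes(), true
}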
// NodeReclaimSpaceRequest contains the information needed to identify the
// location where the volume is mounted so that local filesystem or
// block-device operations to reclaim space can be executed.
type NodeReclaimSpaceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The ID of the volume. This field is REQUIRED.
VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
// The path on which the volume is available. This field is REQUIRED.
// This field overrides the general CSI size limit.
// SP SHOULD support the maximum path length allowed by the operating
// system/filesystem, but, at a minimum, SP MUST accept a max path
// length of at least 128 bytes.
VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"`
// The path where the volume is staged, if the plugin has the
// STAGE_UNSTAGE_VOLUME capability, otherwise empty.
// If not empty, it MUST be an absolute path in the root
// filesystem of the process serving this request.
// This field is OPTIONAL.
// This field overrides the general CSI size limit.
// SP SHOULD support the maximum path length allowed by the operating
// system/filesystem, but, at a minimum, SP MUST accept a max path
// length of at least 128 bytes.
StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
// Volume capability describing how the CO intends to use this volume.
// This allows the SP to determine if the volume is being used as a block
// device or as a mounted file system. For example, if the volume is being
// used as a block device, the SP MAY choose to skip calling filesystem
// operations to reclaim space, but still perform the rest of the housekeeping
// needed for reducing the size of the volume. If volume_capability is
// omitted, the SP MAY determine the access_type from the given volume_path for the
// volume and perform space reduction. This is an OPTIONAL field.
VolumeCapability *csi.VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
// Secrets required by the plugin to complete the reclaim space operation.
// This field is OPTIONAL.
Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *NodeReclaimSpaceRequest) Reset() {
*x = NodeReclaimSpaceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeReclaimSpaceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeReclaimSpaceRequest) ProtoMessage() {}
func (x *NodeReclaimSpaceRequest) ProtoReflect() protoreflect.Message {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeReclaimSpaceRequest.ProtoReflect.Descriptor instead.
func (*NodeReclaimSpaceRequest) Descriptor() ([]byte, []int) {
return file_reclaimspace_reclaimspace_proto_rawDescGZIP(), []int{3}
}
func (x *NodeReclaimSpaceRequest) GetVolumeId() string {
if x != nil {
return x.VolumeId
}
return ""
}
func (x *NodeReclaimSpaceRequest) GetVolumePath() string {
if x != nil {
return x.VolumePath
}
return ""
}
func (x *NodeReclaimSpaceRequest) GetStagingTargetPath() string {
if x != nil {
return x.StagingTargetPath
}
return ""
}
func (x *NodeReclaimSpaceRequest) GetVolumeCapability() *csi.VolumeCapability {
if x != nil {
return x.VolumeCapability
}
return nil
}
func (x *NodeReclaimSpaceRequest) GetSecrets() map[string]string {
if x != nil {
return x.Secrets
}
return nil
}
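// Illustrative sketch, not part of the generated code: it shows the fields a
// CSI-Addons CO plugin might set when asking the node service to reclaim space
// on a mounted volume. The volume ID and paths are hypothetical placeholders;
// VolumeCapability and Secrets are OPTIONAL and left unset here.
func exampleNodeRequest() *NodeReclaimSpaceRequest {
	return &NodeReclaimSpaceRequest{
		VolumeId:          "0001-0009-example-volume-handle",                       // hypothetical
		VolumePath:        "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~csi/pvc/mount", // hypothetical
		StagingTargetPath: "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc/globalmount", // hypothetical
	}
}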
// NodeReclaimSpaceResponse holds the information about the result of the
// NodeReclaimSpaceRequest call.
type NodeReclaimSpaceResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// This field is OPTIONAL. This allows the SP to inform the CO about the
// storage consumption before the ReclaimSpace operation was executed.
PreUsage *StorageConsumption `protobuf:"bytes,1,opt,name=pre_usage,json=preUsage,proto3" json:"pre_usage,omitempty"`
// This field is OPTIONAL. This allows the SP to inform the CO about the
// storage consumption after the ReclaimSpace operation was executed.
PostUsage *StorageConsumption `protobuf:"bytes,2,opt,name=post_usage,json=postUsage,proto3" json:"post_usage,omitempty"`
}
func (x *NodeReclaimSpaceResponse) Reset() {
*x = NodeReclaimSpaceResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeReclaimSpaceResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeReclaimSpaceResponse) ProtoMessage() {}
func (x *NodeReclaimSpaceResponse) ProtoReflect() protoreflect.Message {
mi := &file_reclaimspace_reclaimspace_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeReclaimSpaceResponse.ProtoReflect.Descriptor instead.
func (*NodeReclaimSpaceResponse) Descriptor() ([]byte, []int) {
return file_reclaimspace_reclaimspace_proto_rawDescGZIP(), []int{4}
}
func (x *NodeReclaimSpaceResponse) GetPreUsage() *StorageConsumption {
if x != nil {
return x.PreUsage
}
return nil
}
func (x *NodeReclaimSpaceResponse) GetPostUsage() *StorageConsumption {
if x != nil {
return x.PostUsage
}
return nil
}
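
The messages above are plain protobuf types with generated getters. Below is a minimal sketch, not part of the generated file, showing how a caller might populate a NodeReclaimSpaceRequest and compute the reclaimed amount from a NodeReclaimSpaceResponse; the volume ID, paths, secret values, and byte counts are hypothetical placeholders.

package main

import (
	"fmt"

	rs "github.com/csi-addons/spec/lib/go/reclaimspace"
)

func main() {
	// Request as a CO would send it to the node plugin; all values here are
	// hypothetical placeholders.
	req := &rs.NodeReclaimSpaceRequest{
		VolumeId:          "pvc-example-0001",
		VolumePath:        "/var/lib/kubelet/pods/example/volumes/pvc-example-0001/mount",
		StagingTargetPath: "/var/lib/kubelet/plugins/kubernetes.io/csi/staging/pvc-example-0001",
		Secrets:           map[string]string{"userID": "csi-node-user"},
	}
	fmt.Println("requesting space reclamation for", req.GetVolumeId())

	// Response as a plugin could return it; both usage fields are optional.
	resp := &rs.NodeReclaimSpaceResponse{
		PreUsage:  &rs.StorageConsumption{UsageBytes: 10 << 30}, // 10 GiB before
		PostUsage: &rs.StorageConsumption{UsageBytes: 7 << 30},  // 7 GiB after
	}
	fmt.Printf("reclaimed %d bytes\n",
		resp.GetPreUsage().GetUsageBytes()-resp.GetPostUsage().GetUsageBytes())
}
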
var File_reclaimspace_reclaimspace_proto protoreflect.FileDescriptor
var file_reclaimspace_reclaimspace_proto_rawDesc = []byte{
0x0a, 0x1f, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x72,
0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x0c, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a,
0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
0x61, 0x69, 0x6e, 0x65, 0x72, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2d, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x2f, 0x6c, 0x69, 0x62,
0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x73, 0x69, 0x2f, 0x63, 0x73, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xed, 0x02, 0x0a, 0x1d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
0x65, 0x72, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x49, 0x64, 0x12, 0x5b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d,
0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12,
0x57, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x38, 0x2e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e,
0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69,
0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65,
0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52,
0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61,
0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65,
0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
0x02, 0x38, 0x01, 0x22, 0xa0, 0x01, 0x0a, 0x1e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
0x65, 0x72, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x5f, 0x75, 0x73,
0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x63, 0x6c,
0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x70, 0x72, 0x65,
0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x0a, 0x70, 0x6f, 0x73, 0x74, 0x5f, 0x75, 0x73,
0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x63, 0x6c,
0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x6f, 0x73,
0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x22, 0x35, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b,
0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
0x03, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xdd, 0x02,
0x0a, 0x17, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61,
0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x67, 0x69,
0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72,
0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x45, 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x10, 0x76, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x51,
0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x32, 0x2e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4e,
0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74,
0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x01,
0x0a, 0x18, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61,
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x70, 0x72,
0x65, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x08, 0x70, 0x72, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x0a, 0x70, 0x6f, 0x73,
0x74, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x09, 0x70, 0x6f, 0x73, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x32, 0x8f, 0x01, 0x0a, 0x16, 0x52,
0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x75, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
0x6c, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12,
0x2b, 0x2e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43,
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d,
0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72,
0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61,
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x77, 0x0a, 0x10,
0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65,
0x12, 0x63, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53,
0x70, 0x61, 0x63, 0x65, 0x12, 0x25, 0x2e, 0x72, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70,
0x61, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53,
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x72, 0x65,
0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52,
0x65, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x73, 0x69, 0x2d, 0x61, 0x64, 0x64, 0x6f, 0x6e, 0x73, 0x2f, 0x73,
0x70, 0x65, 0x63, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x67, 0x6f, 0x2f, 0x72, 0x65, 0x63, 0x6c, 0x61,
0x69, 0x6d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_reclaimspace_reclaimspace_proto_rawDescOnce sync.Once
file_reclaimspace_reclaimspace_proto_rawDescData = file_reclaimspace_reclaimspace_proto_rawDesc
)
func file_reclaimspace_reclaimspace_proto_rawDescGZIP() []byte {
file_reclaimspace_reclaimspace_proto_rawDescOnce.Do(func() {
file_reclaimspace_reclaimspace_proto_rawDescData = protoimpl.X.CompressGZIP(file_reclaimspace_reclaimspace_proto_rawDescData)
})
return file_reclaimspace_reclaimspace_proto_rawDescData
}
var file_reclaimspace_reclaimspace_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_reclaimspace_reclaimspace_proto_goTypes = []interface{}{
(*ControllerReclaimSpaceRequest)(nil), // 0: reclaimspace.ControllerReclaimSpaceRequest
(*ControllerReclaimSpaceResponse)(nil), // 1: reclaimspace.ControllerReclaimSpaceResponse
(*StorageConsumption)(nil), // 2: reclaimspace.StorageConsumption
(*NodeReclaimSpaceRequest)(nil), // 3: reclaimspace.NodeReclaimSpaceRequest
(*NodeReclaimSpaceResponse)(nil), // 4: reclaimspace.NodeReclaimSpaceResponse
nil, // 5: reclaimspace.ControllerReclaimSpaceRequest.ParametersEntry
nil, // 6: reclaimspace.ControllerReclaimSpaceRequest.SecretsEntry
nil, // 7: reclaimspace.NodeReclaimSpaceRequest.SecretsEntry
(*csi.VolumeCapability)(nil), // 8: csi.v1.VolumeCapability
}
var file_reclaimspace_reclaimspace_proto_depIdxs = []int32{
5, // 0: reclaimspace.ControllerReclaimSpaceRequest.parameters:type_name -> reclaimspace.ControllerReclaimSpaceRequest.ParametersEntry
6, // 1: reclaimspace.ControllerReclaimSpaceRequest.secrets:type_name -> reclaimspace.ControllerReclaimSpaceRequest.SecretsEntry
2, // 2: reclaimspace.ControllerReclaimSpaceResponse.pre_usage:type_name -> reclaimspace.StorageConsumption
2, // 3: reclaimspace.ControllerReclaimSpaceResponse.post_usage:type_name -> reclaimspace.StorageConsumption
8, // 4: reclaimspace.NodeReclaimSpaceRequest.volume_capability:type_name -> csi.v1.VolumeCapability
7, // 5: reclaimspace.NodeReclaimSpaceRequest.secrets:type_name -> reclaimspace.NodeReclaimSpaceRequest.SecretsEntry
2, // 6: reclaimspace.NodeReclaimSpaceResponse.pre_usage:type_name -> reclaimspace.StorageConsumption
2, // 7: reclaimspace.NodeReclaimSpaceResponse.post_usage:type_name -> reclaimspace.StorageConsumption
0, // 8: reclaimspace.ReclaimSpaceController.ControllerReclaimSpace:input_type -> reclaimspace.ControllerReclaimSpaceRequest
3, // 9: reclaimspace.ReclaimSpaceNode.NodeReclaimSpace:input_type -> reclaimspace.NodeReclaimSpaceRequest
1, // 10: reclaimspace.ReclaimSpaceController.ControllerReclaimSpace:output_type -> reclaimspace.ControllerReclaimSpaceResponse
4, // 11: reclaimspace.ReclaimSpaceNode.NodeReclaimSpace:output_type -> reclaimspace.NodeReclaimSpaceResponse
10, // [10:12] is the sub-list for method output_type
8, // [8:10] is the sub-list for method input_type
8, // [8:8] is the sub-list for extension type_name
8, // [8:8] is the sub-list for extension extendee
0, // [0:8] is the sub-list for field type_name
}
func init() { file_reclaimspace_reclaimspace_proto_init() }
func file_reclaimspace_reclaimspace_proto_init() {
if File_reclaimspace_reclaimspace_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_reclaimspace_reclaimspace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ControllerReclaimSpaceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_reclaimspace_reclaimspace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ControllerReclaimSpaceResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_reclaimspace_reclaimspace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StorageConsumption); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_reclaimspace_reclaimspace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeReclaimSpaceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_reclaimspace_reclaimspace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeReclaimSpaceResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_reclaimspace_reclaimspace_proto_rawDesc,
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 2,
},
GoTypes: file_reclaimspace_reclaimspace_proto_goTypes,
DependencyIndexes: file_reclaimspace_reclaimspace_proto_depIdxs,
MessageInfos: file_reclaimspace_reclaimspace_proto_msgTypes,
}.Build()
File_reclaimspace_reclaimspace_proto = out.File
file_reclaimspace_reclaimspace_proto_rawDesc = nil
file_reclaimspace_reclaimspace_proto_goTypes = nil
file_reclaimspace_reclaimspace_proto_depIdxs = nil
}

View File

@ -0,0 +1,194 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package reclaimspace
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// ReclaimSpaceControllerClient is the client API for ReclaimSpaceController service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ReclaimSpaceControllerClient interface {
// ControllerReclaimSpace is a procedure that gets called on the CSI
// Controller.
ControllerReclaimSpace(ctx context.Context, in *ControllerReclaimSpaceRequest, opts ...grpc.CallOption) (*ControllerReclaimSpaceResponse, error)
}
type reclaimSpaceControllerClient struct {
cc grpc.ClientConnInterface
}
func NewReclaimSpaceControllerClient(cc grpc.ClientConnInterface) ReclaimSpaceControllerClient {
return &reclaimSpaceControllerClient{cc}
}
func (c *reclaimSpaceControllerClient) ControllerReclaimSpace(ctx context.Context, in *ControllerReclaimSpaceRequest, opts ...grpc.CallOption) (*ControllerReclaimSpaceResponse, error) {
out := new(ControllerReclaimSpaceResponse)
err := c.cc.Invoke(ctx, "/reclaimspace.ReclaimSpaceController/ControllerReclaimSpace", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
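
A hedged sketch, not part of the generated file, of how a controller-side caller might use this client: connect to the driver's CSI-Addons endpoint and invoke ControllerReclaimSpace. The endpoint address, volume ID, and secret values are hypothetical placeholders, and the snippet assumes an insecure local connection.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	rs "github.com/csi-addons/spec/lib/go/reclaimspace"
)

func main() {
	// Hypothetical local endpoint of the CSI driver's CSI-Addons service.
	conn, err := grpc.Dial("localhost:9070", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer conn.Close()

	client := rs.NewReclaimSpaceControllerClient(conn)
	resp, err := client.ControllerReclaimSpace(context.Background(),
		&rs.ControllerReclaimSpaceRequest{
			VolumeId: "pvc-example-0001",                              // hypothetical
			Secrets:  map[string]string{"userKey": "redacted-secret"}, // hypothetical
		})
	if err != nil {
		log.Fatalf("ControllerReclaimSpace failed: %v", err)
	}
	log.Printf("pre-usage: %v, post-usage: %v", resp.GetPreUsage(), resp.GetPostUsage())
}
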
// ReclaimSpaceControllerServer is the server API for ReclaimSpaceController service.
// All implementations must embed UnimplementedReclaimSpaceControllerServer
// for forward compatibility
type ReclaimSpaceControllerServer interface {
// ControllerReclaimSpace is a procedure that gets called on the CSI
// Controller.
ControllerReclaimSpace(context.Context, *ControllerReclaimSpaceRequest) (*ControllerReclaimSpaceResponse, error)
mustEmbedUnimplementedReclaimSpaceControllerServer()
}
// UnimplementedReclaimSpaceControllerServer must be embedded to have forward compatible implementations.
type UnimplementedReclaimSpaceControllerServer struct {
}
func (UnimplementedReclaimSpaceControllerServer) ControllerReclaimSpace(context.Context, *ControllerReclaimSpaceRequest) (*ControllerReclaimSpaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ControllerReclaimSpace not implemented")
}
func (UnimplementedReclaimSpaceControllerServer) mustEmbedUnimplementedReclaimSpaceControllerServer() {
}
// UnsafeReclaimSpaceControllerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ReclaimSpaceControllerServer will
// result in compilation errors.
type UnsafeReclaimSpaceControllerServer interface {
mustEmbedUnimplementedReclaimSpaceControllerServer()
}
func RegisterReclaimSpaceControllerServer(s grpc.ServiceRegistrar, srv ReclaimSpaceControllerServer) {
s.RegisterService(&ReclaimSpaceController_ServiceDesc, srv)
}
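
On the driver side, implementations must embed UnimplementedReclaimSpaceControllerServer, as the comments above require. The following minimal sketch (not from the source) implements ControllerReclaimSpace and registers the server; the socket path is a hypothetical placeholder and the handler only logs instead of performing real reclamation.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	rs "github.com/csi-addons/spec/lib/go/reclaimspace"
)

// controllerServer embeds the Unimplemented type so the build keeps working
// if new RPCs are added to the ReclaimSpaceController service.
type controllerServer struct {
	rs.UnimplementedReclaimSpaceControllerServer
}

func (cs *controllerServer) ControllerReclaimSpace(
	ctx context.Context, req *rs.ControllerReclaimSpaceRequest,
) (*rs.ControllerReclaimSpaceResponse, error) {
	log.Printf("reclaiming space for volume %q", req.GetVolumeId())
	// A real driver would trigger storage-side space reclamation here.
	return &rs.ControllerReclaimSpaceResponse{}, nil
}

func main() {
	// Hypothetical controller-plugin endpoint.
	lis, err := net.Listen("unix", "/tmp/csi-addons.sock")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	rs.RegisterReclaimSpaceControllerServer(srv, &controllerServer{})
	log.Fatal(srv.Serve(lis))
}
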
func _ReclaimSpaceController_ControllerReclaimSpace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ControllerReclaimSpaceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReclaimSpaceControllerServer).ControllerReclaimSpace(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/reclaimspace.ReclaimSpaceController/ControllerReclaimSpace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReclaimSpaceControllerServer).ControllerReclaimSpace(ctx, req.(*ControllerReclaimSpaceRequest))
}
return interceptor(ctx, in, info, handler)
}
// ReclaimSpaceController_ServiceDesc is the grpc.ServiceDesc for ReclaimSpaceController service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var ReclaimSpaceController_ServiceDesc = grpc.ServiceDesc{
ServiceName: "reclaimspace.ReclaimSpaceController",
HandlerType: (*ReclaimSpaceControllerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ControllerReclaimSpace",
Handler: _ReclaimSpaceController_ControllerReclaimSpace_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "reclaimspace/reclaimspace.proto",
}
// ReclaimSpaceNodeClient is the client API for ReclaimSpaceNode service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ReclaimSpaceNodeClient interface {
// NodeReclaimSpace is a procedure that gets called on the CSI NodePlugin.
NodeReclaimSpace(ctx context.Context, in *NodeReclaimSpaceRequest, opts ...grpc.CallOption) (*NodeReclaimSpaceResponse, error)
}
type reclaimSpaceNodeClient struct {
cc grpc.ClientConnInterface
}
func NewReclaimSpaceNodeClient(cc grpc.ClientConnInterface) ReclaimSpaceNodeClient {
return &reclaimSpaceNodeClient{cc}
}
func (c *reclaimSpaceNodeClient) NodeReclaimSpace(ctx context.Context, in *NodeReclaimSpaceRequest, opts ...grpc.CallOption) (*NodeReclaimSpaceResponse, error) {
out := new(NodeReclaimSpaceResponse)
err := c.cc.Invoke(ctx, "/reclaimspace.ReclaimSpaceNode/NodeReclaimSpace", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ReclaimSpaceNodeServer is the server API for ReclaimSpaceNode service.
// All implementations must embed UnimplementedReclaimSpaceNodeServer
// for forward compatibility
type ReclaimSpaceNodeServer interface {
// NodeReclaimSpace is a procedure that gets called on the CSI NodePlugin.
NodeReclaimSpace(context.Context, *NodeReclaimSpaceRequest) (*NodeReclaimSpaceResponse, error)
mustEmbedUnimplementedReclaimSpaceNodeServer()
}
// UnimplementedReclaimSpaceNodeServer must be embedded to have forward compatible implementations.
type UnimplementedReclaimSpaceNodeServer struct {
}
func (UnimplementedReclaimSpaceNodeServer) NodeReclaimSpace(context.Context, *NodeReclaimSpaceRequest) (*NodeReclaimSpaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method NodeReclaimSpace not implemented")
}
func (UnimplementedReclaimSpaceNodeServer) mustEmbedUnimplementedReclaimSpaceNodeServer() {}
// UnsafeReclaimSpaceNodeServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ReclaimSpaceNodeServer will
// result in compilation errors.
type UnsafeReclaimSpaceNodeServer interface {
mustEmbedUnimplementedReclaimSpaceNodeServer()
}
func RegisterReclaimSpaceNodeServer(s grpc.ServiceRegistrar, srv ReclaimSpaceNodeServer) {
s.RegisterService(&ReclaimSpaceNode_ServiceDesc, srv)
}
func _ReclaimSpaceNode_NodeReclaimSpace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(NodeReclaimSpaceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReclaimSpaceNodeServer).NodeReclaimSpace(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/reclaimspace.ReclaimSpaceNode/NodeReclaimSpace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReclaimSpaceNodeServer).NodeReclaimSpace(ctx, req.(*NodeReclaimSpaceRequest))
}
return interceptor(ctx, in, info, handler)
}
// ReclaimSpaceNode_ServiceDesc is the grpc.ServiceDesc for ReclaimSpaceNode service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var ReclaimSpaceNode_ServiceDesc = grpc.ServiceDesc{
ServiceName: "reclaimspace.ReclaimSpaceNode",
HandlerType: (*ReclaimSpaceNodeServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "NodeReclaimSpace",
Handler: _ReclaimSpaceNode_NodeReclaimSpace_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "reclaimspace/reclaimspace.proto",
}
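
The node service follows the same pattern. The sketch below is illustrative only and not from the source: it assumes a filesystem-mode volume and uses fstrim on the reported volume path as a stand-in for whatever a real node plugin would do; the endpoint path is a hypothetical placeholder and the optional usage fields are left unset.

package main

import (
	"context"
	"log"
	"net"
	"os/exec"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	rs "github.com/csi-addons/spec/lib/go/reclaimspace"
)

// nodeServer embeds the Unimplemented type so it stays forward compatible.
type nodeServer struct {
	rs.UnimplementedReclaimSpaceNodeServer
}

// NodeReclaimSpace trims unused filesystem blocks on the mounted volume.
// Calling fstrim is only an illustration of what a filesystem-mode plugin
// might do here.
func (ns *nodeServer) NodeReclaimSpace(
	ctx context.Context, req *rs.NodeReclaimSpaceRequest,
) (*rs.NodeReclaimSpaceResponse, error) {
	out, err := exec.CommandContext(ctx, "fstrim", req.GetVolumePath()).CombinedOutput()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "fstrim on %q failed: %v (%s)",
			req.GetVolumePath(), err, out)
	}
	return &rs.NodeReclaimSpaceResponse{}, nil
}

func main() {
	// Hypothetical node-plugin endpoint.
	lis, err := net.Listen("unix", "/tmp/csi-addons-node.sock")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	rs.RegisterReclaimSpaceNodeServer(srv, &nodeServer{})
	log.Fatal(srv.Serve(lis))
}
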

6
vendor/modules.txt vendored
View File

@ -69,7 +69,7 @@ github.com/cenkalti/backoff/v3
## explicit; go 1.16
github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd
github.com/ceph/ceph-csi/api/deploy/ocp
# github.com/ceph/go-ceph v0.12.0
# github.com/ceph/go-ceph v0.13.0
## explicit; go 1.12
github.com/ceph/go-ceph/cephfs/admin
github.com/ceph/go-ceph/common/commands
@ -91,9 +91,11 @@ github.com/container-storage-interface/spec/lib/go/csi
# github.com/csi-addons/replication-lib-utils v0.2.0
## explicit; go 1.15
github.com/csi-addons/replication-lib-utils/protosanitizer
# github.com/csi-addons/spec v0.1.2-0.20211123125058-fd968c478af7
# github.com/csi-addons/spec v0.1.2-0.20211220115741-32fa508dadbe
## explicit
github.com/csi-addons/spec/lib/go/fence
github.com/csi-addons/spec/lib/go/identity
github.com/csi-addons/spec/lib/go/reclaimspace
github.com/csi-addons/spec/lib/go/replication
# github.com/cyphar/filepath-securejoin v0.2.2
## explicit