Merge pull request #165 from ceph/devel

sync downstream with upstream devel
OpenShift Merge Robot 2023-07-06 01:40:38 -04:00 committed by GitHub
commit 62312fa693
63 changed files with 2098 additions and 836 deletions


@ -44,7 +44,5 @@ the following bot commands in an otherwise empty comment in this PR:
- `/retest ci/centos/<job-name>`: retest the `<job-name>` after unrelated
failure (please report the failure too!)
- `/retest all`: run this in case the CentOS CI failed to start/report any test
progress or results
</details>


@ -182,15 +182,6 @@ pull_request_rules:
name: default
delete_head_branch: {}
- name: backport patches to release-v3.7 branch
conditions:
- base=devel
- label=backport-to-release-v3.7
actions:
backport:
branches:
- release-v3.7
- name: backport patches to release-v3.8 branch
conditions:
- base=devel
@ -200,6 +191,15 @@ pull_request_rules:
branches:
- release-v3.8
- name: backport patches to release-v3.9 branch
conditions:
- base=devel
- label=backport-to-release-v3.9
actions:
backport:
branches:
- release-v3.9
- name: remove outdated approvals on ci/centos
conditions:
- base=ci/centos


@ -56,10 +56,8 @@ environments.
| Ceph CSI Version | Container Orchestrator Name | Version Tested|
| -----------------| --------------------------- | --------------|
| v3.9.0 | Kubernetes | v1.25, v1.26, v1.27|
| v3.8.0 | Kubernetes | v1.24, v1.25, v1.26, v1.27|
| v3.7.2 | Kubernetes | v1.22, v1.23, v1.24|
| v3.7.1 | Kubernetes | v1.22, v1.23, v1.24|
| v3.7.0 | Kubernetes | v1.22, v1.23, v1.24|
There is work in progress to make this CO-independent and thus
support other orchestration environments (Nomad, Mesos, etc.).
@ -69,8 +67,8 @@ NOTE:
The supported window of Ceph CSI versions is "N.(x-1)": given the latest
release N.x, versions down to N.(x-1) remain supported.
For example, if the Ceph CSI latest major version is `3.8.0` today, support is
provided for the versions above `3.7.0`. If users are running an unsupported
For example, if the Ceph CSI latest major version is `3.9.0` today, support is
provided for the versions above `3.8.0`. If users are running an unsupported
Ceph CSI version, they will be asked to upgrade when requesting support.
## Support Matrix
@ -130,13 +128,14 @@ in the Kubernetes documentation.
| Ceph CSI Release/Branch | Container image name | Image Tag |
| ----------------------- | ---------------------------- | --------- |
| devel (Branch) | quay.io/cephcsi/cephcsi | canary |
| v3.9.0 (Release) | quay.io/cephcsi/cephcsi | v3.9.0 |
| v3.8.0 (Release) | quay.io/cephcsi/cephcsi | v3.8.0 |
| v3.7.2 (Release) | quay.io/cephcsi/cephcsi | v3.7.2 |
| v3.7.1 (Release) | quay.io/cephcsi/cephcsi | v3.7.1 |
| v3.7.0 (Release) | quay.io/cephcsi/cephcsi | v3.7.0 |
| Deprecated Ceph CSI Release/Branch | Container image name | Image Tag |
| ----------------------- | --------------------------------| --------- |
| v3.7.2 (Release) | quay.io/cephcsi/cephcsi | v3.7.2 |
| v3.7.1 (Release) | quay.io/cephcsi/cephcsi | v3.7.1 |
| v3.7.0 (Release) | quay.io/cephcsi/cephcsi | v3.7.0 |
| v3.6.1 (Release) | quay.io/cephcsi/cephcsi | v3.6.1 |
| v3.6.0 (Release) | quay.io/cephcsi/cephcsi | v3.6.0 |
| v3.5.1 (Release) | quay.io/cephcsi/cephcsi | v3.5.1 |


@ -6,7 +6,7 @@ require (
github.com/ghodss/yaml v1.0.0
github.com/openshift/api v0.0.0-20230320192226-1fc631efd341
github.com/stretchr/testify v1.8.4
k8s.io/api v0.27.2
k8s.io/api v0.27.3
)
require (
@ -23,7 +23,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.27.2 // indirect
k8s.io/apimachinery v0.27.3 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect


@ -73,10 +73,10 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo=
k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4=
k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg=
k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y=
k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg=
k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=


@ -4,6 +4,11 @@ ARG BASE_IMAGE
FROM ${BASE_IMAGE} as updated_base
# TODO: remove the following cmd, when issue
# https://github.com/ceph/ceph-container/issues/2034 is fixed.
RUN dnf config-manager --disable \
tcmu-runner,tcmu-runner-source,tcmu-runner-noarch,ceph-iscsi || true
RUN dnf -y update \
&& dnf clean all \
&& rm -rf /var/cache/yum
@ -29,11 +34,6 @@ RUN source /build.env && \
# test if the downloaded version of Golang works (different arch?)
RUN ${GOROOT}/bin/go version && ${GOROOT}/bin/go env
# TODO: remove the following cmd, when issue
# https://github.com/ceph/ceph-container/issues/2034 is fixed.
RUN dnf config-manager --disable \
tcmu-runner,tcmu-runner-source,tcmu-runner-noarch || true
RUN dnf -y install --nodocs \
librados-devel librbd-devel \
/usr/bin/cc \


@ -4,12 +4,8 @@
- [Pre-upgrade considerations](#pre-upgrade-considerations)
- [Snapshot-controller and snapshot crd](#snapshot-controller-and-snapshot-crd)
- [Snapshot API version support matrix](#snapshot-api-version-support-matrix)
- [Upgrading from v3.2 to v3.3](#upgrading-from-v32-to-v33)
- [Upgrading from v3.3 to v3.4](#upgrading-from-v33-to-v34)
- [Upgrading from v3.4 to v3.5](#upgrading-from-v34-to-v35)
- [Upgrading from v3.5 to v3.6](#upgrading-from-v35-to-v36)
- [Upgrading from v3.6 to v3.7](#upgrading-from-v36-to-v37)
- [Upgrading from v3.7 to v3.8](#upgrading-from-v37-to-v38)
- [Upgrading from previous releases](#upgrading-from-previous-releases)
- [Upgrading from v3.8 to v3.9](#upgrading-from-v38-to-v39)
- [Upgrading CephFS](#upgrading-cephfs)
- [1. Upgrade CephFS Provisioner resources](#1-upgrade-cephfs-provisioner-resources)
- [1.1 Update the CephFS Provisioner RBAC](#11-update-the-cephfs-provisioner-rbac)
@ -18,6 +14,7 @@
- [2.1 Update the CephFS Nodeplugin RBAC](#21-update-the-cephfs-nodeplugin-rbac)
- [2.2 Update the CephFS Nodeplugin daemonset](#22-update-the-cephfs-nodeplugin-daemonset)
- [2.3 Manual deletion of CephFS Nodeplugin daemonset pods](#23-manual-deletion-of-cephfs-nodeplugin-daemonset-pods)
- [2.4 Modifying MountOptions in Storageclass and PersistentVolumes](#24-modifying-mountoptions-in-storageclass-and-persistentvolumes)
- [Delete removed CephFS PSP, Role and RoleBinding](#delete-removed-cephfs-psp-role-and-rolebinding)
- [Upgrading RBD](#upgrading-rbd)
- [3. Upgrade RBD Provisioner resources](#3-upgrade-rbd-provisioner-resources)
@ -56,7 +53,7 @@ To avoid this issue in future upgrades, we recommend that you do not use the
fuse client as of now.
This guide will walk you through the steps to upgrade the software in a cluster
from v3.7 to v3.8
from v3.8 to v3.9
### Snapshot-controller and snapshot crd
@ -73,32 +70,24 @@ controller and snapshot CRD. more info can be found
**Note:** We recommend using the same version of the {sidecar, controller, crds}
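As a quick way to confirm which snapshot API versions the cluster currently serves, a minimal sketch (not part of the upstream guide) is to query the installed CRD:

```console
kubectl get crd volumesnapshots.snapshot.storage.k8s.io \
  -o jsonpath='{.spec.versions[*].name}'
```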
## Upgrading from v3.2 to v3.3
## Upgrading from previous releases
Refer [upgrade-from-v3.2-v3.3](https://github.com/ceph/ceph-csi/blob/v3.3.1/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.2 to v3.3
To upgrade from previous releases, refer to the following:
## Upgrading from v3.3 to v3.4
- [upgrade-from-v3.2-v3.3](https://github.com/ceph/ceph-csi/blob/v3.3.1/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.2 to v3.3
- [upgrade-from-v3.3-v3.4](https://github.com/ceph/ceph-csi/blob/v3.4.0/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.3 to v3.4
- [upgrade-from-v3.4-v3.5](https://github.com/ceph/ceph-csi/blob/v3.5.1/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.4 to v3.5
- [upgrade-from-v3.5-v3.6](https://github.com/ceph/ceph-csi/blob/v3.6.1/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.5 to v3.6
- [upgrade-from-v3.6-v3.7](https://github.com/ceph/ceph-csi/blob/v3.7.2/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.6 to v3.7
- [upgrade-from-v3.7-v3.8](https://github.com/ceph/ceph-csi/blob/v3.8.0/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.7 to v3.8
Refer [upgrade-from-v3.3-v3.4](https://github.com/ceph/ceph-csi/blob/v3.4.0/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.3 to v3.4
## Upgrading from v3.4 to v3.5
Refer [upgrade-from-v3.4-v3.5](https://github.com/ceph/ceph-csi/blob/v3.5.1/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.4 to v3.5
## Upgrading from v3.5 to v3.6
Refer [upgrade-from-v3.5-v3.6](https://github.com/ceph/ceph-csi/blob/v3.6.1/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.5 to v3.6
## Upgrading from v3.6 to v3.7
Refer [upgrade-from-v3.6-v3.7](https://github.com/ceph/ceph-csi/blob/v3.7.2/docs/ceph-csi-upgrade.md)
to upgrade from cephcsi v3.6 to v3.7
## Upgrading from v3.7 to v3.8
## Upgrading from v3.8 to v3.9
**Ceph-csi releases from devel are expressly unsupported.** It is strongly
recommended that you use [official
@ -108,15 +97,15 @@ that will not be supported in the official releases. Builds from the devel
branch can have functionality changed and even removed at any time without
compatibility support and without prior notice.
**Also, we do not recommend any direct upgrades to 3.8 except from 3.7 to 3.8.**
For example, upgrading from 3.6 to 3.8 is not recommended.
**Also, we do not recommend any direct upgrades to 3.9 except from 3.8 to 3.9.**
For example, upgrading from 3.7 to 3.9 is not recommended.
git checkout v3.8.0 tag
git checkout v3.9.0 tag
```bash
git clone https://github.com/ceph/ceph-csi.git
cd ./ceph-csi
git checkout v3.8.0
git checkout v3.9.0
```
```console
@ -127,6 +116,9 @@ Warning: kubectl apply should be used on resource created by either kubectl crea
### Upgrading CephFS
If the `MountOptions` of existing CephFS StorageClasses are set, please refer
to the [modifying mount options](#24-modifying-mountoptions-in-storageclass-and-persistentvolumes)
section.
Upgrading CephFS CSI includes upgrading the CephFS driver, the Kubernetes
sidecar containers, and the permissions required by those sidecar containers.
Let's upgrade them one by one.
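Each of these pieces is refreshed by re-applying the manifests from the checked-out tag. As a minimal sketch, assuming the upstream `deploy/cephfs/kubernetes` layout:

```console
kubectl apply -f deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
kubectl apply -f deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
```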
@ -233,10 +225,35 @@ For each node:
- Drain your application pods from the node
- Delete the CSI driver pods on the node
- The pods to delete will be named with a csi-cephfsplugin prefix and have a
  random suffix on each node. However, there is no need to delete the
  provisioner pods (csi-cephfsplugin-provisioner-*).
- The pod deletion causes the pods to be restarted and updated automatically
  on the node (see the example below).
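For example, draining a node and replacing its CephFS plugin pod could look like the sketch below (the namespace placeholder and the `app=csi-cephfsplugin` label are assumptions based on the upstream manifests; adjust them for your deployment):

```console
kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data
kubectl delete pod -n <namespace> -l app=csi-cephfsplugin \
  --field-selector spec.nodeName=<node-name>
kubectl uncordon <node-name>
```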
##### 2.4 Modifying MountOptions in Storageclass and PersistentVolumes
CephCSI, starting from release v3.9.0, will pass the options specified in the
StorageClass's `MountOptions` during both `NodeStageVolume` (kernel cephfs or
ceph-fuse mount operation) and `NodePublishVolume` (bind mount) operations.
Therefore, only common options that are acceptable during both of the above
operations should be set in the StorageClass's `MountOptions`.
If invalid mount options, such as `"debug"`, are set in the StorageClass's
`MountOptions`, mounting of CephFS PVCs will fail.
Follow the steps below to update the StorageClass's `MountOptions`:
- Take a backup of the StorageClass using
`kubectl get sc <storageclass-name> -o yaml > sc.yaml`.
- Edit `sc.yaml` to remove the invalid mount options from `MountOptions` field.
- Delete the StorageClass using `kubectl delete sc <storageclass-name>`.
- Recreate the StorageClass using `kubectl create -f sc.yaml`.
Follow the steps below to update the PersistentVolumes' `MountOptions`
(a combined example is shown after this list):
- Identify CephFS PersistentVolumes using
  `kubectl get pv | grep <storageclass-name>`.
- Remove the invalid mount options from the `MountOptions` field of each
  PersistentVolume using `kubectl edit pv <pv-name>`.
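Putting the StorageClass and PersistentVolume steps together, a minimal combined example (the StorageClass name `csi-cephfs-sc` is a placeholder) might look like:

```console
kubectl get sc csi-cephfs-sc -o yaml > sc.yaml
# edit sc.yaml and drop invalid entries (for example "debug") from mountOptions
kubectl delete sc csi-cephfs-sc
kubectl create -f sc.yaml
# locate CephFS PVs and strip the invalid options from spec.mountOptions
kubectl get pv | grep csi-cephfs-sc
kubectl edit pv <pv-name>
```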
#### Delete removed CephFS PSP, Role and RoleBinding
@ -252,7 +269,7 @@ kubectl delete role cephfs-csi-nodeplugin-psp --ignore-not-found
kubectl delete rolebinding cephfs-csi-nodeplugin-psp --ignore-not-found
```
we have successfully upgraded cephfs csi from v3.7 to v3.8
we have successfully upgraded cephfs csi from v3.8 to v3.9
### Upgrading RBD
@ -335,7 +352,7 @@ kubectl delete role rbd-csi-vault-token-review-psp --ignore-not-found
kubectl delete rolebinding rbd-csi-vault-token-review-psp --ignore-not-found
```
we have successfully upgraded RBD csi from v3.7 to v3.8
we have successfully upgraded RBD csi from v3.8 to v3.9
### Upgrading NFS
@ -397,7 +414,7 @@ daemonset.apps/csi-nfsplugin configured
service/csi-metrics-nfsplugin configured
```
we have successfully upgraded nfs csi from v3.7 to v3.8
we have successfully upgraded nfs csi from v3.8 to v3.9
### CSI Sidecar containers consideration


@ -300,12 +300,6 @@ opening fresh PRs, rebase of PRs and force pushing changes to existing PRs.
Right now, we also have the following commands to manually retrigger the CI jobs
1. To retrigger all the CI jobs, comment the PR with command: `/retest all`
**Note**:
This will rerun all the jobs including the jobs which are already passed
1. To retrigger a specific CI job, comment the PR with command: `/retest <job-name>`
example:


@ -337,6 +337,46 @@ var _ = Describe(cephfsType, func() {
}
})
By("validate fuseMountOptions", func() {
params := map[string]string{
"mounter": "fuse",
"fuseMountOptions": "default_permissions",
}
err := createCephfsStorageClass(f.ClientSet, f, true, params)
if err != nil {
framework.Failf("failed to create CephFS storageclass: %v", err)
}
mountFlags := []string{"default_permissions"}
err = checkMountOptions(pvcPath, appPath, f, mountFlags)
if err != nil {
framework.Failf("failed to validate fuse mount options: %v", err)
}
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}
})
By("validate kernelMountOptions", func() {
params := map[string]string{
"mounter": "kernel",
"kernelMountOptions": "nocrc",
}
err := createCephfsStorageClass(f.ClientSet, f, true, params)
if err != nil {
framework.Failf("failed to create CephFS storageclass: %v", err)
}
mountFlags := []string{"nocrc"}
err = checkMountOptions(pvcPath, appPath, f, mountFlags)
if err != nil {
framework.Failf("failed to validate kernel mount options: %v", err)
}
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}
})
By("verify generic ephemeral volume support", func() {
err := createCephfsStorageClass(f.ClientSet, f, true, nil)
if err != nil {

go.mod (28 changed lines)

@ -4,14 +4,14 @@ go 1.20
require (
github.com/IBM/keyprotect-go-client v0.10.0
github.com/aws/aws-sdk-go v1.44.285
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0
github.com/aws/aws-sdk-go v1.44.295
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
// TODO: API for managing subvolume metadata and snapshot metadata requires `ceph_ci_untested` build-tag
github.com/ceph/go-ceph v0.21.0
github.com/ceph/go-ceph v0.22.0
github.com/container-storage-interface/spec v1.8.0
github.com/csi-addons/replication-lib-utils v0.2.0
github.com/csi-addons/spec v0.2.0
github.com/csi-addons/spec v0.2.1-0.20230606140122-d20966d2e444
github.com/gemalto/kmip-go v0.0.9
github.com/golang/protobuf v1.5.3
github.com/google/fscrypt v0.3.4
@ -19,10 +19,10 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.9.2
github.com/kubernetes-csi/csi-lib-utils v0.13.0
github.com/kubernetes-csi/csi-lib-utils v0.14.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo/v2 v2.10.0
github.com/onsi/ginkgo/v2 v2.11.0
github.com/onsi/gomega v1.27.8
github.com/pkg/xattr v0.4.9
github.com/prometheus/client_golang v1.16.0
@ -30,13 +30,13 @@ require (
golang.org/x/crypto v0.10.0
golang.org/x/net v0.11.0
golang.org/x/sys v0.9.0
google.golang.org/grpc v1.56.0
google.golang.org/protobuf v1.30.0
google.golang.org/grpc v1.56.1
google.golang.org/protobuf v1.31.0
//
// when updating k8s.io/kubernetes, make sure to update the replace section too
//
k8s.io/api v0.27.2
k8s.io/apimachinery v0.27.2
k8s.io/api v0.27.3
k8s.io/apimachinery v0.27.3
k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.27.2
k8s.io/klog/v2 v2.100.1
@ -54,10 +54,10 @@ require (
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/aws/aws-sdk-go-v2 v1.18.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect
github.com/aws/aws-sdk-go-v2 v1.18.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect

go.sum (48 changed lines)

@ -154,18 +154,18 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.44.285 h1:rgoWYl+NdmKzRgoi/fZLEtGXOjCkcWIa5jPH02Uahdo=
github.com/aws/aws-sdk-go v1.44.285/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY=
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 h1:kG5eQilShqmJbv11XL1VpyDbaEJzWxd4zRiCG30GSn4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 h1:vFQlirhuM8lLlpI7imKOMsjdQLuN9CPi+k44F/OFVsk=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 h1:0iKliEXAcCa2qVtRs7Ot5hItA2MsufrphbRFlz1Owxo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE9iTYD0gFmXVax9E=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
github.com/aws/aws-sdk-go v1.44.295 h1:SGjU1+MqttXfRiWHD6WU0DRhaanJgAFY+xIhEaugV8Y=
github.com/aws/aws-sdk-go v1.44.295/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo=
github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 h1:XFJ2Z6sNUUcAz9poj+245DMkrHE4h2j5I9/xD50RHfE=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
@ -195,8 +195,8 @@ github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
github.com/ceph/go-ceph v0.21.0 h1:nx+6FARWQqQ3ctSVwljeeauh0wgyVvd17i23d75mpA8=
github.com/ceph/go-ceph v0.21.0/go.mod h1:574HYNbG0RZV7lBemoCIxrQEUlo/1BzN42y5NgDr4vg=
github.com/ceph/go-ceph v0.22.0 h1:neClxdgly+Bb2IfmC3vP4Rihh//BM0YpJW/685T+yT4=
github.com/ceph/go-ceph v0.22.0/go.mod h1:vyoYT04bHOlrjFfu1NmmtqKdH5uRBVKPKq7viyUL4sc=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@ -260,8 +260,8 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr
github.com/csi-addons/replication-lib-utils v0.2.0 h1:tGs42wfjkObbBo/98a3uxTFWEJ1dq5PIMqPWtdLd040=
github.com/csi-addons/replication-lib-utils v0.2.0/go.mod h1:ROQlEsc2EerVtc/K/C+6Hx8pqaQ9MVy9xFFpyKfI9lc=
github.com/csi-addons/spec v0.1.0/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/csi-addons/spec v0.2.0 h1:Ews7bxpN9P6nFxl1XvMg87cR1wLROdH1FzSfLfb4VfI=
github.com/csi-addons/spec v0.2.0/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/csi-addons/spec v0.2.1-0.20230606140122-d20966d2e444 h1:hWVCrZWVHctpWt6cQxV1I6dW3wpBDMg3Vrvu9uAuUxw=
github.com/csi-addons/spec v0.2.1-0.20230606140122-d20966d2e444/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -791,8 +791,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w=
github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE=
github.com/kubernetes-csi/csi-lib-utils v0.14.0 h1:pusB32LkSd7GhuT8Z6cyRFqByujc28ygWV97ndaT19s=
github.com/kubernetes-csi/csi-lib-utils v0.14.0/go.mod h1:uX8xidqxGJOLXtsfCCVsxWtZl/9NiLyd2DD3Nb+KoP4=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 h1:cMM5AB37e9aRGjErygVT6EuBPB6s5a+l95OPERmSlVM=
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0/go.mod h1:VQVLCPGDX5l6V5PezjlDXLa+SpCbWSVU7B16cFWVVeE=
@ -915,8 +915,8 @@ github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1L
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs=
github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE=
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -1786,8 +1786,8 @@ google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE=
google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1804,8 +1804,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -32,8 +32,8 @@ type FenceControllerServer struct {
*fence.UnimplementedFenceControllerServer
}
// NewFenceControllerServer creates a new IdentityServer which handles
// the Identity Service requests from the CSI-Addons specification.
// NewFenceControllerServer creates a new FenceControllerServer which handles
// the FenceController Service requests from the CSI-Addons specification.
func NewFenceControllerServer() *FenceControllerServer {
return &FenceControllerServer{}
}


@ -37,8 +37,8 @@ type ReclaimSpaceControllerServer struct {
*rs.UnimplementedReclaimSpaceControllerServer
}
// NewReclaimSpaceControllerServer creates a new IdentityServer which handles
// the Identity Service requests from the CSI-Addons specification.
// NewReclaimSpaceControllerServer creates a new ReclaimSpaceControllerServer which handles
// the ReclaimSpace Service requests from the CSI-Addons specification.
func NewReclaimSpaceControllerServer() *ReclaimSpaceControllerServer {
return &ReclaimSpaceControllerServer{}
}


@ -36,6 +36,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
)
@ -82,6 +83,12 @@ type ReplicationServer struct {
*corerbd.ControllerServer
}
// NewReplicationServer creates a new ReplicationServer which handles
// the Replication Service requests from the CSI-Addons specification.
func NewReplicationServer(c *corerbd.ControllerServer) *ReplicationServer {
return &ReplicationServer{ControllerServer: c}
}
func (rs *ReplicationServer) RegisterService(server grpc.ServiceRegistrar) {
replication.RegisterControllerServer(server, rs)
}
@ -326,7 +333,12 @@ func (rs *ReplicationServer) DisableVolumeReplication(ctx context.Context,
case librbd.MirrorImageDisabling:
return nil, status.Errorf(codes.Aborted, "%s is in disabling state", volumeID)
case librbd.MirrorImageEnabled:
return corerbd.DisableVolumeReplication(rbdVol, mirroringInfo, force)
err = rbdVol.DisableVolumeReplication(mirroringInfo, force)
if err != nil {
return nil, getGRPCError(err)
}
return &replication.DisableVolumeReplicationResponse{}, nil
default:
return nil, status.Errorf(codes.InvalidArgument, "image is in %s Mode", mirroringInfo.State)
}
@ -627,9 +639,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
err = rbdVol.ResyncVol(localStatus, req.Force)
if err != nil {
log.ErrorLog(ctx, err.Error())
return nil, err
return nil, getGRPCError(err)
}
err = checkVolumeResyncStatus(localStatus)
@ -649,6 +659,32 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
return resp, nil
}
func getGRPCError(err error) error {
if err == nil {
return status.Error(codes.OK, codes.OK.String())
}
errorStatusMap := map[error]codes.Code{
corerbd.ErrFetchingLocalState: codes.Internal,
corerbd.ErrResyncImageFailed: codes.Internal,
corerbd.ErrDisableImageMirroringFailed: codes.Internal,
corerbd.ErrFetchingMirroringInfo: codes.Internal,
corerbd.ErrInvalidArgument: codes.InvalidArgument,
corerbd.ErrAborted: codes.Aborted,
corerbd.ErrFailedPrecondition: codes.FailedPrecondition,
corerbd.ErrUnavailable: codes.Unavailable,
}
for e, code := range errorStatusMap {
if errors.Is(err, e) {
return status.Error(code, err.Error())
}
}
// Handle any other non-nil error not listed in the map
return status.Error(codes.Unknown, err.Error())
}
// GetVolumeReplicationInfo extracts the RBD volume information from the volumeID, If the
// image is present, mirroring is enabled and the image is in primary state.
func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
@ -719,18 +755,14 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
}
description := remoteStatus.Description
lastSyncTime, err := getLastSyncTime(description)
resp, err := getLastSyncInfo(description)
if err != nil {
if errors.Is(err, corerbd.ErrLastSyncTimeNotFound) {
return nil, status.Errorf(codes.NotFound, "failed to get last sync time: %v", err)
return nil, status.Errorf(codes.NotFound, "failed to get last sync info: %v", err)
}
log.ErrorLog(ctx, err.Error())
return nil, status.Errorf(codes.Internal, "failed to get last sync time: %v", err)
}
resp := &replication.GetVolumeReplicationInfoResponse{
LastSyncTime: lastSyncTime,
return nil, status.Errorf(codes.Internal, "failed to get last sync info: %v", err)
}
return resp, nil
@ -756,42 +788,69 @@ func RemoteStatus(gmis *librbd.GlobalMirrorImageStatus) (librbd.SiteMirrorImageS
return ss, err
}
// This function gets the local snapshot time from the description
// of localStatus and converts it into required type.
func getLastSyncTime(description string) (*timestamppb.Timestamp, error) {
// This function gets the local snapshot time, last sync snapshot seconds
// and last sync bytes from the description of localStatus and converts
// them into the required types.
func getLastSyncInfo(description string) (*replication.GetVolumeReplicationInfoResponse, error) {
// Format of the description will be as followed:
// description = "replaying,{"bytes_per_second":0.0,
// "bytes_per_snapshot":149504.0,"local_snapshot_timestamp":1662655501
// ,"remote_snapshot_timestamp":1662655501}"
// description = `replaying, {"bytes_per_second":0.0,"bytes_per_snapshot":81920.0,
// "last_snapshot_bytes":81920,"last_snapshot_sync_seconds":0,
// "local_snapshot_timestamp":1684675261,
// "remote_snapshot_timestamp":1684675261,"replay_state":"idle"}`
// In case there is no last snapshot bytes returns 0 as the
// LastSyncBytes is optional.
// In case there is no last snapshot sync seconds, it returns nil as the
// LastSyncDuration is optional.
// In case there is no local snapshot timestamp return an error as the
// LastSyncTime is required.
var response replication.GetVolumeReplicationInfoResponse
if description == "" {
return nil, fmt.Errorf("empty description: %w", corerbd.ErrLastSyncTimeNotFound)
}
splittedString := strings.SplitN(description, ",", 2)
if len(splittedString) == 1 {
return nil, fmt.Errorf("no local snapshot timestamp: %w", corerbd.ErrLastSyncTimeNotFound)
return nil, fmt.Errorf("no snapshot details: %w", corerbd.ErrLastSyncTimeNotFound)
}
type localStatus struct {
LocalSnapshotTime int64 `json:"local_snapshot_timestamp"`
LocalSnapshotTime int64 `json:"local_snapshot_timestamp"`
LastSnapshotBytes int64 `json:"last_snapshot_bytes"`
LastSnapshotDuration *int64 `json:"last_snapshot_sync_seconds"`
}
var localSnapTime localStatus
err := json.Unmarshal([]byte(splittedString[1]), &localSnapTime)
var localSnapInfo localStatus
err := json.Unmarshal([]byte(splittedString[1]), &localSnapInfo)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal description: %w", err)
return nil, fmt.Errorf("failed to unmarshal local snapshot info: %w", err)
}
// If the json unmarshal is successful but the local snapshot time is 0, we
// need to consider it as an error as the LastSyncTime is required.
if localSnapTime.LocalSnapshotTime == 0 {
if localSnapInfo.LocalSnapshotTime == 0 {
return nil, fmt.Errorf("empty local snapshot timestamp: %w", corerbd.ErrLastSyncTimeNotFound)
}
if localSnapInfo.LastSnapshotDuration != nil {
// converts localSnapshotDuration of type int64 to string format with
// appended `s` seconds required for time.ParseDuration
lastDurationTime := fmt.Sprintf("%ds", *localSnapInfo.LastSnapshotDuration)
// parse Duration from the lastDurationTime string
lastDuration, err := time.ParseDuration(lastDurationTime)
if err != nil {
return nil, fmt.Errorf("failed to parse last snapshot duration: %w", err)
}
// converts time.Duration to *durationpb.Duration
response.LastSyncDuration = durationpb.New(lastDuration)
}
lastUpdateTime := time.Unix(localSnapTime.LocalSnapshotTime, 0)
// converts localSnapshotTime of type int64 to time.Time
lastUpdateTime := time.Unix(localSnapInfo.LocalSnapshotTime, 0)
lastSyncTime := timestamppb.New(lastUpdateTime)
return lastSyncTime, nil
response.LastSyncTime = lastSyncTime
response.LastSyncBytes = localSnapInfo.LastSnapshotBytes
return &response, nil
}
func checkVolumeResyncStatus(localStatus librbd.SiteMirrorImageStatus) error {


@ -18,7 +18,9 @@ package rbd
import (
"context"
"errors"
"reflect"
"strconv"
"strings"
"testing"
"time"
@ -27,6 +29,11 @@ import (
librbd "github.com/ceph/go-ceph/rbd"
"github.com/ceph/go-ceph/rbd/admin"
"github.com/csi-addons/spec/lib/go/replication"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
)
@ -438,59 +445,202 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
}
}
func TestValidateLastSyncTime(t *testing.T) {
func TestValidateLastSyncInfo(t *testing.T) {
t.Parallel()
duration, err := time.ParseDuration(strconv.Itoa(int(56743)) + "s")
if err != nil {
t.Errorf("failed to parse duration)")
}
tests := []struct {
name string
description string
timestamp *timestamppb.Timestamp
info *replication.GetVolumeReplicationInfoResponse
expectedErr string
}{
{
"valid description",
name: "valid description",
//nolint:lll // sample output cannot be split into multiple lines.
`replaying,{"bytes_per_second":0.0,"bytes_per_snapshot":149504.0,"local_snapshot_timestamp":1662655501,"remote_snapshot_timestamp":1662655501}`,
timestamppb.New(time.Unix(1662655501, 0)),
"",
description: `replaying, {"bytes_per_second":0.0,"bytes_per_snapshot":81920.0,"last_snapshot_bytes":81920,"last_snapshot_sync_seconds":56743,"local_snapshot_timestamp":1684675261,"remote_snapshot_timestamp":1684675261,"replay_state":"idle"}`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncTime: timestamppb.New(time.Unix(1684675261, 0)),
LastSyncDuration: durationpb.New(duration),
LastSyncBytes: 81920,
},
expectedErr: "",
},
{
"empty description",
"",
nil,
corerbd.ErrLastSyncTimeNotFound.Error(),
name: "empty description",
description: "",
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncTime: nil,
LastSyncDuration: nil,
LastSyncBytes: 0,
},
expectedErr: corerbd.ErrLastSyncTimeNotFound.Error(),
},
{
"description without local_snapshot_timestamp",
`replaying,{"bytes_per_second":0.0,"bytes_per_snapshot":149504.0,"remote_snapshot_timestamp":1662655501}`,
nil,
"",
name: "description without last_snapshot_bytes",
//nolint:lll // sample output cannot be split into multiple lines.
description: `replaying, {"bytes_per_second":0.0,"last_snapshot_sync_seconds":56743,"local_snapshot_timestamp":1684675261,"remote_snapshot_timestamp":1684675261,"replay_state":"idle"}`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncDuration: durationpb.New(duration),
LastSyncTime: timestamppb.New(time.Unix(1684675261, 0)),
LastSyncBytes: 0,
},
expectedErr: "",
},
{
"description with invalid JSON",
`replaying,{"bytes_per_second":0.0,"bytes_per_snapshot":149504.0","remote_snapshot_timestamp":1662655501`,
nil,
"failed to unmarshal",
name: "description without local_snapshot_time",
//nolint:lll // sample output cannot be split into multiple lines.
description: `replaying, {"bytes_per_second":0.0,"bytes_per_snapshot":81920.0,"last_snapshot_bytes":81920,"last_snapshot_sync_seconds":56743,"remote_snapshot_timestamp":1684675261,"replay_state":"idle"}`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncDuration: nil,
LastSyncTime: nil,
LastSyncBytes: 0,
},
expectedErr: corerbd.ErrLastSyncTimeNotFound.Error(),
},
{
"description with no JSON",
`replaying`,
nil,
corerbd.ErrLastSyncTimeNotFound.Error(),
name: "description without last_snapshot_sync_seconds",
//nolint:lll // sample output cannot be split into multiple lines.
description: `replaying, {"bytes_per_second":0.0,"bytes_per_snapshot":81920.0,"last_snapshot_bytes":81920,"local_snapshot_timestamp":1684675261,"remote_snapshot_timestamp":1684675261,"replay_state":"idle"}`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncDuration: nil,
LastSyncTime: timestamppb.New(time.Unix(1684675261, 0)),
LastSyncBytes: 81920,
},
expectedErr: "",
},
{
name: "description with last_snapshot_sync_seconds = 0",
//nolint:lll // sample output cannot be split into multiple lines.
description: `replaying, {"bytes_per_second":0.0,"bytes_per_snapshot":81920.0,"last_snapshot_sync_seconds":0,
"last_snapshot_bytes":81920,"local_snapshot_timestamp":1684675261,"remote_snapshot_timestamp":1684675261,"replay_state":"idle"}`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncDuration: durationpb.New(time.Duration(0)),
LastSyncTime: timestamppb.New(time.Unix(1684675261, 0)),
LastSyncBytes: 81920,
},
expectedErr: "",
},
{
name: "description with invalid JSON",
//nolint:lll // sample output cannot be split into multiple lines.
description: `replaying,{"bytes_per_second":0.0,"last_snapshot_bytes":81920","bytes_per_snapshot":149504.0","remote_snapshot_timestamp":1662655501`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncDuration: nil,
LastSyncTime: nil,
LastSyncBytes: 0,
},
expectedErr: "failed to unmarshal",
},
{
name: "description with no JSON",
description: `replaying`,
info: &replication.GetVolumeReplicationInfoResponse{
LastSyncDuration: nil,
LastSyncTime: nil,
LastSyncBytes: 0,
},
expectedErr: corerbd.ErrLastSyncTimeNotFound.Error(),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
ts, err := getLastSyncTime(tt.description)
teststruct, err := getLastSyncInfo(tt.description)
if err != nil && !strings.Contains(err.Error(), tt.expectedErr) {
// returned error
t.Errorf("getLastSyncTime() returned error, expected: %v, got: %v",
t.Errorf("getLastSyncInfo() returned error, expected: %v, got: %v",
tt.expectedErr, err)
}
if !ts.AsTime().Equal(tt.timestamp.AsTime()) {
t.Errorf("getLastSyncTime() %v, expected %v", ts, tt.timestamp)
if teststruct != nil {
if teststruct.LastSyncTime.GetSeconds() != tt.info.LastSyncTime.GetSeconds() {
t.Errorf("name: %v, getLastSyncInfo() %v, expected %v", tt.name, teststruct.LastSyncTime, tt.info.LastSyncTime)
}
if tt.info.LastSyncDuration == nil && teststruct.LastSyncDuration != nil {
t.Errorf("name: %v, getLastSyncInfo() %v, expected %v", tt.name, teststruct.LastSyncDuration,
tt.info.LastSyncDuration)
}
if teststruct.LastSyncDuration.GetSeconds() != tt.info.LastSyncDuration.GetSeconds() {
t.Errorf("name: %v, getLastSyncInfo() %v, expected %v", tt.name, teststruct.LastSyncDuration,
tt.info.LastSyncDuration)
}
if teststruct.LastSyncBytes != tt.info.LastSyncBytes {
t.Errorf("name: %v, getLastSyncInfo() %v, expected %v", tt.name, teststruct.LastSyncBytes, tt.info.LastSyncBytes)
}
}
})
}
}
func TestGetGRPCError(t *testing.T) {
t.Parallel()
tests := []struct {
name string
err error
expectedErr error
}{
{
name: "FetchingLocalStateFailed",
err: corerbd.ErrFetchingLocalState,
expectedErr: status.Error(codes.Internal, corerbd.ErrFetchingLocalState.Error()),
},
{
name: "ResyncImageFailed",
err: corerbd.ErrResyncImageFailed,
expectedErr: status.Error(codes.Internal, corerbd.ErrResyncImageFailed.Error()),
},
{
name: "DisableImageMirroringFailed",
err: corerbd.ErrDisableImageMirroringFailed,
expectedErr: status.Error(codes.Internal, corerbd.ErrDisableImageMirroringFailed.Error()),
},
{
name: "FetchingMirroringInfoFailed",
err: corerbd.ErrFetchingMirroringInfo,
expectedErr: status.Error(codes.Internal, corerbd.ErrFetchingMirroringInfo.Error()),
},
{
name: "InvalidArgument",
err: corerbd.ErrInvalidArgument,
expectedErr: status.Error(codes.InvalidArgument, corerbd.ErrInvalidArgument.Error()),
},
{
name: "Aborted",
err: corerbd.ErrAborted,
expectedErr: status.Error(codes.Aborted, corerbd.ErrAborted.Error()),
},
{
name: "FailedPrecondition",
err: corerbd.ErrFailedPrecondition,
expectedErr: status.Error(codes.FailedPrecondition, corerbd.ErrFailedPrecondition.Error()),
},
{
name: "Unavailable",
err: corerbd.ErrUnavailable,
expectedErr: status.Error(codes.Unavailable, corerbd.ErrUnavailable.Error()),
},
{
name: "InvalidError",
err: errors.New("some error"),
expectedErr: status.Error(codes.Unknown, "some error"),
},
{
name: "NilError",
err: nil,
expectedErr: status.Error(codes.OK, "ok string"),
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
result := getGRPCError(tt.err)
assert.Equal(t, tt.expectedErr, result)
})
}
}


@ -33,12 +33,10 @@ import (
// Driver contains the default identity,node and controller struct.
type Driver struct {
cd *csicommon.CSIDriver
cd *csicommon.CSIDriver
ids *rbd.IdentityServer
ns *rbd.NodeServer
cs *rbd.ControllerServer
rs *casrbd.ReplicationServer
// cas is the CSIAddonsServer where CSI-Addons services are handled
cas *csiaddons.CSIAddonsServer
@ -66,10 +64,6 @@ func NewControllerServer(d *csicommon.CSIDriver) *rbd.ControllerServer {
}
}
func NewReplicationServer(c *rbd.ControllerServer) *casrbd.ReplicationServer {
return &casrbd.ReplicationServer{ControllerServer: c}
}
// NewNodeServer initialize a node server for rbd CSI driver.
func NewNodeServer(
d *csicommon.CSIDriver,
@ -105,12 +99,6 @@ func (r *Driver) Run(conf *util.Config) {
// Create instances of the volume and snapshot journal
rbd.InitJournals(conf.InstanceID)
// configure CSI-Addons server and components
err = r.setupCSIAddonsServer(conf)
if err != nil {
log.FatalLogMsg(err.Error())
}
// Initialize default library driver
r.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
if r.cd == nil {
@ -175,9 +163,12 @@ func (r *Driver) Run(conf *util.Config) {
r.cs = NewControllerServer(r.cd)
r.cs.ClusterName = conf.ClusterName
r.cs.SetMetadata = conf.SetMetadata
log.WarningLogMsg("replication service running on controller server is deprecated " +
"and replaced by CSI-Addons, see https://github.com/ceph/ceph-csi/issues/3314 for more details")
r.rs = NewReplicationServer(r.cs)
}
// configure CSI-Addons server and components
err = r.setupCSIAddonsServer(conf)
if err != nil {
log.FatalLogMsg(err.Error())
}
s := csicommon.NewNonBlockingGRPCServer()
@ -185,9 +176,6 @@ func (r *Driver) Run(conf *util.Config) {
IS: r.ids,
CS: r.cs,
NS: r.ns,
// Register the replication controller to expose replication
// operations.
RS: r.rs,
}
s.Start(conf.Endpoint, conf.HistogramOption, srv, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
@ -231,7 +219,7 @@ func (r *Driver) setupCSIAddonsServer(conf *util.Config) error {
fcs := casrbd.NewFenceControllerServer()
r.cas.RegisterService(fcs)
rcs := NewReplicationServer(NewControllerServer(r.cd))
rcs := casrbd.NewReplicationServer(NewControllerServer(r.cd))
r.cas.RegisterService(rcs)
}


@ -45,4 +45,22 @@ var (
// ErrLastSyncTimeNotFound is returned when last sync time is not found for
// the image.
ErrLastSyncTimeNotFound = errors.New("last sync time not found")
// ErrFailedPrecondition is returned when operation is rejected because the system is not in a state
// required for the operation's execution.
ErrFailedPrecondition = errors.New("system is not in a state required for the operation's execution")
// ErrUnavailable is returned when the image needs to be recreated
// locally and may be corrected by retrying with a backoff.
ErrUnavailable = errors.New("image needs to be recreated")
// ErrAborted is returned when the operation is aborted.
ErrAborted = errors.New("operation got aborted")
// ErrInvalidArgument is returned when the client specified an invalid argument.
ErrInvalidArgument = errors.New("invalid arguments provided")
// ErrFetchingLocalState is returned when the operation to fetch local state fails.
ErrFetchingLocalState = errors.New("failed to get local state")
// ErrDisableImageMirroringFailed is returned when the operation to disable image mirroring fails.
ErrDisableImageMirroringFailed = errors.New("failed to disable image mirroring")
// ErrFetchingMirroringInfo is returned when the operation to fetch mirroring info of image fails.
ErrFetchingMirroringInfo = errors.New("failed to get mirroring info of image")
// ErrResyncImageFailed is returned when the operation to resync the image fails.
ErrResyncImageFailed = errors.New("failed to resync image")
)


@ -512,6 +512,14 @@ func resizeNodeStagePath(ctx context.Context,
if err != nil {
return status.Error(codes.Internal, err.Error())
}
// If this is an AccessType=Block volume, do not attempt
// filesystem resize. The application is in charge of the data
// on top of the raw block-device, we can not assume there is a
// filesystem at all.
if isBlock {
return nil
}
}
// check stagingPath needs resize.
ok, err = resizer.NeedResize(devicePath, stagingTargetPath)


@ -18,12 +18,10 @@ package rbd
import (
"context"
"fmt"
"strings"
librbd "github.com/ceph/go-ceph/rbd"
"github.com/csi-addons/spec/lib/go/replication"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func (rv *rbdVolume) ResyncVol(localStatus librbd.SiteMirrorImageStatus, force bool) error {
@ -31,19 +29,18 @@ func (rv *rbdVolume) ResyncVol(localStatus librbd.SiteMirrorImageStatus, force b
// If the force option is not set return the error message to retry
// with Force option.
if !force {
return status.Errorf(codes.FailedPrecondition,
"image is in %q state, description (%s). Force resync to recover volume",
localStatus.State, localStatus.Description)
return fmt.Errorf("%w: image is in %q state, description (%s). Force resync to recover volume",
ErrFailedPrecondition, localStatus.State, localStatus.Description)
}
err := rv.resyncImage()
if err != nil {
return status.Error(codes.Internal, err.Error())
return fmt.Errorf("%w: failed to resync image: %w", ErrResyncImageFailed, err)
}
// If we issued a resync, return a non-final error as image needs to be recreated
// locally. Caller retries till RBD syncs an initial version of the image to
// report its status in the resync request.
return status.Error(codes.Unavailable, "awaiting initial resync due to split brain")
return fmt.Errorf("%w: awaiting initial resync due to split brain", ErrUnavailable)
}
return nil
@ -85,10 +82,10 @@ func resyncRequired(localStatus librbd.SiteMirrorImageStatus) bool {
return false
}
func DisableVolumeReplication(rbdVol *rbdVolume,
func (rv *rbdVolume) DisableVolumeReplication(
mirroringInfo *librbd.MirrorImageInfo,
force bool,
) (*replication.DisableVolumeReplicationResponse, error) {
) error {
if !mirroringInfo.Primary {
// Return success if the below condition is met
// Local image is secondary
@ -102,32 +99,30 @@ func DisableVolumeReplication(rbdVol *rbdVolume,
// disabled the image on all the remote (secondary) clusters will get
// auto-deleted. This helps in garbage collecting the volume
// replication Kubernetes artifacts after failback operation.
localStatus, rErr := rbdVol.GetLocalState()
localStatus, rErr := rv.GetLocalState()
if rErr != nil {
return nil, status.Error(codes.Internal, rErr.Error())
return fmt.Errorf("%w: %w", ErrFetchingLocalState, rErr)
}
if localStatus.Up && localStatus.State == librbd.MirrorImageStatusStateReplaying {
return &replication.DisableVolumeReplicationResponse{}, nil
return nil
}
return nil, status.Errorf(codes.InvalidArgument,
"secondary image status is up=%t and state=%s",
localStatus.Up,
localStatus.State)
return fmt.Errorf("%w: secondary image status is up=%t and state=%s",
ErrInvalidArgument, localStatus.Up, localStatus.State)
}
err := rbdVol.DisableImageMirroring(force)
err := rv.DisableImageMirroring(force)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
return fmt.Errorf("%w: %w", ErrDisableImageMirroringFailed, err)
}
// the image state can be still disabling once we disable the mirroring
// check the mirroring is disabled or not
mirroringInfo, err = rbdVol.GetImageMirroringInfo()
mirroringInfo, err = rv.GetImageMirroringInfo()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
return fmt.Errorf("%w: %w", ErrFetchingMirroringInfo, err)
}
if mirroringInfo.State == librbd.MirrorImageDisabling {
return nil, status.Errorf(codes.Aborted, "%s is in disabling state", rbdVol.VolID)
return fmt.Errorf("%w: %q is in disabling state", ErrAborted, rv.VolID)
}
return &replication.DisableVolumeReplicationResponse{}, nil
return nil
}


@ -17,6 +17,11 @@ RUN source /build.env \
&& mkdir -p /usr/local/go \
&& curl https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${GOARCH}.tar.gz | tar xzf - -C ${GOROOT} --strip-components=1
# TODO: remove the following cmd, when issue
# https://github.com/ceph/ceph-container/issues/2034 is fixed.
RUN dnf config-manager --disable \
tcmu-runner,tcmu-runner-source,tcmu-runner-noarch,ceph-iscsi || true
RUN dnf -y install \
git \
make \


@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.18.0"
const goModuleVersion = "1.18.1"


@ -54,7 +54,7 @@ type Retryer interface {
MaxAttempts() int
// RetryDelay returns the delay that should be used before retrying the
// attempt. Will return error if the if the delay could not be determined.
// attempt. Will return error if the delay could not be determined.
RetryDelay(attempt int, opErr error) (time.Duration, error)
// GetRetryToken attempts to deduct the retry cost from the retry token pool.


@ -1,3 +1,7 @@
# v1.1.34 (2023-06-13)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.33 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.33"
const goModuleVersion = "1.1.34"


@ -1,3 +1,7 @@
# v2.4.28 (2023-06-13)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.27 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
const goModuleVersion = "2.4.27"
const goModuleVersion = "2.4.28"


@ -1,3 +1,7 @@
# v1.9.28 (2023-06-13)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.27 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.9.27"
const goModuleVersion = "1.9.28"


@ -1,3 +1,11 @@
# v1.19.2 (2023-06-15)
* No change notes available for this release.
# v1.19.1 (2023-06-13)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.19.0 (2023-05-08)
* **Feature**: Documentation updates for AWS Security Token Service.


@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.19.0"
const goModuleVersion = "1.19.2"


@ -90,6 +90,7 @@ var partitionRegexp = struct {
AwsIso *regexp.Regexp
AwsIsoB *regexp.Regexp
AwsIsoE *regexp.Regexp
AwsIsoF *regexp.Regexp
AwsUsGov *regexp.Regexp
}{
@ -98,6 +99,7 @@ var partitionRegexp = struct {
AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
}
@ -407,6 +409,27 @@ var defaultPartitions = endpoints.Partitions{
RegionRegex: partitionRegexp.AwsIsoE,
IsRegionalized: true,
},
{
ID: "aws-iso-f",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "sts-fips.{region}.csp.hci.ic.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "sts.{region}.csp.hci.ic.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsIsoF,
IsRegionalized: true,
},
{
ID: "aws-us-gov",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{


@ -3580,6 +3580,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "athena.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -4043,15 +4052,84 @@ var awsPartition = partition{
},
"backupstorage": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -19202,18 +19280,33 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -25107,33 +25200,6 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicediscovery.sa-east-1.amazonaws.com",
},
endpointKey{
Region: "servicediscovery",
}: endpoint{
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "servicediscovery",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "servicediscovery-fips",
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -26571,6 +26637,118 @@ var awsPartition = partition{
},
},
},
"ssm-contacts": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
},
},
},
"ssm-incidents": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -27060,15 +27238,6 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips",
}: endpoint{
Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -30964,6 +31133,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"backupstorage": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"batch": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -33380,6 +33559,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
"backupstorage": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"batch": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.285"
const SDKVersion = "1.44.295"


@ -287,6 +287,10 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
if tag.Get("location") != "header" || tag.Get("enum") == "" {
return "", fmt.Errorf("%T is only supported with location header and enum shapes", value)
}
if len(value) == 0 {
return "", errValueNotSet
}
buff := &bytes.Buffer{}
for i, sv := range value {
if sv == nil || len(*sv) == 0 {


@ -1,5 +1,5 @@
//go:build !(nautilus || octopus || pacific) && ceph_preview
// +build !nautilus,!octopus,!pacific,ceph_preview
//go:build !(nautilus || octopus || pacific)
// +build !nautilus,!octopus,!pacific
package admin


@ -1,5 +1,5 @@
//go:build !(nautilus || octopus || pacific) && ceph_preview
// +build !nautilus,!octopus,!pacific,ceph_preview
//go:build !(nautilus || octopus || pacific)
// +build !nautilus,!octopus,!pacific
package admin

vendor/github.com/ceph/go-ceph/rbd/locks.go (new generated vendored file, 140 lines)

@ -0,0 +1,140 @@
//go:build !nautilus && ceph_preview
// +build !nautilus,ceph_preview
package rbd
// #cgo LDFLAGS: -lrbd
// #include <errno.h>
// #include <stdlib.h>
// #include <rbd/librbd.h>
import "C"
import (
"unsafe"
)
// LockMode represents a group of configurable lock modes.
type LockMode C.rbd_lock_mode_t
const (
// LockModeExclusive is the representation of RBD_LOCK_MODE_EXCLUSIVE from librbd.
LockModeExclusive = LockMode(C.RBD_LOCK_MODE_EXCLUSIVE)
// LockModeShared is the representation of RBD_LOCK_MODE_SHARED from librbd.
LockModeShared = LockMode(C.RBD_LOCK_MODE_SHARED)
)
// LockAcquire takes a lock on the given image as per the provided lock_mode.
//
// Implements:
//
// int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode);
func (image *Image) LockAcquire(lockMode LockMode) error {
if err := image.validate(imageIsOpen); err != nil {
return err
}
ret := C.rbd_lock_acquire(image.image, C.rbd_lock_mode_t(lockMode))
return getError(ret)
}
// LockBreak breaks the lock of lock_mode on the provided lock_owner.
//
// Implements:
//
// int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
// const char *lock_owner);
func (image *Image) LockBreak(lockMode LockMode, lockOwner string) error {
if err := image.validate(imageIsOpen); err != nil {
return err
}
cLockOwner := C.CString(lockOwner)
defer C.free(unsafe.Pointer(cLockOwner))
ret := C.rbd_lock_break(image.image, C.rbd_lock_mode_t(lockMode), cLockOwner)
return getError(ret)
}
// LockOwner represents information about a lock owner.
type LockOwner struct {
Mode LockMode
Owner string
}
// LockGetOwners fetches the list of lock owners.
//
// Implements:
//
// int rbd_lock_get_owners(rbd_image_t image, rbd_lock_mode_t *lock_mode,
// char **lock_owners, size_t *max_lock_owners);
func (image *Image) LockGetOwners() ([]*LockOwner, error) {
if err := image.validate(imageIsOpen); err != nil {
return nil, err
}
var (
maxLockOwners = C.size_t(8)
cLockOwners = make([]*C.char, 8)
lockMode LockMode
lockOwnersList []*LockOwner
)
for {
ret := C.rbd_lock_get_owners(image.image, (*C.rbd_lock_mode_t)(&lockMode), &cLockOwners[0], &maxLockOwners)
if ret >= 0 {
break
} else if ret == -C.ENOENT {
return nil, nil
} else if ret != -C.ERANGE {
return nil, getError(ret)
}
}
defer C.rbd_lock_get_owners_cleanup(&cLockOwners[0], maxLockOwners)
for i := 0; i < int(maxLockOwners); i++ {
lockOwnersList = append(lockOwnersList, &LockOwner{
Mode: LockMode(lockMode),
Owner: C.GoString(cLockOwners[i]),
})
}
return lockOwnersList, nil
}
// LockIsExclusiveOwner gets the status of the image exclusive lock.
//
// Implements:
//
// int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner);
func (image *Image) LockIsExclusiveOwner() (bool, error) {
if err := image.validate(imageIsOpen); err != nil {
return false, err
}
cIsOwner := C.int(0)
ret := C.rbd_is_exclusive_lock_owner(image.image, &cIsOwner)
if ret != 0 {
return false, getError(ret)
}
return cIsOwner == 1, nil
}
// LockRelease releases a lock on the image.
//
// Implements:
//
// int rbd_lock_release(rbd_image_t image);
func (image *Image) LockRelease() error {
if err := image.validate(imageIsOpen); err != nil {
return err
}
ret := C.rbd_lock_release(image.image)
return getError(ret)
}
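For reference, a minimal usage sketch of the lock API added above, assuming an already opened *rbd.Image and a go-ceph build with the ceph_preview tag; the helper name is illustrative and not part of this change:

package main

import (
	"fmt"

	"github.com/ceph/go-ceph/rbd"
)

// withExclusiveLock acquires an exclusive lock on an opened image, lists the
// current lock owners, and releases the lock before returning.
func withExclusiveLock(image *rbd.Image) error {
	if err := image.LockAcquire(rbd.LockModeExclusive); err != nil {
		return err
	}
	defer func() { _ = image.LockRelease() }()

	owners, err := image.LockGetOwners()
	if err != nil {
		return err
	}
	for _, o := range owners {
		fmt.Printf("lock held by %q (mode %v)\n", o.Owner, o.Mode)
	}
	return nil
}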


@ -1,5 +1,5 @@
//go:build !(octopus || nautilus) && ceph_preview
// +build !octopus,!nautilus,ceph_preview
//go:build !(octopus || nautilus)
// +build !octopus,!nautilus
package rbd


@ -149,14 +149,15 @@ func SetAttributesMirrorPeerSite(ioctx *rados.IOContext, uuid string, attributes
return getError(ret)
}
// MirrorPeerSite contains information about a mirroring peer site.
// MirrorPeerSite is the Go equivalent of the rbd_mirror_peer_site_t struct and contains information
// about a mirroring peer site. The "last_seen" property is omitted here because it is redundant
// and not updated by the Ceph API. Related Ceph issue: https://tracker.ceph.com/issues/59581
type MirrorPeerSite struct {
UUID string
Direction MirrorPeerDirection
SiteName string
MirrorUUID string
ClientName string
LastSeen C.time_t
}
// ListMirrorPeerSite returns the list of peer sites


@ -14,6 +14,7 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
durationpb "google.golang.org/protobuf/types/known/durationpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
@ -854,6 +855,17 @@ type GetVolumeReplicationInfoResponse struct {
// Holds the last sync time.
// This field is REQUIRED.
LastSyncTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=last_sync_time,json=lastSyncTime,proto3" json:"last_sync_time,omitempty"`
// Holds the last sync duration.
// last_sync_duration states the time taken to sync
// to execute the last sync operation.
// This field is OPTIONAL.
LastSyncDuration *durationpb.Duration `protobuf:"bytes,2,opt,name=last_sync_duration,json=lastSyncDuration,proto3" json:"last_sync_duration,omitempty"`
// Holds the last sync bytes.
// Represents number of bytes transferred
// with the last synchronization.
// This field is OPTIONAL.
// The value of this field MUST NOT be negative.
LastSyncBytes int64 `protobuf:"varint,3,opt,name=last_sync_bytes,json=lastSyncBytes,proto3" json:"last_sync_bytes,omitempty"`
}
func (x *GetVolumeReplicationInfoResponse) Reset() {
@ -895,6 +907,20 @@ func (x *GetVolumeReplicationInfoResponse) GetLastSyncTime() *timestamppb.Timest
return nil
}
func (x *GetVolumeReplicationInfoResponse) GetLastSyncDuration() *durationpb.Duration {
if x != nil {
return x.LastSyncDuration
}
return nil
}
func (x *GetVolumeReplicationInfoResponse) GetLastSyncBytes() int64 {
if x != nil {
return x.LastSyncBytes
}
return 0
}
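A consumer-side sketch of the two new optional fields, assuming the csi-addons replication Go bindings this generated file belongs to; the client, context and request are placeholders supplied by the caller:

package main

import (
	"context"
	"log"

	"github.com/csi-addons/spec/lib/go/replication"
)

// logSyncStatus prints the last-sync metrics reported by a replication-capable driver.
func logSyncStatus(ctx context.Context, client replication.ControllerClient, req *replication.GetVolumeReplicationInfoRequest) error {
	resp, err := client.GetVolumeReplicationInfo(ctx, req)
	if err != nil {
		return err
	}
	if d := resp.GetLastSyncDuration(); d != nil {
		log.Printf("last sync took %s", d.AsDuration())
	}
	log.Printf("last sync transferred %d bytes", resp.GetLastSyncBytes())
	return nil
}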
// Specifies what source the replication will be created from. One of the
// type fields MUST be specified.
type ReplicationSource struct {
@ -1113,6 +1139,8 @@ var file_replication_replication_proto_rawDesc = []byte{
0x2f, 0x63, 0x73, 0x69, 0x2f, 0x63, 0x73, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0xe9, 0x03, 0x0a, 0x1e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75,
@ -1298,78 +1326,85 @@ var file_replication_replication_proto_rawDesc = []byte{
0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x64, 0x0a, 0x20, 0x47, 0x65,
0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40,
0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x6d, 0x65,
0x22, 0xa2, 0x02, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x54, 0x0a,
0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x30, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x67, 0x72,
0x6f, 0x75, 0x70, 0x1a, 0x2b, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64,
0x1a, 0x3b, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x42, 0x06, 0x0a,
0x04, 0x74, 0x79, 0x70, 0x65, 0x32, 0x82, 0x05, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
0x6c, 0x6c, 0x65, 0x72, 0x12, 0x76, 0x0a, 0x17, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x2b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e,
0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72,
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c,
0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x18,
0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70,
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd5, 0x01, 0x0a, 0x20, 0x47,
0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x40, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x74, 0x69, 0x6d,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x54, 0x69, 0x6d,
0x65, 0x12, 0x47, 0x0a, 0x12, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x64,
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x79,
0x6e, 0x63, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x61,
0x73, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20,
0x01, 0x28, 0x03, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x42, 0x79, 0x74,
0x65, 0x73, 0x22, 0xa2, 0x02, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x76, 0x6f, 0x6c, 0x75,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12,
0x54, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70,
0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x2b, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75,
0x70, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x42,
0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x32, 0x82, 0x05, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74,
0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x76, 0x0a, 0x17, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65,
0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x2b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c,
0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x61,
0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79,
0x0a, 0x18, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52,
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x72, 0x65, 0x70,
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75,
0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x6d, 0x6f,
0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x72, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74,
0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x55, 0x0a, 0x0c, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x12, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0d, 0x50, 0x72, 0x6f,
0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x2e, 0x72, 0x65, 0x70,
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65,
0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d,
0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0c, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x79,
0x6e, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x72, 0x65, 0x70,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0c, 0x52, 0x65,
0x73, 0x79, 0x6e, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x72, 0x65, 0x70,
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x56,
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
0x79, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2c, 0x2e, 0x72, 0x65,
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x72,
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x79, 0x6e,
0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x79, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2c, 0x2e,
0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56,
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x72, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x3a, 0x3f, 0x0a, 0x0b, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
0x0a, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x0f, 0x5a, 0x0d, 0x2e,
0x3b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x3a, 0x3f, 0x0a, 0x0b,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0x08, 0x20, 0x01, 0x28,
0x08, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x0f, 0x5a,
0x0d, 0x2e, 0x3b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1413,7 +1448,8 @@ var file_replication_replication_proto_goTypes = []interface{}{
(*ReplicationSource_VolumeSource)(nil), // 24: replication.ReplicationSource.VolumeSource
(*ReplicationSource_VolumeGroupSource)(nil), // 25: replication.ReplicationSource.VolumeGroupSource
(*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp
(*descriptorpb.FieldOptions)(nil), // 27: google.protobuf.FieldOptions
(*durationpb.Duration)(nil), // 27: google.protobuf.Duration
(*descriptorpb.FieldOptions)(nil), // 28: google.protobuf.FieldOptions
}
var file_replication_replication_proto_depIdxs = []int32{
13, // 0: replication.EnableVolumeReplicationRequest.parameters:type_name -> replication.EnableVolumeReplicationRequest.ParametersEntry
@ -1434,26 +1470,27 @@ var file_replication_replication_proto_depIdxs = []int32{
23, // 15: replication.GetVolumeReplicationInfoRequest.secrets:type_name -> replication.GetVolumeReplicationInfoRequest.SecretsEntry
12, // 16: replication.GetVolumeReplicationInfoRequest.replication_source:type_name -> replication.ReplicationSource
26, // 17: replication.GetVolumeReplicationInfoResponse.last_sync_time:type_name -> google.protobuf.Timestamp
24, // 18: replication.ReplicationSource.volume:type_name -> replication.ReplicationSource.VolumeSource
25, // 19: replication.ReplicationSource.volumegroup:type_name -> replication.ReplicationSource.VolumeGroupSource
27, // 20: replication.alpha_field:extendee -> google.protobuf.FieldOptions
0, // 21: replication.Controller.EnableVolumeReplication:input_type -> replication.EnableVolumeReplicationRequest
2, // 22: replication.Controller.DisableVolumeReplication:input_type -> replication.DisableVolumeReplicationRequest
4, // 23: replication.Controller.PromoteVolume:input_type -> replication.PromoteVolumeRequest
6, // 24: replication.Controller.DemoteVolume:input_type -> replication.DemoteVolumeRequest
8, // 25: replication.Controller.ResyncVolume:input_type -> replication.ResyncVolumeRequest
10, // 26: replication.Controller.GetVolumeReplicationInfo:input_type -> replication.GetVolumeReplicationInfoRequest
1, // 27: replication.Controller.EnableVolumeReplication:output_type -> replication.EnableVolumeReplicationResponse
3, // 28: replication.Controller.DisableVolumeReplication:output_type -> replication.DisableVolumeReplicationResponse
5, // 29: replication.Controller.PromoteVolume:output_type -> replication.PromoteVolumeResponse
7, // 30: replication.Controller.DemoteVolume:output_type -> replication.DemoteVolumeResponse
9, // 31: replication.Controller.ResyncVolume:output_type -> replication.ResyncVolumeResponse
11, // 32: replication.Controller.GetVolumeReplicationInfo:output_type -> replication.GetVolumeReplicationInfoResponse
27, // [27:33] is the sub-list for method output_type
21, // [21:27] is the sub-list for method input_type
21, // [21:21] is the sub-list for extension type_name
20, // [20:21] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
27, // 18: replication.GetVolumeReplicationInfoResponse.last_sync_duration:type_name -> google.protobuf.Duration
24, // 19: replication.ReplicationSource.volume:type_name -> replication.ReplicationSource.VolumeSource
25, // 20: replication.ReplicationSource.volumegroup:type_name -> replication.ReplicationSource.VolumeGroupSource
28, // 21: replication.alpha_field:extendee -> google.protobuf.FieldOptions
0, // 22: replication.Controller.EnableVolumeReplication:input_type -> replication.EnableVolumeReplicationRequest
2, // 23: replication.Controller.DisableVolumeReplication:input_type -> replication.DisableVolumeReplicationRequest
4, // 24: replication.Controller.PromoteVolume:input_type -> replication.PromoteVolumeRequest
6, // 25: replication.Controller.DemoteVolume:input_type -> replication.DemoteVolumeRequest
8, // 26: replication.Controller.ResyncVolume:input_type -> replication.ResyncVolumeRequest
10, // 27: replication.Controller.GetVolumeReplicationInfo:input_type -> replication.GetVolumeReplicationInfoRequest
1, // 28: replication.Controller.EnableVolumeReplication:output_type -> replication.EnableVolumeReplicationResponse
3, // 29: replication.Controller.DisableVolumeReplication:output_type -> replication.DisableVolumeReplicationResponse
5, // 30: replication.Controller.PromoteVolume:output_type -> replication.PromoteVolumeResponse
7, // 31: replication.Controller.DemoteVolume:output_type -> replication.DemoteVolumeResponse
9, // 32: replication.Controller.ResyncVolume:output_type -> replication.ResyncVolumeResponse
11, // 33: replication.Controller.GetVolumeReplicationInfo:output_type -> replication.GetVolumeReplicationInfoResponse
28, // [28:34] is the sub-list for method output_type
22, // [22:28] is the sub-list for method input_type
22, // [22:22] is the sub-list for extension type_name
21, // [21:22] is the sub-list for extension extendee
0, // [0:21] is the sub-list for field type_name
}
func init() { file_replication_replication_proto_init() }


@ -54,8 +54,8 @@ func SetMaxGRPCLogLength(characterCount int) {
// file or have format '<protocol>://', following gRPC name resolution mechanism at
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
//
// The function tries to connect indefinitely every second until it connects. The function automatically disables TLS
// and adds interceptor for logging of all gRPC messages at level 5.
// The function tries to connect for 30 seconds, and returns an error if no connection has been established at that point.
// The function automatically disables TLS and adds an interceptor for logging of all gRPC messages at level 5.
//
// For a connection to a Unix Domain socket, the behavior after
// losing the connection is configurable. The default is to
@ -70,7 +70,7 @@ func SetMaxGRPCLogLength(characterCount int) {
// For other connections, the default behavior from gRPC is used and
// loss of connection is not detected reliably.
func Connect(address string, metricsManager metrics.CSIMetricsManager, options ...Option) (*grpc.ClientConn, error) {
return connect(address, metricsManager, []grpc.DialOption{}, options)
return connect(address, metricsManager, []grpc.DialOption{grpc.WithTimeout(time.Second * 30)}, options)
}
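From the caller's perspective the practical difference is that Connect can now fail after roughly 30 seconds instead of retrying forever, so the returned error has to be handled. A sketch under that assumption; the socket path and helper name are illustrative:

package main

import (
	"github.com/kubernetes-csi/csi-lib-utils/connection"
	"github.com/kubernetes-csi/csi-lib-utils/metrics"
	"google.golang.org/grpc"
)

// dialCSI connects to the CSI driver socket and surfaces the dial timeout.
func dialCSI(metricsManager metrics.CSIMetricsManager) (*grpc.ClientConn, error) {
	conn, err := connection.Connect("unix:///csi/csi.sock", metricsManager)
	if err != nil {
		// With the change above, this branch is reachable after ~30s instead of never.
		return nil, err
	}
	return conn, nil
}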
// Option is the type of all optional parameters for Connect.


@ -104,6 +104,33 @@ func GetControllerCapabilities(ctx context.Context, conn *grpc.ClientConn) (Cont
return caps, nil
}
// GroupControllerCapabilitySet is a set of CSI group controller capabilities. Only supported capabilities are in the map.
type GroupControllerCapabilitySet map[csi.GroupControllerServiceCapability_RPC_Type]bool
// GetGroupControllerCapabilities returns the set of supported group controller capabilities of the CSI driver.
func GetGroupControllerCapabilities(ctx context.Context, conn *grpc.ClientConn) (GroupControllerCapabilitySet, error) {
client := csi.NewGroupControllerClient(conn)
req := csi.GroupControllerGetCapabilitiesRequest{}
rsp, err := client.GroupControllerGetCapabilities(ctx, &req)
if err != nil {
return nil, err
}
caps := GroupControllerCapabilitySet{}
for _, cap := range rsp.GetCapabilities() {
if cap == nil {
continue
}
rpc := cap.GetRpc()
if rpc == nil {
continue
}
t := rpc.GetType()
caps[t] = true
}
return caps, nil
}
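A brief usage sketch for the new helper, assuming a *grpc.ClientConn to a CSI driver and the CSI v1.8+ Go bindings; the capability constant checked here is only an example:

package main

import (
	"context"
	"errors"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/kubernetes-csi/csi-lib-utils/rpc"
	"google.golang.org/grpc"
)

// requireGroupSnapshots fails when the connected driver does not advertise
// volume group snapshot support.
func requireGroupSnapshots(ctx context.Context, conn *grpc.ClientConn) error {
	caps, err := rpc.GetGroupControllerCapabilities(ctx, conn)
	if err != nil {
		return err
	}
	if !caps[csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT] {
		return errors.New("driver does not support volume group snapshots")
	}
	return nil
}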
// ProbeForever calls Probe() of a CSI driver and waits until the driver becomes ready.
// Any error other than timeout is returned.
func ProbeForever(conn *grpc.ClientConn, singleProbeTimeout time.Duration) error {


@ -1,3 +1,21 @@
## 2.11.0
In prior versions of Ginkgo, the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus in specs. This behavior has proved surprising and confusing in at least the following ways:
- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests
- CLI filters can override programmatic focus on CI systems, resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs.
Going forward, Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatically focused tests will always result in a non-zero exit code.
This change is technically a change in Ginkgo's external contract and may require some users to make changes in order to adopt it. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you, please open an issue so we can explore solutions to the underlying problem you are trying to solve.
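For example (an illustrative suite, not taken from the upstream changelog): running `ginkgo --label-filter=fast` against the snippet below now executes only the focused spec that also carries the `fast` label, and the run still exits non-zero because programmatic focus is present.

package checkout_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("checkout", func() {
	FIt("applies discounts", Label("fast"), func() { /* runs: focused AND matches the label filter */ })
	FIt("totals the cart", Label("slow"), func() { /* skipped: focused but filtered out by the label */ })
	It("sends receipts", Label("fast"), func() { /* skipped: matches the label but is not focused */ })
})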
### Fixes
- Programmatic focus is no longer overwritten by CLI filters [d6bba86]
### Maintenance
- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38]
- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d]
## 2.10.0
### Features


@ -8,22 +8,22 @@ import (
)
/*
If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
As a common example, consider:
As a common example, consider:
FDescribe("something to debug", function() {
It("works", function() {...})
It("works", function() {...})
FIt("doesn't work", function() {...})
It("works", function() {...})
})
FDescribe("something to debug", function() {
It("works", function() {...})
It("works", function() {...})
FIt("doesn't work", function() {...})
It("works", function() {...})
})
here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
The nested policy applied by this function enables this behavior.
here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
The nested policy applied by this function enables this behavior.
*/
func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
var walkTree func(tree *TreeNode) bool
@ -44,46 +44,43 @@ func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
}
/*
Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text
and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters.
When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters.
This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
- If there are no CLI arguments and no programmatic focus, do nothing.
- If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus.
- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
- If there are no CLI arguments and no programmatic focus, do nothing.
- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus.
- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
focusString := strings.Join(suiteConfig.FocusStrings, "|")
skipString := strings.Join(suiteConfig.SkipStrings, "|")
hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != ""
type SkipCheck func(spec Spec) bool
// by default, skip any specs marked pending
skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
hasProgrammaticFocus := false
if !hasFocusCLIFlags {
// check for programmatic focus
for _, spec := range specs {
if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
hasProgrammaticFocus = true
break
}
for _, spec := range specs {
if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
hasProgrammaticFocus = true
break
}
}
if hasProgrammaticFocus {
skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
}
if suiteConfig.LabelFilter != "" {
labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
skipChecks = append(skipChecks, func(spec Spec) bool {
return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
skipChecks = append(skipChecks, func(spec Spec) bool {
return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
})
}


@ -1,3 +1,3 @@
package types
const VERSION = "2.10.0"
const VERSION = "2.11.0"


@ -1033,8 +1033,10 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// We have to defer here because GracefulClose => Close => onClose, which
// requires locking ac.mu.
defer ac.transport.GracefulClose()
ac.transport = nil
if ac.transport != nil {
defer ac.transport.GracefulClose()
ac.transport = nil
}
if len(addrs) == 0 {
ac.updateConnectivityState(connectivity.Idle, nil)


@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.56.0"
const Version = "1.56.1"


@ -106,13 +106,19 @@ func (o MarshalOptions) Format(m proto.Message) string {
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(m)
return o.marshal(nil, m)
}
// MarshalAppend appends the JSON format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
return o.marshal(b, m)
}
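The new MarshalAppend makes it possible to reuse one buffer across many messages; a minimal sketch in which the slice of messages and the sink callback are placeholders:

package main

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

// encodeAll JSON-encodes each message into a reused buffer and hands the bytes
// to sink before the next iteration overwrites them.
func encodeAll(msgs []proto.Message, sink func([]byte)) error {
	buf := make([]byte, 0, 1024)
	for _, m := range msgs {
		var err error
		buf, err = protojson.MarshalOptions{}.MarshalAppend(buf[:0], m)
		if err != nil {
			return err
		}
		sink(buf)
	}
	return nil
}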
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
if o.Multiline && o.Indent == "" {
o.Indent = defaultIndent
}
@ -120,7 +126,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
o.Resolver = protoregistry.GlobalTypes
}
internalEnc, err := json.NewEncoder(o.Indent)
internalEnc, err := json.NewEncoder(b, o.Indent)
if err != nil {
return nil, err
}
@ -128,7 +134,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
// Treat nil message interface as an empty message,
// in which case the output is an empty JSON object.
if m == nil {
return []byte("{}"), nil
return append(b, '{', '}'), nil
}
enc := encoder{internalEnc, o}


@ -101,13 +101,19 @@ func (o MarshalOptions) Format(m proto.Message) string {
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(m)
return o.marshal(nil, m)
}
// MarshalAppend appends the textproto format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
return o.marshal(b, m)
}
// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {
@ -117,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
o.Resolver = protoregistry.GlobalTypes
}
internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII)
internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII)
if err != nil {
return nil, err
}
@ -125,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
// Treat nil message interface as an empty message,
// in which case there is nothing to output.
if m == nil {
return []byte{}, nil
return b, nil
}
enc := encoder{internalEnc, o}


@ -41,8 +41,10 @@ type Encoder struct {
//
// If indent is a non-empty string, it causes every entry for an Array or Object
// to be preceded by the indent and trailed by a newline.
func NewEncoder(indent string) (*Encoder, error) {
e := &Encoder{}
func NewEncoder(buf []byte, indent string) (*Encoder, error) {
e := &Encoder{
out: buf,
}
if len(indent) > 0 {
if strings.Trim(indent, " \t") != "" {
return nil, errors.New("indent may only be composed of space or tab characters")
@ -176,13 +178,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte {
// WriteInt writes out the given signed integer in JSON number value.
func (e *Encoder) WriteInt(n int64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatInt(n, 10)...)
e.out = strconv.AppendInt(e.out, n, 10)
}
// WriteUint writes out the given unsigned integer in JSON number value.
func (e *Encoder) WriteUint(n uint64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatUint(n, 10)...)
e.out = strconv.AppendUint(e.out, n, 10)
}
// StartObject writes out the '{' symbol.


@ -53,8 +53,10 @@ type encoderState struct {
// If outputASCII is true, strings will be serialized in such a way that
// multi-byte UTF-8 sequences are escaped. This property ensures that the
// overall output is ASCII (as opposed to UTF-8).
func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
e := &Encoder{}
func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
e := &Encoder{
encoderState: encoderState{out: buf},
}
if len(indent) > 0 {
if strings.Trim(indent, " \t") != "" {
return nil, errors.New("indent may only be composed of space and tab characters")
@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte {
// WriteInt writes out the given signed integer value.
func (e *Encoder) WriteInt(n int64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatInt(n, 10)...)
e.out = strconv.AppendInt(e.out, n, 10)
}
// WriteUint writes out the given unsigned integer value.
func (e *Encoder) WriteUint(n uint64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatUint(n, 10)...)
e.out = strconv.AppendUint(e.out, n, 10)
}
// WriteLiteral writes out the given string as a literal value without quotes.


@ -183,13 +183,58 @@ const (
// Field names for google.protobuf.ExtensionRangeOptions.
const (
ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration"
ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification"
ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration"
ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.
const (
ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3
)
// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState.
const (
ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState"
ExtensionRangeOptions_VerificationState_enum_name = "VerificationState"
)
// Names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration"
ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration"
)
// Field names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated"
ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated"
ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4
ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.FieldDescriptorProto.
@ -540,6 +585,7 @@ const (
FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
FieldOptions_Retention_field_name protoreflect.Name = "retention"
FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@ -552,6 +598,7 @@ const (
FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@ -567,6 +614,7 @@ const (
FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)


@ -32,6 +32,7 @@ const (
Type_Options_field_name protoreflect.Name = "options"
Type_SourceContext_field_name protoreflect.Name = "source_context"
Type_Syntax_field_name protoreflect.Name = "syntax"
Type_Edition_field_name protoreflect.Name = "edition"
Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name"
Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields"
@ -39,6 +40,7 @@ const (
Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options"
Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context"
Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax"
Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition"
)
// Field numbers for google.protobuf.Type.
@ -49,6 +51,7 @@ const (
Type_Options_field_number protoreflect.FieldNumber = 4
Type_SourceContext_field_number protoreflect.FieldNumber = 5
Type_Syntax_field_number protoreflect.FieldNumber = 6
Type_Edition_field_number protoreflect.FieldNumber = 7
)
// Names for google.protobuf.Field.
@ -121,12 +124,14 @@ const (
Enum_Options_field_name protoreflect.Name = "options"
Enum_SourceContext_field_name protoreflect.Name = "source_context"
Enum_Syntax_field_name protoreflect.Name = "syntax"
Enum_Edition_field_name protoreflect.Name = "edition"
Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name"
Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue"
Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options"
Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context"
Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax"
Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition"
)
// Field numbers for google.protobuf.Enum.
@ -136,6 +141,7 @@ const (
Enum_Options_field_number protoreflect.FieldNumber = 3
Enum_SourceContext_field_number protoreflect.FieldNumber = 4
Enum_Syntax_field_number protoreflect.FieldNumber = 5
Enum_Edition_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.EnumValue.

View File

@ -33,7 +33,7 @@ var (
return !inOneof(ox) && inOneof(oy)
}
// Fields in disjoint oneof sets are sorted by declaration index.
if ox != nil && oy != nil && ox != oy {
if inOneof(ox) && inOneof(oy) && ox != oy {
return ox.Index() < oy.Index()
}
// Fields sorted by field number.

View File

@ -51,7 +51,7 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
Minor = 30
Minor = 31
Patch = 0
PreRelease = ""
)

View File

@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore
}
func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
sizeTag := protowire.SizeTag(num)
if fd.IsPacked() && list.Len() > 0 {
content := 0
for i, llen := 0, list.Len(); i < llen; i++ {
content += o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return protowire.SizeTag(num) + protowire.SizeBytes(content)
return sizeTag + protowire.SizeBytes(content)
}
for i, llen := 0, list.Len(); i < llen; i++ {
size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i))
size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return size
}
func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
sizeTag := protowire.SizeTag(num)
mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
size += protowire.SizeTag(num)
size += sizeTag
size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value))
return true
})


@ -363,6 +363,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
b = p.appendSingularField(b, "retention", nil)
case 18:
b = p.appendSingularField(b, "target", nil)
case 19:
b = p.appendRepeatedField(b, "targets", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@ -418,6 +420,10 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
switch (*p)[0] {
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
case 2:
b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration)
case 3:
b = p.appendSingularField(b, "verification", nil)
}
return b
}
@ -473,3 +479,24 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
}
return b
}
func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte {
if len(*p) == 0 {
return b
}
switch (*p)[0] {
case 1:
b = p.appendSingularField(b, "number", nil)
case 2:
b = p.appendSingularField(b, "full_name", nil)
case 3:
b = p.appendSingularField(b, "type", nil)
case 4:
b = p.appendSingularField(b, "is_repeated", nil)
case 5:
b = p.appendSingularField(b, "reserved", nil)
case 6:
b = p.appendSingularField(b, "repeated", nil)
}
return b
}
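A small illustrative example of what the extended tables enable: protoreflect.SourcePath.String can now render numeric source-code-info paths that reach into FieldOptions.targets (field 19) and into ExtensionRangeOptions declarations. The path below is only a sketch and assumes the standard descriptor.proto field numbers (message_type = 4, field = 2, options = 8):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// FileDescriptorProto.message_type[0].field[1].options.targets[0]
	// (FieldOptions field 19 is the repeated "targets" handled above).
	path := protoreflect.SourcePath{4, 0, 2, 1, 8, 19, 0}
	fmt.Println(path) // e.g. ".message_type[0].field[1].options.targets[0]"
}
```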

File diff suppressed because it is too large


@ -0,0 +1,177 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dynamicpb
import (
"fmt"
"strings"
"sync"
"sync/atomic"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
type extField struct {
name protoreflect.FullName
number protoreflect.FieldNumber
}
// A Types is a collection of dynamically constructed descriptors.
// Its methods are safe for concurrent use.
//
// Types implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver.
// A Types may be used as a proto.UnmarshalOptions.Resolver.
type Types struct {
files *protoregistry.Files
extMu sync.Mutex
atomicExtFiles uint64
extensionsByMessage map[extField]protoreflect.ExtensionDescriptor
}
// NewTypes creates a new Types registry with the provided files.
// The Files registry is retained, and changes to Files will be reflected in Types.
// It is not safe to concurrently change the Files while calling Types methods.
func NewTypes(f *protoregistry.Files) *Types {
return &Types{
files: f,
}
}
// FindEnumByName looks up an enum by its full name;
// e.g., "google.protobuf.Field.Kind".
//
// This returns (nil, protoregistry.NotFound) if not found.
func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) {
d, err := t.files.FindDescriptorByName(name)
if err != nil {
return nil, err
}
ed, ok := d.(protoreflect.EnumDescriptor)
if !ok {
return nil, errors.New("found wrong type: got %v, want enum", descName(d))
}
return NewEnumType(ed), nil
}
// FindExtensionByName looks up an extension field by the field's full name.
// Note that this is the full name of the field as determined by
// where the extension is declared and is unrelated to the full name of the
// message being extended.
//
// This returns (nil, protoregistry.NotFound) if not found.
func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) {
d, err := t.files.FindDescriptorByName(name)
if err != nil {
return nil, err
}
xd, ok := d.(protoreflect.ExtensionDescriptor)
if !ok {
return nil, errors.New("found wrong type: got %v, want extension", descName(d))
}
return NewExtensionType(xd), nil
}
// FindExtensionByNumber looks up an extension field by the field number
// within some parent message, identified by full name.
//
// This returns (nil, protoregistry.NotFound) if not found.
func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
// Construct the extension number map lazily, since not every user will need it.
// Update the map if new files are added to the registry.
if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) {
t.updateExtensions()
}
xd := t.extensionsByMessage[extField{message, field}]
if xd == nil {
return nil, protoregistry.NotFound
}
return NewExtensionType(xd), nil
}
// FindMessageByName looks up a message by its full name;
// e.g. "google.protobuf.Any".
//
// This returns (nil, protoregistry.NotFound) if not found.
func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) {
d, err := t.files.FindDescriptorByName(name)
if err != nil {
return nil, err
}
md, ok := d.(protoreflect.MessageDescriptor)
if !ok {
return nil, errors.New("found wrong type: got %v, want message", descName(d))
}
return NewMessageType(md), nil
}
// FindMessageByURL looks up a message by a URL identifier.
// See documentation on google.protobuf.Any.type_url for the URL format.
//
// This returns (nil, protoregistry.NotFound) if not found.
func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
// This function is similar to FindMessageByName but
// truncates anything before and including '/' in the URL.
message := protoreflect.FullName(url)
if i := strings.LastIndexByte(url, '/'); i >= 0 {
message = message[i+len("/"):]
}
return t.FindMessageByName(message)
}
func (t *Types) updateExtensions() {
t.extMu.Lock()
defer t.extMu.Unlock()
if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) {
return
}
defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles()))
t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
t.registerExtensions(fd.Extensions())
t.registerExtensionsInMessages(fd.Messages())
return true
})
}
func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) {
count := mds.Len()
for i := 0; i < count; i++ {
md := mds.Get(i)
t.registerExtensions(md.Extensions())
t.registerExtensionsInMessages(md.Messages())
}
}
func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) {
count := xds.Len()
for i := 0; i < count; i++ {
xd := xds.Get(i)
field := xd.Number()
message := xd.ContainingMessage().FullName()
if t.extensionsByMessage == nil {
t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor)
}
t.extensionsByMessage[extField{message, field}] = xd
}
}
func descName(d protoreflect.Descriptor) string {
switch d.(type) {
case protoreflect.EnumDescriptor:
return "enum"
case protoreflect.EnumValueDescriptor:
return "enum value"
case protoreflect.MessageDescriptor:
return "message"
case protoreflect.ExtensionDescriptor:
return "extension"
case protoreflect.ServiceDescriptor:
return "service"
default:
return fmt.Sprintf("%T", d)
}
}
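A minimal, hedged usage sketch for the new dynamicpb.Types registry: it assumes the generated descriptor for google.protobuf.Duration is linked into the binary (the blank import below registers it with protoregistry.GlobalFiles) and shows Types acting both as a message resolver and as a proto.UnmarshalOptions resolver.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/dynamicpb"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
)

func main() {
	// Wrap an existing file registry; GlobalFiles works because it is a *protoregistry.Files.
	types := dynamicpb.NewTypes(protoregistry.GlobalFiles)

	mt, err := types.FindMessageByName("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}

	// Messages resolved through Types are dynamic: they are manipulated via
	// protoreflect rather than through generated struct fields.
	msg := mt.New().Interface()
	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 0: an empty Duration has no fields set

	// Types satisfies the resolver interfaces, so it can also resolve
	// extensions and Any payloads during unmarshaling.
	_ = proto.UnmarshalOptions{Resolver: types}
}
```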


@ -142,39 +142,39 @@ import (
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
// // or ...
// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
// foo = any.unpack(Foo.getDefaultInstance());
// }
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
// // or ...
// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
// foo = any.unpack(Foo.getDefaultInstance());
// }
//
// Example 3: Pack and unpack a message in Python.
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := anypb.New(foo)
// if err != nil {
// ...
// }
// ...
// foo := &pb.Foo{}
// if err := any.UnmarshalTo(foo); err != nil {
// ...
// }
// foo := &pb.Foo{...}
// any, err := anypb.New(foo)
// if err != nil {
// ...
// }
// ...
// foo := &pb.Foo{}
// if err := any.UnmarshalTo(foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
@ -182,8 +182,8 @@ import (
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
// # JSON
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:


@ -132,7 +132,7 @@ import (
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
// The JSON representation for `NullValue` is JSON `null`.
type NullValue int32
const (


@ -167,7 +167,7 @@ import (
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
// ) to obtain a formatter capable of generating timestamps in this format.
type Timestamp struct {
state protoimpl.MessageState
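As a Go companion to the Python/Java formatting examples mentioned in the comment above (a sketch; the timestamp value is arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Date(2023, time.July, 6, 5, 40, 38, 0, time.UTC))

	// Render the RFC 3339 form described in the doc comment; "Z07:00"
	// collapses to "Z" for UTC times.
	fmt.Println(ts.AsTime().UTC().Format("2006-01-02T15:04:05.000000000Z07:00"))
	// Output: 2023-07-06T05:40:38.000000000Z
}
```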

vendor/modules.txt

@ -20,7 +20,7 @@ github.com/armon/go-metrics
# github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
## explicit
github.com/asaskevich/govalidator
# github.com/aws/aws-sdk-go v1.44.285
# github.com/aws/aws-sdk-go v1.44.295
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
@ -63,7 +63,7 @@ github.com/aws/aws-sdk-go/service/sso
github.com/aws/aws-sdk-go/service/sso/ssoiface
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/aws/aws-sdk-go-v2 v1.18.0
# github.com/aws/aws-sdk-go-v2 v1.18.1
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/aws
github.com/aws/aws-sdk-go-v2/aws/defaults
@ -80,16 +80,16 @@ github.com/aws/aws-sdk-go-v2/internal/sdk
github.com/aws/aws-sdk-go-v2/internal/strings
github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
github.com/aws/aws-sdk-go-v2/internal/timeconv
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/configsources
# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27
# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
# github.com/aws/aws-sdk-go-v2/service/sts v1.19.0
# github.com/aws/aws-sdk-go-v2/service/sts v1.19.2
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/sts
github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
@ -130,7 +130,7 @@ github.com/ceph/ceph-csi/api/deploy/kubernetes/cephfs
github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs
github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd
github.com/ceph/ceph-csi/api/deploy/ocp
# github.com/ceph/go-ceph v0.21.0
# github.com/ceph/go-ceph v0.22.0
## explicit; go 1.19
github.com/ceph/go-ceph/cephfs/admin
github.com/ceph/go-ceph/common/admin/manager
@ -162,7 +162,7 @@ github.com/coreos/go-systemd/v22/journal
# github.com/csi-addons/replication-lib-utils v0.2.0
## explicit; go 1.15
github.com/csi-addons/replication-lib-utils/protosanitizer
# github.com/csi-addons/spec v0.2.0
# github.com/csi-addons/spec v0.2.1-0.20230606140122-d20966d2e444
## explicit
github.com/csi-addons/spec/lib/go/fence
github.com/csi-addons/spec/lib/go/identity
@ -391,7 +391,7 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/kubernetes-csi/csi-lib-utils v0.13.0
# github.com/kubernetes-csi/csi-lib-utils v0.14.0
## explicit; go 1.18
github.com/kubernetes-csi/csi-lib-utils/connection
github.com/kubernetes-csi/csi-lib-utils/metrics
@ -446,7 +446,7 @@ github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
# github.com/onsi/ginkgo/v2 v2.10.0
# github.com/onsi/ginkgo/v2 v2.11.0
## explicit; go 1.18
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
@ -748,7 +748,7 @@ google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.56.0
# google.golang.org/grpc v1.56.1
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes
@ -801,7 +801,7 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
# google.golang.org/protobuf v1.30.0
# google.golang.org/protobuf v1.31.0
## explicit; go 1.11
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
@ -852,7 +852,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# k8s.io/api v0.27.2 => k8s.io/api v0.27.2
# k8s.io/api v0.27.3 => k8s.io/api v0.27.2
## explicit; go 1.20
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@ -912,7 +912,7 @@ k8s.io/api/storage/v1beta1
## explicit; go 1.20
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
# k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.27.2
# k8s.io/apimachinery v0.27.3 => k8s.io/apimachinery v0.27.2
## explicit; go 1.20
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors