Merge pull request #255 from red-hat-storage/sync_ds--devel

Syncing latest changes from devel for ceph-csi
openshift-merge-bot[bot] 2024-02-15 07:02:48 +00:00 committed by GitHub
commit 09363a22dd
26 changed files with 788 additions and 98 deletions


@ -4,13 +4,13 @@ go 1.18
require (
github.com/google/go-github v17.0.0+incompatible
golang.org/x/oauth2 v0.16.0
golang.org/x/oauth2 v0.17.0
)
require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-querystring v1.1.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/net v0.21.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)


@ -11,10 +11,10 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=


@ -7,10 +7,10 @@ github.com/google/go-github/github
# github.com/google/go-querystring v1.1.0
## explicit; go 1.10
github.com/google/go-querystring/query
# golang.org/x/net v0.20.0
# golang.org/x/net v0.21.0
## explicit; go 1.18
golang.org/x/net/context
# golang.org/x/oauth2 v0.16.0
# golang.org/x/oauth2 v0.17.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal


@ -12,7 +12,7 @@
CSI_IMAGE_VERSION=canary
# cephcsi upgrade version
CSI_UPGRADE_VERSION=v3.10.1
CSI_UPGRADE_VERSION=v3.10.2
# Ceph version to use
BASE_IMAGE=quay.io/ceph/ceph:v18
@ -54,11 +54,11 @@ ROOK_VERSION=v1.12.1
ROOK_CEPH_CLUSTER_IMAGE=quay.io/ceph/ceph:v18
# CSI sidecar version
CSI_ATTACHER_VERSION=v4.4.3
CSI_SNAPSHOTTER_VERSION=v6.3.3
CSI_RESIZER_VERSION=v1.9.3
CSI_PROVISIONER_VERSION=v3.6.3
CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.9.3
CSI_ATTACHER_VERSION=v4.5.0
CSI_SNAPSHOTTER_VERSION=v7.0.0
CSI_RESIZER_VERSION=v1.10.0
CSI_PROVISIONER_VERSION=v4.0.0
CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.10.0
# e2e settings
# - enable CEPH_CSI_RUN_ALL_TESTS when running tests if it has root


@ -124,7 +124,7 @@ charts and their default values.
| `nodeplugin.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `nodeplugin.registrar.image.repository` | Node-Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
| `nodeplugin.registrar.image.tag` | Image tag | `v2.9.3` |
| `nodeplugin.registrar.image.tag` | Image tag | `v2.10.0` |
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
@ -145,17 +145,17 @@ charts and their default values.
| `provisioner.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.6.3` |
| `provisioner.provisioner.image.tag` | Specifies image tag | `v4.0.0` |
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` |
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.9.3` |
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.10.0` |
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` |
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.3.3` |
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v7.0.0` |
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` |
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |


@ -109,7 +109,7 @@ nodeplugin:
registrar:
image:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.9.3
tag: v2.10.0
pullPolicy: IfNotPresent
resources: {}
@ -201,7 +201,7 @@ provisioner:
provisioner:
image:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.6.3
tag: v4.0.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check
@ -216,7 +216,7 @@ provisioner:
enabled: true
image:
repository: registry.k8s.io/sig-storage/csi-resizer
tag: v1.9.3
tag: v1.10.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check
@ -226,7 +226,7 @@ provisioner:
snapshotter:
image:
repository: registry.k8s.io/sig-storage/csi-snapshotter
tag: v6.3.3
tag: v7.0.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check


@ -126,7 +126,7 @@ charts and their default values.
| `nodeplugin.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
| `nodeplugin.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `nodeplugin.registrar.image.repository` | Node Registrar image repository URL | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
| `nodeplugin.registrar.image.tag` | Image tag | `v2.9.3` |
| `nodeplugin.registrar.image.tag` | Image tag | `v2.10.0` |
| `nodeplugin.registrar.image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `nodeplugin.plugin.image.repository` | Nodeplugin image repository URL | `quay.io/cephcsi/cephcsi` |
| `nodeplugin.plugin.image.tag` | Image tag | `canary` |
@ -151,23 +151,23 @@ charts and their default values.
| `provisioner.imagePullSecrets` | Specifies imagePullSecrets for containers | `[]` |
| `provisioner.profiling.enabled` | Specifies whether profiling should be enabled | `false` |
| `provisioner.provisioner.image.repository` | Specifies the csi-provisioner image repository URL | `registry.k8s.io/sig-storage/csi-provisioner` |
| `provisioner.provisioner.image.tag` | Specifies image tag | `v3.6.3` |
| `provisioner.provisioner.image.tag` | Specifies image tag | `v4.0.0` |
| `provisioner.provisioner.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.provisioner.image.extraArgs` | Specifies extra arguments for the provisioner sidecar | `[]` |
| `provisioner.attacher.image.repository` | Specifies the csi-attacher image repository URL | `registry.k8s.io/sig-storage/csi-attacher` |
| `provisioner.attacher.image.tag` | Specifies image tag | `v4.4.3` |
| `provisioner.attacher.image.tag` | Specifies image tag | `v4.5.0` |
| `provisioner.attacher.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.attacher.image.extraArgs` | Specifies extra arguments for the attacher sidecar | `[]` |
| `provisioner.attacher.name` | Specifies the name of csi-attacher sidecar | `attacher` |
| `provisioner.attacher.enabled` | Specifies whether attacher sidecar is enabled | `true` |
| `provisioner.resizer.image.repository` | Specifies the csi-resizer image repository URL | `registry.k8s.io/sig-storage/csi-resizer` |
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.9.3` |
| `provisioner.resizer.image.tag` | Specifies image tag | `v1.10.0` |
| `provisioner.resizer.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.resizer.image.extraArgs` | Specifies extra arguments for the resizer sidecar | `[]` |
| `provisioner.resizer.name` | Specifies the name of csi-resizer sidecar | `resizer` |
| `provisioner.resizer.enabled` | Specifies whether resizer sidecar is enabled | `true` |
| `provisioner.snapshotter.image.repository` | Specifies the csi-snapshotter image repository URL | `registry.k8s.io/sig-storage/csi-snapshotter` |
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v6.3.3` |
| `provisioner.snapshotter.image.tag` | Specifies image tag | `v7.0.0` |
| `provisioner.snapshotter.image.pullPolicy` | Specifies pull policy | `IfNotPresent` |
| `provisioner.snapshotter.image.extraArgs` | Specifies extra arguments for the snapshotter sidecar | `[]` |
| `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` |


@ -120,7 +120,7 @@ nodeplugin:
registrar:
image:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.9.3
tag: v2.10.0
pullPolicy: IfNotPresent
resources: {}
@ -222,7 +222,7 @@ provisioner:
provisioner:
image:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.6.3
tag: v4.0.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check
@ -237,7 +237,7 @@ provisioner:
enabled: true
image:
repository: registry.k8s.io/sig-storage/csi-attacher
tag: v4.4.3
tag: v4.5.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check
@ -249,7 +249,7 @@ provisioner:
enabled: true
image:
repository: registry.k8s.io/sig-storage/csi-resizer
tag: v1.9.3
tag: v1.10.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check
@ -259,7 +259,7 @@ provisioner:
snapshotter:
image:
repository: registry.k8s.io/sig-storage/csi-snapshotter
tag: v6.3.3
tag: v7.0.0
pullPolicy: IfNotPresent
resources: {}
## For further options, check


@ -43,7 +43,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.3
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
@ -62,7 +62,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.9.3
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
@ -79,7 +79,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.3
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"


@ -27,7 +27,7 @@ spec:
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.3
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
args:
- "--v=1"
- "--csi-address=/csi/csi.sock"


@ -40,7 +40,7 @@ spec:
topologyKey: "kubernetes.io/hostname"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.3
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
@ -57,7 +57,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.9.3
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
@ -74,7 +74,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.3
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"


@ -27,7 +27,7 @@ spec:
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.3
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
args:
- "--v=1"
- "--csi-address=/csi/csi.sock"


@ -47,7 +47,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.3
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
@ -69,7 +69,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.3
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
@ -84,7 +84,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.4.3
image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0
args:
- "--v=1"
- "--csi-address=$(ADDRESS)"
@ -99,7 +99,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.9.3
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"


@ -29,7 +29,7 @@ spec:
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.3
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
args:
- "--v=1"
- "--csi-address=/csi/csi.sock"

go.mod

@ -6,7 +6,7 @@ toolchain go1.21.5
require (
github.com/IBM/keyprotect-go-client v0.12.2
github.com/aws/aws-sdk-go v1.50.6
github.com/aws/aws-sdk-go v1.50.16
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.25.0
@ -18,7 +18,7 @@ require (
github.com/google/uuid v1.6.0
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/hashicorp/vault/api v1.11.0
github.com/hashicorp/vault/api v1.12.0
github.com/kubernetes-csi/csi-lib-utils v0.17.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0
github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1

go.sum

@ -821,8 +821,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.50.6 h1:FaXvNwHG3Ri1paUEW16Ahk9zLVqSAdqa1M3phjZR35Q=
github.com/aws/aws-sdk-go v1.50.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.50.16 h1:/KuHK+Sadp9BKXWWtMhPtBdj+PLIFCnQZxQnsuLhxKc=
github.com/aws/aws-sdk-go v1.50.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
@ -1251,8 +1251,8 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
github.com/hashicorp/vault/api v1.11.0 h1:AChWByeHf4/P9sX3Y1B7vFsQhZO2BgQiCMQ2SA1P1UY=
github.com/hashicorp/vault/api v1.11.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4=
github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
github.com/hashicorp/vault/api/auth/approle v0.5.0 h1:a1TK6VGwYqSAfkmX4y4dJ4WBxMU5dStIZqScW4EPXR8=
github.com/hashicorp/vault/api/auth/approle v0.5.0/go.mod h1:CHOQIA1AZACfjTzHggmyfiOZ+xCSKNRFqe48FTCzH0k=
github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 h1:CXO0fD7M3iCGovP/UApeHhPcH4paDFKcu7AjEXi94rI=


@ -47,3 +47,19 @@ func (cs *DefaultControllerServer) ControllerGetCapabilities(
Capabilities: cs.Driver.capabilities,
}, nil
}
// GroupControllerGetCapabilities implements the default
// GroupControllerGetCapabilities GRPC callout.
func (cs *DefaultControllerServer) GroupControllerGetCapabilities(
ctx context.Context,
req *csi.GroupControllerGetCapabilitiesRequest,
) (*csi.GroupControllerGetCapabilitiesResponse, error) {
log.TraceLog(ctx, "Using default GroupControllerGetCapabilities")
if cs.Driver == nil {
return nil, status.Error(codes.Unimplemented, "Group controller server is not enabled")
}
return &csi.GroupControllerGetCapabilitiesResponse{
Capabilities: cs.Driver.groupCapabilities,
}, nil
}


@ -31,9 +31,10 @@ type CSIDriver struct {
nodeID string
version string
// topology constraints that this nodeserver will advertise
topology map[string]string
capabilities []*csi.ControllerServiceCapability
vc []*csi.VolumeCapability_AccessMode
topology map[string]string
capabilities []*csi.ControllerServiceCapability
groupCapabilities []*csi.GroupControllerServiceCapability
vc []*csi.VolumeCapability_AccessMode
}
// NewCSIDriver Creates a NewCSIDriver object. Assumes vendor
@ -116,3 +117,34 @@ func (d *CSIDriver) AddVolumeCapabilityAccessModes(
func (d *CSIDriver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode {
return d.vc
}
// AddGroupControllerServiceCapabilities stores the group controller capabilities
// in the driver object.
func (d *CSIDriver) AddGroupControllerServiceCapabilities(cl []csi.GroupControllerServiceCapability_RPC_Type) {
csc := make([]*csi.GroupControllerServiceCapability, 0, len(cl))
for _, c := range cl {
log.DefaultLog("Enabling group controller service capability: %v", c.String())
csc = append(csc, NewGroupControllerServiceCapability(c))
}
d.groupCapabilities = csc
}
// ValidateGroupControllerServiceRequest validates the group controller
// plugin capabilities.
//
//nolint:interfacer // c can be of type fmt.Stringer, but that does not make the API clearer
func (d *CSIDriver) ValidateGroupControllerServiceRequest(c csi.GroupControllerServiceCapability_RPC_Type) error {
if c == csi.GroupControllerServiceCapability_RPC_UNKNOWN {
return nil
}
for _, capability := range d.groupCapabilities {
if c == capability.GetRpc().GetType() {
return nil
}
}
return status.Error(codes.InvalidArgument, c.String())
}
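Taken together, the new capability plumbing means a driver opts in to the group controller service once at startup and gates each group RPC on the registered capabilities. A minimal sketch, assuming this package is imported as csicommon and using the CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT capability from the CSI spec (the driver construction arguments are placeholders):

driver := csicommon.NewCSIDriver("rbd.csi.ceph.com", "canary", "node-1")
driver.AddGroupControllerServiceCapabilities([]csi.GroupControllerServiceCapability_RPC_Type{
	csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT,
})

// at the top of a CreateVolumeGroupSnapshot handler:
if err := driver.ValidateGroupControllerServiceRequest(
	csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT); err != nil {
	return nil, err // InvalidArgument when the capability was never enabled
}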


@ -45,6 +45,7 @@ type Servers struct {
IS csi.IdentityServer
CS csi.ControllerServer
NS csi.NodeServer
GS csi.GroupControllerServer
}
// NewNonBlockingGRPCServer return non-blocking GRPC.
@ -109,6 +110,9 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, srv Servers) {
if srv.NS != nil {
csi.RegisterNodeServer(server, srv.NS)
}
if srv.GS != nil {
csi.RegisterGroupControllerServer(server, srv.GS)
}
log.DefaultLog("Listening for connections on address: %#v", listener.Addr())
err = server.Serve(listener)
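With the new GS field, enabling the group controller service is only a matter of populating it in the Servers struct; nil members are skipped at registration time, so existing callers keep working unchanged. A sketch, assuming a Start helper that accepts the endpoint and the Servers struct (the server values are placeholders):

server := csicommon.NewNonBlockingGRPCServer()
server.Start(endpoint, csicommon.Servers{
	IS: identityServer,   // csi.IdentityServer
	CS: controllerServer, // csi.ControllerServer
	NS: nodeServer,       // csi.NodeServer
	GS: groupServer,      // csi.GroupControllerServer; leave nil to disable
})
server.Wait()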


@ -28,6 +28,7 @@ import (
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/csi-addons/spec/lib/go/replication"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
"google.golang.org/grpc"
@ -95,6 +96,18 @@ func NewControllerServiceCapability(ctrlCap csi.ControllerServiceCapability_RPC_
}
}
// NewGroupControllerServiceCapability returns group controller capabilities.
func NewGroupControllerServiceCapability(ctrlCap csi.GroupControllerServiceCapability_RPC_Type,
) *csi.GroupControllerServiceCapability {
return &csi.GroupControllerServiceCapability{
Type: &csi.GroupControllerServiceCapability_Rpc{
Rpc: &csi.GroupControllerServiceCapability_RPC{
Type: ctrlCap,
},
},
}
}
// NewMiddlewareServerOption creates a new grpc.ServerOption that configures a
// common format for log messages and other gRPC related handlers.
func NewMiddlewareServerOption() grpc.ServerOption {
@ -133,6 +146,27 @@ func getReqID(req interface{}) string {
case *csi.NodeExpandVolumeRequest:
reqID = r.VolumeId
case *csi.CreateVolumeGroupSnapshotRequest:
reqID = r.Name
case *csi.DeleteVolumeGroupSnapshotRequest:
reqID = r.GroupSnapshotId
case *csi.GetVolumeGroupSnapshotRequest:
reqID = r.GroupSnapshotId
// Replication
case *replication.EnableVolumeReplicationRequest:
reqID = r.VolumeId
case *replication.DisableVolumeReplicationRequest:
reqID = r.VolumeId
case *replication.PromoteVolumeRequest:
reqID = r.VolumeId
case *replication.DemoteVolumeRequest:
reqID = r.VolumeId
case *replication.ResyncVolumeRequest:
reqID = r.VolumeId
case *replication.GetVolumeReplicationInfoRequest:
reqID = r.VolumeId
}
return reqID


@ -24,6 +24,7 @@ import (
"testing"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/csi-addons/spec/lib/go/replication"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
mount "k8s.io/mount-utils"
@ -65,6 +66,35 @@ func TestGetReqID(t *testing.T) {
&csi.NodeExpandVolumeRequest{
VolumeId: fakeID,
},
&csi.CreateVolumeGroupSnapshotRequest{
Name: fakeID,
},
&csi.DeleteVolumeGroupSnapshotRequest{
GroupSnapshotId: fakeID,
},
&csi.GetVolumeGroupSnapshotRequest{
GroupSnapshotId: fakeID,
},
&replication.EnableVolumeReplicationRequest{
VolumeId: fakeID,
},
&replication.DisableVolumeReplicationRequest{
VolumeId: fakeID,
},
&replication.PromoteVolumeRequest{
VolumeId: fakeID,
},
&replication.DemoteVolumeRequest{
VolumeId: fakeID,
},
&replication.ResyncVolumeRequest{
VolumeId: fakeID,
},
&replication.GetVolumeReplicationInfoRequest{
VolumeId: fakeID,
},
}
for _, r := range req {
if got := getReqID(r); got != fakeID {


@ -19,6 +19,7 @@ package journal
import (
"context"
"errors"
"fmt"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@ -79,7 +80,7 @@ func getOMapValues(
log.ErrorLog(ctx, "omap not found (pool=%q, namespace=%q, name=%q): %v",
poolName, namespace, oid, err)
return nil, util.JoinErrors(util.ErrKeyNotFound, err)
return nil, fmt.Errorf("%w: %w", util.ErrKeyNotFound, err)
}
return nil, err
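The switch from util.JoinErrors to fmt.Errorf with two %w verbs leans on multi-error wrapping, available since Go 1.20: errors.Is still matches either wrapped error. A self-contained illustration (the sentinel here stands in for util.ErrKeyNotFound):

var errKeyNotFound = errors.New("key not found")

// wrapLookupError annotates a low-level lookup failure with the sentinel.
func wrapLookupError(cause error) error {
	return fmt.Errorf("%w: %w", errKeyNotFound, cause)
}

// errors.Is(wrapLookupError(radosErr), errKeyNotFound) == true
// errors.Is(wrapLookupError(radosErr), radosErr)       == true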
@ -168,3 +169,57 @@ func omapPoolError(err error) error {
return err
}
// listOMapValues fetches all omap values for a given oid, prefix, and namespace.
func listOMapValues(
ctx context.Context,
conn *Connection,
poolName, namespace, oid, prefix string,
) (map[string]string, error) {
// fetch and configure the rados ioctx
ioctx, err := conn.conn.GetIoctx(poolName)
if err != nil {
return nil, omapPoolError(err)
}
defer ioctx.Destroy()
if namespace != "" {
ioctx.SetNamespace(namespace)
}
results := map[string]string{}
numKeys := uint64(0)
startAfter := ""
for {
prevNumKeys := numKeys
err = ioctx.ListOmapValues(
oid, startAfter, prefix, chunkSize,
func(key string, value []byte) {
numKeys++
startAfter = key
results[key] = string(value)
},
)
// if we hit an error, or no new keys were seen, exit the loop
if err != nil || numKeys == prevNumKeys {
break
}
}
if err != nil {
if errors.Is(err, rados.ErrNotFound) {
log.ErrorLog(ctx, "omap not found (pool=%q, namespace=%q, name=%q): %v",
poolName, namespace, oid, err)
return nil, fmt.Errorf("%w: %w", util.ErrKeyNotFound, err)
}
return nil, err
}
log.DebugLog(ctx, "got omap values: (pool=%q, namespace=%q, name=%q): %+v",
poolName, namespace, oid, results)
return results, nil
}
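Because ListOmapValues hands back at most chunkSize entries per call, the loop advances startAfter to the last key it saw and re-issues the listing until a pass yields no new keys (or an error). Within this package a caller might use it like this (pool, namespace, and object names are hypothetical):

values, err := listOMapValues(
	ctx, conn, "replicapool", "ns", "csi.volume.group.<uuid>", "csi.")
if err != nil {
	return err // wraps util.ErrKeyNotFound when the omap object is absent
}
for k, v := range values {
	log.DebugLog(ctx, "omap entry %q=%q", k, v)
}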


@ -0,0 +1,434 @@
/*
Copyright 2024 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package journal
import (
"context"
"errors"
"fmt"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/google/uuid"
)
const (
defaultVolumeGroupNamingPrefix string = "csi-vol-group-"
)
// VolumeGroupJournal provides access to the volume group journal stored in omaps.
type VolumeGroupJournal interface {
// Connect establishes a new connection to a ceph cluster for journal metadata.
Connect(
monitors,
namespace string,
cr *util.Credentials) (*volumeGroupJournalConfig, error)
// Destroy frees any resources and invalidates the journal connection.
Destroy()
// SetNamespace sets the namespace for the journal.
SetNamespace(ns string)
// CheckReservation checks for an existing reservation of the request name.
CheckReservation(
ctx context.Context,
journalPool,
reqName,
namePrefix string) (*VolumeGroupData, error)
// UndoReservation undoes a reservation, in the reverse order of ReserveName.
UndoReservation(
ctx context.Context,
csiJournalPool,
snapshotGroupName,
reqName string) error
// GetGroupAttributes fetches all keys and their values, from a UUID directory,
// returning VolumeGroupAttributes structure.
GetVolumeGroupAttributes(
ctx context.Context,
pool,
objectUUID string) (*VolumeGroupAttributes, error)
// ReserveName reserves a unique name for the volume group, returning the
// reserved UUID and the generated group name.
ReserveName(
ctx context.Context,
journalPool string,
journalPoolID int64,
reqName,
namePrefix string) (string, string, error)
// AddVolumeSnapshotMapping adds a volumeID and snapshotID mapping to the UUID directory.
AddVolumeSnapshotMapping(
ctx context.Context,
pool,
reservedUUID,
volumeID,
snapshotID string) error
// RemoveVolumeSnapshotMapping removes a volumeID and snapshotID mapping from the UUID directory.
RemoveVolumeSnapshotMapping(
ctx context.Context,
pool,
reservedUUID,
volumeID string) error
}
// volumeGroupJournalConfig contains the configuration and connection details.
type volumeGroupJournalConfig struct {
*Config
*Connection
}
// NewCSIVolumeroupJournal returns an instance of VolumeGroupJournal for groups.
func NewCSIVolumeroupJournal(suffix string) VolumeGroupJournal {
return &volumeGroupJournalConfig{
Config: &Config{
csiDirectory: "csi.groups." + suffix,
csiNameKeyPrefix: "csi.volume.group.",
cephUUIDDirectoryPrefix: "csi.volume.group.",
csiImageKey: "csi.groupname",
csiNameKey: "csi.volname",
namespace: "",
},
}
}
func (sgj *volumeGroupJournalConfig) SetNamespace(ns string) {
sgj.Config.namespace = ns
}
// NewCSIVolumeGroupJournalWithNamespace returns an instance of VolumeGroupJournal for
// volume groups using a predetermined namespace value.
func NewCSIVolumeGroupJournalWithNamespace(suffix, ns string) VolumeGroupJournal {
j := NewCSIVolumeroupJournal(suffix)
j.SetNamespace(ns)
return j
}
func (sgj *volumeGroupJournalConfig) Connect(
monitors,
namespace string,
cr *util.Credentials,
) (*volumeGroupJournalConfig, error) {
conn, err := sgj.Config.Connect(monitors, namespace, cr)
if err != nil {
return nil, err
}
sgj.Connection = conn
return sgj, nil
}
func (sgj *volumeGroupJournalConfig) Destroy() {
sgj.Connection.Destroy()
}
// VolumeGroupData contains the GroupUUID and VolumeGroupAttributes for a
// volume group.
type VolumeGroupData struct {
GroupUUID string
GroupName string
VolumeGroupAttributes *VolumeGroupAttributes
}
func generateVolumeGroupName(namePrefix, groupUUID string) string {
if namePrefix == "" {
namePrefix = defaultVolumeGroupNamingPrefix
}
return namePrefix + groupUUID
}
/*
CheckReservation checks if given request name contains a valid reservation
- If there is a valid reservation, then the corresponding VolumeGroupData for
the snapshot group is returned
- If there is a reservation that is stale (or not fully cleaned up), it is
garbage collected using the UndoReservation call, as appropriate
NOTE: As the function manipulates omaps, it should be called with a lock
against the request name held, to prevent parallel operations from modifying
the state of the omaps for this request name.
Return values:
- VolumeGroupData: which contains the GroupUUID and GroupSnapshotAttributes
that were reserved for the passed in reqName, empty if there was no
reservation found.
- error: non-nil in case of any errors.
*/
func (sgj *volumeGroupJournalConfig) CheckReservation(ctx context.Context,
journalPool, reqName, namePrefix string,
) (*VolumeGroupData, error) {
var (
cj = sgj.Config
volGroupData = &VolumeGroupData{}
)
// check if request name is already part of the directory omap
fetchKeys := []string{
cj.csiNameKeyPrefix + reqName,
}
values, err := getOMapValues(
ctx, sgj.Connection, journalPool, cj.namespace, cj.csiDirectory,
cj.commonPrefix, fetchKeys)
if err != nil {
if errors.Is(err, util.ErrKeyNotFound) || errors.Is(err, util.ErrPoolNotFound) {
// pool or omap (oid) was not present
// stop processing, but without an error, as no reservation exists
return nil, nil
}
return nil, err
}
objUUID, found := values[cj.csiNameKeyPrefix+reqName]
if !found {
// omap was read but was missing the desired key-value pair
// stop processing, but without an error, as no reservation exists
return nil, nil
}
volGroupData.GroupUUID = objUUID
savedVolumeGroupAttributes, err := sgj.GetVolumeGroupAttributes(ctx, journalPool,
objUUID)
if err != nil {
// the error should specifically be not-found, indicating the group is absent; any
// other error is not conclusive, and we should not proceed
if errors.Is(err, util.ErrKeyNotFound) {
err = sgj.UndoReservation(ctx, journalPool,
generateVolumeGroupName(namePrefix, objUUID), reqName)
}
return nil, err
}
// check if the request name in the omap matches the passed in request name
if savedVolumeGroupAttributes.RequestName != reqName {
// NOTE: This should never be possible, hence no cleanup, but log error
// and return, as cleanup may need to occur manually!
return nil, fmt.Errorf("internal state inconsistent, omap names mismatch,"+
" request name (%s) volume group UUID (%s) volume group omap name (%s)",
reqName, objUUID, savedVolumeGroupAttributes.RequestName)
}
volGroupData.GroupName = savedVolumeGroupAttributes.GroupName
volGroupData.VolumeGroupAttributes = &VolumeGroupAttributes{}
volGroupData.VolumeGroupAttributes.RequestName = savedVolumeGroupAttributes.RequestName
volGroupData.VolumeGroupAttributes.VolumeSnapshotMap = savedVolumeGroupAttributes.VolumeSnapshotMap
return volGroupData, nil
}
/*
UndoReservation undoes a reservation, in the reverse order of ReserveName
- The UUID directory is cleaned up before the GroupName key in the csiDirectory is cleaned up
NOTE: Ensure that the Ceph volume snapshots backing the reservation are cleaned up
prior to cleaning up the reservation
NOTE: As the function manipulates omaps, it should be called with a lock against the request name
held, to prevent parallel operations from modifying the state of the omaps for this request name.
Input arguments:
- csiJournalPool: Pool name that holds the CSI request name based journal
- groupID: ID of the volume group, generated from the UUID
- reqName: Request name for the volume group
*/
func (sgj *volumeGroupJournalConfig) UndoReservation(ctx context.Context,
csiJournalPool, groupID, reqName string,
) error {
// delete volume UUID omap (first, inverse of create order)
cj := sgj.Config
if groupID != "" {
if len(groupID) < uuidEncodedLength {
return fmt.Errorf("unable to parse UUID from %s, too short", groupID)
}
groupUUID := groupID[len(groupID)-36:]
if _, err := uuid.Parse(groupUUID); err != nil {
return fmt.Errorf("failed parsing UUID in %s: %w", groupUUID, err)
}
err := util.RemoveObject(
ctx,
sgj.Connection.monitors,
sgj.Connection.cr,
csiJournalPool,
cj.namespace,
cj.cephUUIDDirectoryPrefix+groupUUID)
if err != nil {
if !errors.Is(err, util.ErrObjectNotFound) {
log.ErrorLog(ctx, "failed removing oMap %s (%s)", cj.cephUUIDDirectoryPrefix+groupUUID, err)
return err
}
}
}
// delete the request name key (last, inverse of create order)
err := removeMapKeys(ctx, sgj.Connection, csiJournalPool, cj.namespace, cj.csiDirectory,
[]string{cj.csiNameKeyPrefix + reqName})
if err != nil {
log.ErrorLog(ctx, "failed removing oMap key %s (%s)", cj.csiNameKeyPrefix+reqName, err)
}
return err
}
/*
ReserveName adds the required entries to the csiDirectory omap, after generating a target
UUID directory for use. It then updates the UUID directory omap with back
pointers to the CSI-generated request name.
NOTE: As the function manipulates omaps, it should be called with a lock against the request name
held, to prevent parallel operations from modifying the state of the omaps for this request name.
Input arguments:
- journalPool: Pool where the CSI journal is stored
- journalPoolID: pool ID of the journalPool
- reqName: Name of the volumeGroupSnapshot request received
- namePrefix: Prefix to use when generating the volumeGroupName name (suffix is an auto-generated UUID)
Return values:
- string: Contains the UUID that was reserved for the passed in reqName
- string: Contains the VolumeGroup name that was reserved for the passed in reqName
- error: non-nil in case of any errors
*/
func (sgj *volumeGroupJournalConfig) ReserveName(ctx context.Context,
journalPool string, journalPoolID int64,
reqName, namePrefix string,
) (string, string, error) {
cj := sgj.Config
// Create the UUID based omap first, to reserve the same and avoid conflicts
// NOTE: If any service loss occurs post creation of the UUID directory, and before
// setting the request name key to point back to the UUID directory, the
// UUID directory key will be leaked
objUUID, err := reserveOMapName(
ctx,
sgj.Connection.monitors,
sgj.Connection.cr,
journalPool,
cj.namespace,
cj.cephUUIDDirectoryPrefix,
"")
if err != nil {
return "", "", err
}
groupName := generateVolumeGroupName(namePrefix, objUUID)
nameKeyVal := objUUID
// After generating the UUID Directory omap, we populate the csiDirectory
// omap with a key-value entry to map the request to the backend volume group:
// `csiNameKeyPrefix + reqName: nameKeyVal`
err = setOMapKeys(ctx, sgj.Connection, journalPool, cj.namespace, cj.csiDirectory,
map[string]string{cj.csiNameKeyPrefix + reqName: nameKeyVal})
if err != nil {
return "", "", err
}
defer func() {
if err != nil {
log.WarningLog(ctx, "reservation failed for volume group: %s", reqName)
errDefer := sgj.UndoReservation(ctx, journalPool, groupName, reqName)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of volume group: %s (%v)", reqName, errDefer)
}
}
}()
oid := cj.cephUUIDDirectoryPrefix + objUUID
omapValues := map[string]string{}
// Update UUID directory to store CSI request name
omapValues[cj.csiNameKey] = reqName
omapValues[cj.csiImageKey] = groupName
err = setOMapKeys(ctx, sgj.Connection, journalPool, cj.namespace, oid, omapValues)
if err != nil {
return "", "", err
}
return objUUID, groupName, nil
}
// VolumeGroupAttributes contains the request name, the volumeIDs, and
// the corresponding snapshotIDs.
type VolumeGroupAttributes struct {
RequestName string // Contains the request name for the passed in UUID
GroupName string // Contains the group name
VolumeSnapshotMap map[string]string // Contains the volumeID and the corresponding snapshotID mapping
}
func (sgj *volumeGroupJournalConfig) GetVolumeGroupAttributes(
ctx context.Context,
pool, objectUUID string,
) (*VolumeGroupAttributes, error) {
var (
err error
groupAttributes = &VolumeGroupAttributes{}
cj = sgj.Config
)
values, err := listOMapValues(
ctx, sgj.Connection, pool, cj.namespace, cj.cephUUIDDirectoryPrefix+objectUUID,
cj.commonPrefix)
if err != nil {
if !errors.Is(err, util.ErrKeyNotFound) && !errors.Is(err, util.ErrPoolNotFound) {
return nil, err
}
log.WarningLog(ctx, "unable to read omap values: pool missing: %v", err)
}
groupAttributes.RequestName = values[cj.csiNameKey]
groupAttributes.GroupName = values[cj.csiImageKey]
// Remove request name key and group name key from the omap, as we are
// looking for volumeID/snapshotID mapping
delete(values, cj.csiNameKey)
delete(values, cj.csiImageKey)
groupAttributes.VolumeSnapshotMap = map[string]string{}
for k, v := range values {
groupAttributes.VolumeSnapshotMap[k] = v
}
return groupAttributes, nil
}
func (sgj *volumeGroupJournalConfig) AddVolumeSnapshotMapping(
ctx context.Context,
pool,
reservedUUID,
volumeID,
snapshotID string,
) error {
err := setOMapKeys(ctx, sgj.Connection, pool, sgj.Config.namespace, sgj.Config.cephUUIDDirectoryPrefix+reservedUUID,
map[string]string{volumeID: snapshotID})
if err != nil {
log.ErrorLog(ctx, "failed adding volume snapshot mapping: %v", err)
return err
}
return nil
}
func (sgj *volumeGroupJournalConfig) RemoveVolumeSnapshotMapping(
ctx context.Context,
pool,
reservedUUID,
volumeID string,
) error {
err := removeMapKeys(ctx, sgj.Connection, pool, sgj.Config.namespace, sgj.Config.cephUUIDDirectoryPrefix+reservedUUID,
[]string{volumeID})
if err != nil {
log.ErrorLog(ctx, "failed removing volume snapshot mapping: %v", err)
return err
}
return nil
}
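End to end, a volume group snapshot reservation follows the same omap pattern as the per-volume journal: look for an existing reservation, reserve a new name when none is found, record each member volume's snapshot, and undo on failure. A hedged sketch of a caller (pool names, IDs, and credentials are placeholders):

vgj := journal.NewCSIVolumeGroupJournalWithNamespace("default", "ns")
conn, err := vgj.Connect(monitors, "ns", cr) // cr is a *util.Credentials
if err != nil {
	return err
}
defer conn.Destroy()

data, err := conn.CheckReservation(ctx, "replicapool", reqName, "")
if err != nil {
	return err
}
if data == nil { // no prior reservation for this request name
	groupUUID, groupName, err := conn.ReserveName(ctx, "replicapool", poolID, reqName, "")
	if err != nil {
		return err
	}
	// record each member volume's snapshot as it is created
	if err := conn.AddVolumeSnapshotMapping(ctx, "replicapool", groupUUID, volID, snapID); err != nil {
		_ = conn.UndoReservation(ctx, "replicapool", groupName, reqName)
		return err
	}
}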


@ -19350,6 +19350,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -19389,6 +19392,9 @@ var awsPartition = partition{
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@ -24789,16 +24795,6 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{
Hostname: "resource-explorer-2.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
}: endpoint{
Hostname: "resource-explorer-2.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
@ -24819,11 +24815,6 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
}: endpoint{
Hostname: "resource-explorer-2.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@ -24839,11 +24830,6 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
}: endpoint{
@ -24854,21 +24840,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "resource-explorer-2.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
Hostname: "resource-explorer-2.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
}: endpoint{
Hostname: "resource-explorer-2.eu-south-1.api.aws",
},
endpointKey{
Region: "eu-west-1",
}: endpoint{
@ -24884,16 +24860,6 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.eu-west-3.api.aws",
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "resource-explorer-2.il-central-1.api.aws",
},
endpointKey{
Region: "me-central-1",
}: endpoint{
Hostname: "resource-explorer-2.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{
@ -29041,18 +29007,36 @@ var awsPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -29065,12 +29049,27 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@ -29089,6 +29088,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@ -29098,6 +29115,18 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -29107,6 +29136,24 @@ var awsPartition = partition{
}: endpoint{
Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -35450,6 +35497,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"inspector2": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -39049,6 +39106,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
"emr-serverless": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -42289,6 +42356,15 @@ var awsusgovPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@ -42298,6 +42374,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.50.6"
const SDKVersion = "1.50.16"

vendor/modules.txt

@ -17,7 +17,7 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4
# github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
## explicit
github.com/asaskevich/govalidator
# github.com/aws/aws-sdk-go v1.50.6
# github.com/aws/aws-sdk-go v1.50.16
## explicit; go 1.19
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/auth/bearer
@ -365,7 +365,7 @@ github.com/hashicorp/hcl/hcl/token
github.com/hashicorp/hcl/json/parser
github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token
# github.com/hashicorp/vault/api v1.11.0
# github.com/hashicorp/vault/api v1.12.0
## explicit; go 1.19
github.com/hashicorp/vault/api
# github.com/hashicorp/vault/api/auth/approle v0.5.0