Merge pull request #88 from ceph/devel

Sync rhs/ceph-csi:devel with ceph/ceph-csi:devel
This commit is contained in:
OpenShift Merge Robot 2022-03-31 22:44:01 -04:00 committed by GitHub
commit 79aedad86b
401 changed files with 34033 additions and 2089 deletions

View File

@ -41,3 +41,4 @@ rules:
- rebase
- revert
- util
- nfs

View File

@ -12,7 +12,7 @@ jobs:
# path to the retest action
- uses: ceph/ceph-csi/actions/retest@devel
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.CEPH_CSI_BOT_TOKEN }}
required-label: "ci/retry/e2e"
max-retry: "5"
required-approve-count: "2"

View File

@ -309,6 +309,13 @@ pull_request_rules:
label:
add:
- component/cephfs
- name: title contains NFS
conditions:
- "title~=nfs: "
actions:
label:
add:
- component/nfs
- name: title contains RBD
conditions:
- "title~=rbd: "

View File

@ -46,22 +46,19 @@ endif
GO_PROJECT=github.com/ceph/ceph-csi
CEPH_VERSION ?= $(shell . $(CURDIR)/build.env ; echo $${CEPH_VERSION})
# TODO: ceph_preview tag may be removed with go-ceph 0.16.0
GO_TAGS_LIST ?= $(CEPH_VERSION) ceph_preview
# go build flags
LDFLAGS ?=
LDFLAGS += -X $(GO_PROJECT)/internal/util.GitCommit=$(GIT_COMMIT)
# CSI_IMAGE_VERSION will be considered as the driver version
LDFLAGS += -X $(GO_PROJECT)/internal/util.DriverVersion=$(CSI_IMAGE_VERSION)
GO_TAGS ?= -tags=$(shell echo $(GO_TAGS_LIST) | tr ' ' ',')
BASE_IMAGE ?= $(shell . $(CURDIR)/build.env ; echo $${BASE_IMAGE})
ifndef CEPH_VERSION
CEPH_VERSION = $(shell . $(CURDIR)/build.env ; echo $${CEPH_VERSION})
endif
ifdef CEPH_VERSION
# pass -tags to go commands (for go-ceph build constraints)
GO_TAGS = -tags=$(CEPH_VERSION)
endif
# passing TARGET=static-check on the 'make containerized-test' or 'make
# containerized-build' commandline will run the selected target instead of
# 'make test' in the container. Obviously other targets can be passed as well,
@ -109,8 +106,12 @@ mod-check: check-env
@echo 'running: go mod verify'
@go mod verify && [ "$(shell sha512sum go.mod)" = "`sha512sum go.mod`" ] || ( echo "ERROR: go.mod was modified by 'go mod verify'" && false )
scripts/golangci.yml: build.env scripts/golangci.yml.in
sed "s/@@CEPH_VERSION@@/$(CEPH_VERSION)/g" < scripts/golangci.yml.in > scripts/golangci.yml
scripts/golangci.yml: scripts/golangci.yml.in
rm -f scripts/golangci.yml.buildtags.in
for tag in $(GO_TAGS_LIST); do \
echo " - $$tag" >> scripts/golangci.yml.buildtags.in ; \
done
sed "/@@BUILD_TAGS@@/r scripts/golangci.yml.buildtags.in" scripts/golangci.yml.in | sed '/@@BUILD_TAGS@@/d' > scripts/golangci.yml
go-lint: scripts/golangci.yml
./scripts/lint-go.sh

View File

@ -105,6 +105,10 @@ for its support details.
| | Provision volume from another volume | GA | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.16.0 |
| | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
| | Volume/PV Metrics of File Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
| NFS | Dynamically provision, de-provision File mode RWO volume | Alpha | >= v3.6.0 | >= v1.0.0 | Pacific (>=16.2.0) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWX volume | Alpha | >= v3.6.0 | >= v1.0.0 | Pacific (>=16.2.0) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode ROX volume | Alpha | >= v3.6.0 | >= v1.0.0 | Pacific (>=16.2.0) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.6.0 | >= v1.5.0 | Pacific (>=16.2.0) | >= v1.22.0 |
`NOTE`: The `Alpha` status reflects possible non-backward
compatible changes in the future, and is thus not recommended

View File

@ -143,7 +143,7 @@ func main() {
}
statusList := filterStatusList(rs)
failedTestFound := false
for _, r := range statusList {
log.Printf("found context %s with status %s\n", r.GetContext(), r.GetState())
if contains([]string{"failed", "failure"}, r.GetState()) {
@ -176,6 +176,20 @@ func main() {
log.Printf("failed to create comment %v\n", err)
continue
}
failedTestFound = true
}
}
if failedTestFound {
// Comment `@Mergifyio refresh` so Mergify adds the PR back into the merge queue.
msg := "@Mergifyio refresh"
comment := &github.IssueComment{
Body: github.String(msg),
}
_, _, err = c.client.Issues.CreateComment(context.TODO(), c.owner, c.repo, prNumber, comment)
if err != nil {
log.Printf("failed to create comment %q: %v\n", msg, err)
continue
}
}
}

View File

@ -0,0 +1,74 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nfs
import (
"bytes"
_ "embed"
"fmt"
"text/template"
"github.com/ghodss/yaml"
storagev1 "k8s.io/api/storage/v1"
)
//go:embed csidriver.yaml
var csiDriver string
type CSIDriverValues struct {
Name string
}
var CSIDriverDefaults = CSIDriverValues{
Name: "nfs.csi.ceph.com",
}
// NewCSIDriver takes a driver name from the CSIDriverValues struct and
// replaces the value in the template. A CSIDriver object is returned which can
// be created in the Kubernetes cluster.
func NewCSIDriver(values CSIDriverValues) (*storagev1.CSIDriver, error) {
data, err := NewCSIDriverYAML(values)
if err != nil {
return nil, err
}
driver := &storagev1.CSIDriver{}
err = yaml.Unmarshal([]byte(data), driver)
if err != nil {
return nil, fmt.Errorf("failed convert YAML to %T: %w", driver, err)
}
return driver, nil
}
// NewCSIDriverYAML takes a driver name from the CSIDriverValues struct and
// replaces the value in the template. A CSIDriver object in YAML is returned
// which can be created in the Kubernetes cluster.
func NewCSIDriverYAML(values CSIDriverValues) (string, error) {
var buf bytes.Buffer
tmpl, err := template.New("CSIDriver").Parse(csiDriver)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
err = tmpl.Execute(&buf, values)
if err != nil {
return "", fmt.Errorf("failed to replace values in template: %w", err)
}
return buf.String(), nil
}

View File

@ -0,0 +1,9 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "{{ .Name }}"
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent

View File

@ -0,0 +1,38 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nfs
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestNewCSIDriver(t *testing.T) {
driver, err := NewCSIDriver(CSIDriverDefaults)
require.NoError(t, err)
require.NotNil(t, driver)
require.Equal(t, driver.Name, CSIDriverDefaults.Name)
}
func TestNewCSIDriverYAML(t *testing.T) {
yaml, err := NewCSIDriverYAML(CSIDriverDefaults)
require.NoError(t, err)
require.NotEqual(t, "", yaml)
}

View File

@ -38,7 +38,7 @@ SNAPSHOT_VERSION=v5.0.1
HELM_VERSION=v3.1.2
# minikube settings
MINIKUBE_VERSION=v1.25.0
MINIKUBE_VERSION=v1.25.2
VM_DRIVER=none
CHANGE_MINIKUBE_NONE_USER=true

View File

@ -126,6 +126,8 @@ spec:
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-csi-mountinfo
mountPath: /csi/mountinfo
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.nodeplugin.httpMetrics.enabled }}
@ -207,6 +209,10 @@ spec:
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-mountinfo
hostPath:
path: {{ .Values.kubeletDir }}/plugins/{{ .Values.driverName }}/mountinfo
type: DirectoryOrCreate
{{- if .Values.nodeplugin.affinity }}
affinity:
{{ toYaml .Values.nodeplugin.affinity | indent 8 -}}

View File

@ -133,6 +133,9 @@ spec:
mountPath: /tmp/csi/keys
- name: ceph-logdir
mountPath: /var/log/ceph
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.nodeplugin.httpMetrics.enabled }}
@ -221,6 +224,13 @@ spec:
emptyDir: {
medium: "Memory"
}
- name: oidc-token
projected:
sources:
- serviceAccountToken:
path: oidc-token
expirationSeconds: 3600
audience: ceph-csi-kms
{{- if .Values.nodeplugin.affinity }}
affinity:
{{ toYaml .Values.nodeplugin.affinity | indent 8 -}}

View File

@ -183,6 +183,9 @@ spec:
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
{{- if .Values.provisioner.deployController }}
@ -271,6 +274,13 @@ spec:
emptyDir: {
medium: "Memory"
}
- name: oidc-token
projected:
sources:
- serviceAccountToken:
path: oidc-token
expirationSeconds: 3600
audience: ceph-csi-kms
{{- if .Values.provisioner.affinity }}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 -}}

View File

@ -27,6 +27,7 @@ import (
"github.com/ceph/ceph-csi/internal/controller"
"github.com/ceph/ceph-csi/internal/controller/persistentvolume"
"github.com/ceph/ceph-csi/internal/liveness"
nfsdriver "github.com/ceph/ceph-csi/internal/nfs/driver"
rbddriver "github.com/ceph/ceph-csi/internal/rbd/driver"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@ -37,11 +38,13 @@ import (
const (
rbdType = "rbd"
cephFSType = "cephfs"
nfsType = "nfs"
livenessType = "liveness"
controllerType = "controller"
rbdDefaultName = "rbd.csi.ceph.com"
cephFSDefaultName = "cephfs.csi.ceph.com"
nfsDefaultName = "nfs.csi.ceph.com"
livenessDefaultName = "liveness.csi.ceph.com"
pollTime = 60 // seconds
@ -58,7 +61,7 @@ var conf util.Config
func init() {
// common flags
flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|liveness|controller]")
flag.StringVar(&conf.Vtype, "type", "", "driver type [rbd|cephfs|nfs|liveness|controller]")
flag.StringVar(&conf.Endpoint, "endpoint", "unix:///tmp/csi.sock", "CSI endpoint")
flag.StringVar(&conf.DriverName, "drivername", "", "name of the driver")
flag.StringVar(&conf.DriverNamespace, "drivernamespace", defaultNS, "namespace in which driver is deployed")
@ -149,6 +152,8 @@ func getDriverName() string {
return rbdDefaultName
case cephFSType:
return cephFSDefaultName
case nfsType:
return nfsDefaultName
case livenessType:
return livenessDefaultName
default:
@ -156,16 +161,20 @@ func getDriverName() string {
}
}
func printVersion() {
fmt.Println("Cephcsi Version:", util.DriverVersion)
fmt.Println("Git Commit:", util.GitCommit)
fmt.Println("Go Version:", runtime.Version())
fmt.Println("Compiler:", runtime.Compiler)
fmt.Printf("Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH)
if kv, err := util.GetKernelVersion(); err == nil {
fmt.Println("Kernel:", kv)
}
}
func main() {
if conf.Version {
fmt.Println("Cephcsi Version:", util.DriverVersion)
fmt.Println("Git Commit:", util.GitCommit)
fmt.Println("Go Version:", runtime.Version())
fmt.Println("Compiler:", runtime.Compiler)
fmt.Printf("Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH)
if kv, err := util.GetKernelVersion(); err == nil {
fmt.Println("Kernel:", kv)
}
printVersion()
os.Exit(0)
}
log.DefaultLog("Driver version: %s and Git version: %s", util.DriverVersion, util.GitCommit)
@ -229,6 +238,10 @@ func main() {
driver := cephfs.NewDriver()
driver.Run(&conf)
case nfsType:
driver := nfsdriver.NewDriver()
driver.Run(&conf)
case livenessType:
liveness.Run(&conf)

View File

@ -15,12 +15,16 @@
.PHONY: all
all: \
scc.yaml \
nfs/kubernetes/csidriver.yaml \
rbd/kubernetes/csidriver.yaml \
rbd/kubernetes/csi-config-map.yaml
scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go
$(MAKE) -C ../tools generate-deploy
nfs/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/nfs/csidriver.yaml ../api/deploy/kubernetes/nfs/csidriver.go
$(MAKE) -C ../tools generate-deploy
rbd/kubernetes/csidriver.yaml: ../api/deploy/kubernetes/rbd/csidriver.yaml ../api/deploy/kubernetes/rbd/csidriver.go
$(MAKE) -C ../tools generate-deploy

View File

@ -33,6 +33,8 @@ RUN dnf -y install \
/usr/bin/cc \
make \
git \
&& dnf clean all \
&& rm -rf /var/cache/yum \
&& true
ENV GOROOT=${GOROOT} \

View File

@ -100,6 +100,8 @@ spec:
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-csi-mountinfo
mountPath: /csi/mountinfo
- name: liveness-prometheus
securityContext:
privileged: true
@ -164,6 +166,10 @@ spec:
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-mountinfo
hostPath:
path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo
type: DirectoryOrCreate
---
# This is a service to expose the liveness metrics
apiVersion: v1

View File

@ -0,0 +1,16 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "nfs.csi.ceph.com"
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent

View File

@ -163,6 +163,9 @@ spec:
mountPath: /tmp/csi/keys
- name: ceph-config
mountPath: /etc/ceph/
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
- name: csi-rbdplugin-controller
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
@ -231,3 +234,10 @@ spec:
emptyDir: {
medium: "Memory"
}
- name: oidc-token
projected:
sources:
- serviceAccountToken:
path: oidc-token
expirationSeconds: 3600
audience: ceph-csi-kms

View File

@ -118,6 +118,9 @@ spec:
mountPath: /var/log/ceph
- name: ceph-config
mountPath: /etc/ceph/
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
- name: liveness-prometheus
securityContext:
privileged: true
@ -189,6 +192,13 @@ spec:
emptyDir: {
medium: "Memory"
}
- name: oidc-token
projected:
sources:
- serviceAccountToken:
path: oidc-token
expirationSeconds: 3600
audience: ceph-csi-kms
---
# This is a service to expose the liveness metrics
apiVersion: v1

View File

@ -0,0 +1,45 @@
# ceph-fuse: detection of corrupted mounts and their recovery
Mounts managed by ceph-fuse may get corrupted when, for example, the ceph-fuse
process exits abruptly, or when its parent Node Plugin container is terminated,
taking its child processes down with it.
This may manifest in affected workloads like so:
```
# mount | grep fuse
ceph-fuse on /cephfs-share type fuse.ceph-fuse (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
# ls /cephfs-share
ls: /cephfs-share: Socket not connected
```
or,
```
# stat /home/kubelet/pods/ae344b80-3b07-4589-b1a1-ca75fa9debf2/volumes/kubernetes.io~csi/pvc-ec69de59-7823-4840-8eee-544f8261fef0/mount: transport endpoint is not connected
```
This feature allows the CephFS CSI plugin to detect whether a ceph-fuse mount
is corrupted during the volume publishing phase, and to attempt to recover it
for the newly scheduled pod. Pods that already reside on a node whose
ceph-fuse mountpoints were broken may still need to be restarted, however.
## Detection
A mountpoint is deemed corrupted if `stat()`-ing it returns one of the
following errors:
* `ENOTCONN`
* `ESTALE`
* `EIO`
* `EACCES`
* `EHOSTDOWN`
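A minimal sketch of such a check in Go (an illustration only, not the actual
Ceph-CSI code) could look like this:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// isCorruptedMount reports whether stat()-ing the given path fails with one
// of the errnos listed above, which indicate a broken ceph-fuse mountpoint.
func isCorruptedMount(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return false
	}
	// os.Stat returns a *fs.PathError that wraps the raw errno.
	for _, errno := range []error{
		syscall.ENOTCONN, syscall.ESTALE, syscall.EIO,
		syscall.EACCES, syscall.EHOSTDOWN,
	} {
		if errors.Is(err, errno) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isCorruptedMount("/cephfs-share"))
}
```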
## Recovery
Once a mountpoint corruption is detected, its recovery is performed by
remounting the volume associated with it.
Recovery is attempted only if the `/csi/mountinfo` directory is made available
to the CephFS CSI plugin (it is available by default in the Helm chart and the
Kubernetes manifests).

View File

@ -382,6 +382,34 @@ the AWS KMS is expected to contain:
This Secret is expected to be created by the administrator who deployed
Ceph-CSI.
#### Configuring Amazon KMS with Amazon STS
Ceph-CSI can be configured to use
[Amazon STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html)
to fetch credentials for accessing Amazon KMS when the Kubernetes cluster is
configured with an OIDC identity provider. Other functionality is the same as
[Amazon KMS encryption](#configuring-amazon-kms).
There are a few settings that need to be included in the [KMS configuration
file](../examples/kms/vault/kms-config.yaml):
1. `encryptionKMSType`: should be set to `aws-sts-metadata`.
1. `secretName`: name of the Kubernetes Secret (in the Namespace where the
PVC is created) which contains the credentials for communicating with
AWS. This defaults to `ceph-csi-aws-credentials`.
The [Secret with credentials](../examples/kms/vault/aws-sts-credentials.yaml) for
the AWS KMS is expected to contain:
1. `awsRoleARN`: the role that will be used to fetch credentials from AWS STS
and to access AWS KMS for encryption.
1. `awsCMKARN`: the Customer Master Key ARN for the key used to encrypt the
passphrase.
1. `awsRegion`: the region where the AWS STS and KMS services are available.
This Secret is expected to be created by the tenant/user in each Namespace where
Ceph-CSI is used to create encrypted RBD volumes.
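As an illustration (not the Ceph-CSI implementation itself), the projected OIDC
token could be exchanged for temporary AWS credentials roughly like this; the
token path, role ARN and region below mirror the manifests and the example
Secret in this repository:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeRoleWithOIDCToken reads the service-account token projected into the
// pod (mounted at /run/secrets/tokens in the manifests above) and exchanges
// it for temporary AWS credentials. AssumeRoleWithWebIdentity does not need
// pre-existing AWS credentials, so the STS client is left unsigned.
func assumeRoleWithOIDCToken(ctx context.Context, region, roleARN string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	token, err := os.ReadFile("/run/secrets/tokens/oidc-token")
	if err != nil {
		return nil, fmt.Errorf("failed to read OIDC token: %w", err)
	}

	client := sts.New(sts.Options{Region: region})

	return client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String(roleARN),
		RoleSessionName:  aws.String("ceph-csi-aws-sts-kms"),
		WebIdentityToken: aws.String(string(token)),
	})
}

func main() {
	out, err := assumeRoleWithOIDCToken(context.TODO(),
		"us-west-2", "arn:aws:iam::111122223333:role/aws-sts-kms")
	if err != nil {
		panic(err)
	}
	// The returned credentials can then be used to create a KMS client for
	// encrypting/decrypting the volume passphrase.
	fmt.Println(*out.Credentials.AccessKeyId)
}
```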
### Encryption prerequisites
In order for encryption to work you need to make sure that `dm-crypt` kernel

View File

@ -0,0 +1,70 @@
# Dynamic provisioning of NFS volumes
Ceph has [support for NFS-Ganesha to export directories][ceph_mgr_nfs] on
CephFS. This can be used to export CephFS based volumes over NFS.
## Node-Plugin for mounting NFS-exports
The Kubernetes CSI community provides and maintains a [NFS CSI][nfs_csi]
driver. This driver can be used as a Node-Plugin so that NFS CSI volumes can be
mounted. When a CSI volume has the `server` and `share`
[parameters][nfs_csi_params], the Node-Plugin will be able to mount the
NFS-export.
## Exporting CephFS based volumes over NFS
Ceph-CSI already creates CephFS volumes that can be mounted over the native
CephFS protocol. A new provisioner in Ceph-CSI can create CephFS volumes and
include the required [NFS CSI parameters][nfs_csi_params] so that the [NFS CSI
driver][nfs_csi] can mount the CephFS volume over NFS.
The provisioner that handles the CSI requests for NFS volumes can call the [Ceph
Mgr commands to export/unexport][ceph_mgr_nfs] the CephFS volumes. The CephFS
volumes would be managed internally by the NFS provisioner, and only be exposed
to consumers as NFS CSI volumes.
### `CreateVolume` CSI operation
When the Ceph-CSI NFS provisioner is requested to create a NFS CSI volume, the
following steps need to be taken:
1. create a CephFS volume, using the CephFS `CreateVolume` call or another
internal API
1. call the Ceph Mgr API to export the CephFS volume with NFS-Ganesha
1. return the NFS CSI volume, with `server` and `share` parameters (other
parameters that are useful for CephFS volume management may be kept), as
sketched below
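A minimal, hypothetical sketch of the last step, assembling a CSI
`CreateVolume` response whose volume context carries the `server` and `share`
parameters (the helper name and values are illustrative, not the actual
Ceph-CSI provisioner code):

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// newNFSVolumeResponse returns a CSI volume whose context carries the
// `server` and `share` parameters consumed by the NFS CSI Node-Plugin.
func newNFSVolumeResponse(volumeID, server, share string, sizeBytes int64) *csi.CreateVolumeResponse {
	return &csi.CreateVolumeResponse{
		Volume: &csi.Volume{
			VolumeId:      volumeID,
			CapacityBytes: sizeBytes,
			VolumeContext: map[string]string{
				"server": server,
				"share":  share,
			},
		},
	}
}

func main() {
	// Hypothetical values: the server matches the Rook NFS Service example,
	// the share is an illustrative CephFS export path.
	rsp := newNFSVolumeResponse("csi-vol-example", "172.30.218.195",
		"/volumes/csi/csi-vol-example", 1<<30)
	fmt.Println(rsp.Volume.VolumeContext)
}
```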
The 2nd step requires an NFS-cluster name for the Ceph Mgr call(s). The name of
the NFS-cluster as managed by Ceph should be provided in the parameters of the
`CreateVolume` operation. For Kubernetes that means the parameter is set as an
option in the `StorageClass`.
The `server` parameter of the volume is another option that is managed by the
Ceph (or Rook) infrastructure. This parameter is also required to be provided
in the `CreateVolume` parameters.
Removing the NFS-export for the volume (or other operations) requires the name
of the NFS-cluster, as it is needed for the Ceph Mgr API. Like other parameters
of the CephFS volume, the NFS-cluster name will therefore need to be stored in
the OMAP journalling.
### `DeleteVolume` CSI operation
The `DeleteVolume` operation only receives the `volume_id` parameter, which
is to be used by the CSI Controller (provisioner) to locate the backing volume.
The `DeleteVolume` operation for the CephFS provisioner already knows how to
delete volumes by ID.
In order to remove the exported volume from the NFS-cluster, the operation
needs to fetch the name of the NFS-cluster from the journal where it was stored
during `CreateVolume`.
### Additional CSI operations
`CreateVolume` and `DeleteVolume` are the only required operations for the CSI
Controller (provisioner). Additional features as they are supported by CephFS
can forward the operations to the CephFS provisioner at a later time.
[ceph_mgr_nfs]: https://docs.ceph.com/en/latest/mgr/nfs/
[nfs_csi]: https://github.com/kubernetes-csi/csi-driver-nfs
[nfs_csi_params]: https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/docs/driver-parameters.md

View File

@ -228,7 +228,7 @@ static CephFS PV
| :----------: | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :------: |
| clusterID | The clusterID is used by the CSI plugin to uniquely identify and use a Ceph cluster (this is the key in the configmap created during ceph-csi deployment) | Yes |
| fsName | CephFS filesystem name into which the subvolume should be created/present | Yes |
| staticVolume | Value must be set to `true` to mount and unmount static rbd PVC | Yes |
| staticVolume | Value must be set to `true` to mount and unmount static cephFS PVC | Yes |
| rootPath | Actual path of the subvolume in ceph cluster, can be retrieved by issuing getpath command as described above | Yes |
**Note** ceph-csi does not support CephFS subvolume deletion for static PV.

View File

@ -280,6 +280,7 @@ var _ = Describe("cephfs", func() {
It("Test CephFS CSI", func() {
pvcPath := cephFSExamplePath + "pvc.yaml"
appPath := cephFSExamplePath + "pod.yaml"
deplPath := cephFSExamplePath + "deployment.yaml"
appRWOPPath := cephFSExamplePath + "pod-rwop.yaml"
pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
@ -504,6 +505,134 @@ var _ = Describe("cephfs", func() {
}
})
By("verifying that ceph-fuse recovery works for new pods", func() {
err := deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
}
err = createCephfsStorageClass(f.ClientSet, f, true, map[string]string{
"mounter": "fuse",
})
if err != nil {
e2elog.Failf("failed to create CephFS storageclass: %v", err)
}
replicas := int32(2)
pvc, depl, err := validatePVCAndDeploymentAppBinding(
f, pvcPath, deplPath, f.UniqueName, &replicas, deployTimeout,
)
if err != nil {
e2elog.Failf("failed to create PVC and Deployment: %v", err)
}
deplPods, err := listPods(f, depl.Namespace, &metav1.ListOptions{
LabelSelector: fmt.Sprintf("app=%s", depl.Labels["app"]),
})
if err != nil {
e2elog.Failf("failed to list pods for Deployment: %v", err)
}
doStat := func(podName string) (stdErr string, err error) {
_, stdErr, err = execCommandInContainerByPodName(
f,
fmt.Sprintf("stat %s", depl.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath),
depl.Namespace,
podName,
depl.Spec.Template.Spec.Containers[0].Name,
)
return stdErr, err
}
ensureStatSucceeds := func(podName string) error {
stdErr, statErr := doStat(podName)
if statErr != nil || stdErr != "" {
return fmt.Errorf(
"expected stat to succeed without error output ; got err %w, stderr %s",
statErr, stdErr,
)
}
return nil
}
pod1Name, pod2Name := deplPods[0].Name, deplPods[1].Name
// stat() ceph-fuse mountpoints to make sure they are working.
for i := range deplPods {
err = ensureStatSucceeds(deplPods[i].Name)
if err != nil {
e2elog.Failf(err.Error())
}
}
// Kill ceph-fuse in cephfs-csi node plugin Pods.
nodePluginSelector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName)
if err != nil {
e2elog.Failf("failed to get node plugin DaemonSet label selector: %v", err)
}
_, stdErr, err := execCommandInContainer(
f, "killall -9 ceph-fuse", cephCSINamespace, "csi-cephfsplugin", &metav1.ListOptions{
LabelSelector: nodePluginSelector,
},
)
if err != nil {
e2elog.Failf("killall command failed: err %v, stderr %s", err, stdErr)
}
// Verify that stat()-ing the mountpoint in the second Pod (pod2Name) results in ENOTCONN.
stdErr, err = doStat(pod2Name)
if err == nil || !strings.Contains(stdErr, "not connected") {
e2elog.Failf(
"expected stat to fail with 'Transport endpoint not connected' or 'Socket not connected'; got err %v, stderr %s",
err, stdErr,
)
}
// Delete the second Pod (pod2Name). This serves two purposes: it verifies that deleting pods with
// corrupted ceph-fuse mountpoints works, and it lets the ReplicaSet controller recreate
// the pod, hopefully with working mounts again.
err = deletePod(pod2Name, depl.Namespace, c, deployTimeout)
if err != nil {
e2elog.Failf(err.Error())
}
// Wait for the second Pod to be recreated.
err = waitForDeploymentComplete(c, depl.Name, depl.Namespace, deployTimeout)
if err != nil {
e2elog.Failf(err.Error())
}
// List Deployment's pods again to get name of the new pod.
deplPods, err = listPods(f, depl.Namespace, &metav1.ListOptions{
LabelSelector: fmt.Sprintf("app=%s", depl.Labels["app"]),
})
if err != nil {
e2elog.Failf("failed to list pods for Deployment: %v", err)
}
for i := range deplPods {
if deplPods[i].Name != pod1Name {
pod2Name = deplPods[i].Name
break
}
}
if pod2Name == "" {
podNames := make([]string, len(deplPods))
for i := range deplPods {
podNames[i] = deplPods[i].Name
}
e2elog.Failf("no new replica found ; found replicas %v", podNames)
}
// Verify that the newly created Pod (pod2Name) has its ceph-fuse mount working again.
err = ensureStatSucceeds(pod2Name)
if err != nil {
e2elog.Failf(err.Error())
}
// Delete created resources.
err = deletePVCAndDeploymentApp(f, pvc, depl)
if err != nil {
e2elog.Failf("failed to delete PVC and Deployment: %v", err)
}
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass: %v", err)
}
})
By("create a PVC and bind it to an app", func() {
err := createCephfsStorageClass(f.ClientSet, f, false, nil)
if err != nil {

View File

@ -214,6 +214,29 @@ func execCommandInContainer(
return stdOut, stdErr, err
}
func execCommandInContainerByPodName(
f *framework.Framework, shellCmd, namespace, podName, containerName string,
) (string, string, error) {
cmd := []string{"/bin/sh", "-c", shellCmd}
execOpts := framework.ExecOptions{
Command: cmd,
PodName: podName,
Namespace: namespace,
ContainerName: containerName,
Stdin: nil,
CaptureStdout: true,
CaptureStderr: true,
PreserveWhitespace: true,
}
stdOut, stdErr, err := f.ExecWithOptions(execOpts)
if stdErr != "" {
e2elog.Logf("stdErr occurred: %v", stdErr)
}
return stdOut, stdErr, err
}
func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, string, error) {
opt := &metav1.ListOptions{
LabelSelector: rookToolBoxPodLabel,

View File

@ -984,7 +984,7 @@ var _ = Describe("RBD", func() {
}
app.Namespace = f.UniqueName
err = createPVCAndDeploymentApp(f, "", pvc, app, deployTimeout)
err = createPVCAndDeploymentApp(f, pvc, app, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC and application: %v", err)
}
@ -1014,7 +1014,7 @@ var _ = Describe("RBD", func() {
}
}
err = deletePVCAndDeploymentApp(f, "", pvc, app)
err = deletePVCAndDeploymentApp(f, pvc, app)
if err != nil {
e2elog.Failf("failed to delete PVC and application: %v", err)
}
@ -1093,7 +1093,7 @@ var _ = Describe("RBD", func() {
appClone.Namespace = f.UniqueName
appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name
appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true
err = createPVCAndDeploymentApp(f, "", pvcClone, appClone, deployTimeout)
err = createPVCAndDeploymentApp(f, pvcClone, appClone, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC and application: %v", err)
}
@ -1131,7 +1131,7 @@ var _ = Describe("RBD", func() {
}
}
err = deletePVCAndDeploymentApp(f, "", pvcClone, appClone)
err = deletePVCAndDeploymentApp(f, pvcClone, appClone)
if err != nil {
e2elog.Failf("failed to delete PVC and application: %v", err)
}
@ -1217,7 +1217,7 @@ var _ = Describe("RBD", func() {
appClone.Namespace = f.UniqueName
appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name
appClone.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true
err = createPVCAndDeploymentApp(f, "", pvcClone, appClone, deployTimeout)
err = createPVCAndDeploymentApp(f, pvcClone, appClone, deployTimeout)
if err != nil {
e2elog.Failf("failed to create PVC and application: %v", err)
}
@ -1254,7 +1254,7 @@ var _ = Describe("RBD", func() {
e2elog.Failf(stdErr)
}
}
err = deletePVCAndDeploymentApp(f, "", pvcClone, appClone)
err = deletePVCAndDeploymentApp(f, pvcClone, appClone)
if err != nil {
e2elog.Failf("failed to delete PVC and application: %v", err)
}

View File

@ -191,19 +191,12 @@ func createPVCAndApp(
return err
}
// createPVCAndDeploymentApp creates pvc and deployment, if name is not empty
// same will be set as pvc and app name.
// createPVCAndDeploymentApp creates pvc and deployment.
func createPVCAndDeploymentApp(
f *framework.Framework,
name string,
pvc *v1.PersistentVolumeClaim,
app *appsv1.Deployment,
pvcTimeout int) error {
if name != "" {
pvc.Name = name
app.Name = name
app.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
}
err := createPVCAndvalidatePV(f.ClientSet, pvc, pvcTimeout)
if err != nil {
return err
@ -213,19 +206,50 @@ func createPVCAndDeploymentApp(
return err
}
// DeletePVCAndDeploymentApp deletes pvc and deployment, if name is not empty
// same will be set as pvc and app name.
func deletePVCAndDeploymentApp(
// validatePVCAndDeploymentAppBinding creates PVC and Deployment, and waits until
// all its replicas are Running. Use `replicas` to override default number of replicas
// defined in `deploymentPath` Deployment manifest.
func validatePVCAndDeploymentAppBinding(
f *framework.Framework,
name string,
pvc *v1.PersistentVolumeClaim,
app *appsv1.Deployment) error {
if name != "" {
pvc.Name = name
app.Name = name
app.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = name
pvcPath string,
deploymentPath string,
namespace string,
replicas *int32,
pvcTimeout int,
) (*v1.PersistentVolumeClaim, *appsv1.Deployment, error) {
pvc, err := loadPVC(pvcPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to load PVC: %w", err)
}
pvc.Namespace = namespace
depl, err := loadAppDeployment(deploymentPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to load Deployment: %w", err)
}
depl.Namespace = f.UniqueName
if replicas != nil {
depl.Spec.Replicas = replicas
}
err = createPVCAndDeploymentApp(f, pvc, depl, pvcTimeout)
if err != nil {
return nil, nil, err
}
err = waitForDeploymentComplete(f.ClientSet, depl.Name, depl.Namespace, deployTimeout)
if err != nil {
return nil, nil, err
}
return pvc, depl, nil
}
// DeletePVCAndDeploymentApp deletes pvc and deployment.
func deletePVCAndDeploymentApp(
f *framework.Framework,
pvc *v1.PersistentVolumeClaim,
app *appsv1.Deployment) error {
err := deleteDeploymentApp(f.ClientSet, app.Name, app.Namespace, deployTimeout)
if err != nil {
return err

View File

@ -0,0 +1,13 @@
---
# This is an example Kubernetes Secret that can be created in the Kubernetes
# Namespace where Ceph-CSI is deployed. The contents of this Secret will be
# used to fetch credentials from Amazon STS and to connect to the
# Amazon KMS.
apiVersion: v1
kind: Secret
metadata:
name: ceph-csi-aws-credentials
stringData:
awsRoleARN: "arn:aws:iam::111122223333:role/aws-sts-kms"
awsCMKARN: "arn:aws:kms:us-west-2:111123:key/1234cd-12ab-34cd-56ef-123590ab"
awsRegion: "us-west-2"

View File

@ -67,7 +67,12 @@ data:
"IBM_KP_SERVICE_INSTANCE_ID": "7abef064-01dd-4237-9ea5-8b3890970be3",
"IBM_KP_BASE_URL": "https://us-south.kms.cloud.ibm.com",
"IBM_KP_TOKEN_URL": "https://iam.cloud.ibm.com/oidc/token",
"IBM_KP_REGION": "us-south-2",
"IBM_KP_REGION": "us-south-2"
}
aws-sts-metadata-test: |-
{
"encryptionKMSType": "aws-sts-metadata",
"secretName": "ceph-csi-aws-credentials"
}
metadata:
name: csi-kms-connection-details

View File

@ -96,6 +96,10 @@ data:
"secretName": "ceph-csi-kp-credentials",
"keyProtectRegionKey": "us-south-2",
"keyProtectServiceInstanceID": "7abef064-01dd-4237-9ea5-8b3890970be3"
},
"aws-sts-metadata-test": {
"encryptionKMSType": "aws-sts-metadata",
"secretName": "ceph-csi-aws-credentials"
}
}
metadata:

examples/nfs/README.md
View File

@ -0,0 +1,70 @@
# Dynamic provisioning with NFS
The easiest way to try out the examples for dynamic provisioning with NFS is
to use [Rook Ceph with CephNFS][rook_ceph]. Rook can be used to deploy a Ceph
cluster. Ceph can maintain an NFS-Ganesha service with a few commands, so
configuring the Ceph cluster takes minimal effort.
## Enabling the Ceph NFS-service
Ceph does not enable the NFS-service by default. In order for Rook Ceph to be
able to configure NFS-exports, the NFS-service needs to be configured first.
In the [Rook Toolbox][rook_toolbox], run the following commands:
```console
ceph osd pool create nfs-ganesha
ceph mgr module enable rook
ceph mgr module enable nfs
ceph orch set backend rook
```
## Create a NFS-cluster
In the directory where this `README` is located, there is an example
`rook-nfs.yaml` file. This file can be used to create a Ceph managed
NFS-cluster with the name "my-nfs".
```console
$ kubectl create -f rook-nfs.yaml
cephnfs.ceph.rook.io/my-nfs created
```
The CephNFS resource will create a NFS-Ganesha Pod and Service with label
`app=rook-ceph-nfs`:
```console
$ kubectl get pods -l app=rook-ceph-nfs
NAME READY STATUS RESTARTS AGE
rook-ceph-nfs-my-nfs-a-5d47f66977-sc2rk 2/2 Running 0 61s
$ kubectl get service -l app=rook-ceph-nfs
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
rook-ceph-nfs-my-nfs-a ClusterIP 172.30.218.195 <none> 2049/TCP 2m58s
```
## Create a StorageClass
The parameters of the StorageClass mostly reflect what CephFS requires to
connect to the Ceph cluster. All required options are clearly commented in the
`storageclass.yaml` file.
In addition to the CephFS parameters, there are:
- `nfsCluster`: name of the Ceph managed NFS-cluster (here `my-nfs`)
- `server`: hostname/IP/service of the NFS-server (here `172.30.218.195`)
Edit `storageclass.yaml`, and create the resource:
```console
$ kubectl create -f storageclass.yaml
storageclass.storage.k8s.io/csi-nfs-sc created
```
## TODO: next steps
- deploy the NFS-provisioner
- deploy the kubernetes-csi/csi-driver-nfs
- create the CSIDriver object
[rook_ceph]: https://rook.io/docs/rook/latest/ceph-nfs-crd.html
[rook_toolbox]: https://rook.io/docs/rook/latest/ceph-toolbox.html

examples/nfs/pod.yaml
View File

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Pod
metadata:
name: cephcsi-nfs-demo-pod
spec:
containers:
- name: web-server
image: docker.io/library/nginx:latest
volumeMounts:
- name: mypvc
mountPath: /var/lib/www
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: cephcsi-nfs-pvc
readOnly: false

examples/nfs/pvc.yaml
View File

@ -0,0 +1,12 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: cephcsi-nfs-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
storageClassName: csi-nfs-sc

View File

@ -0,0 +1,19 @@
---
apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
name: my-nfs
namespace: default
spec:
# For Ceph v15, the rados block is required. It is ignored for Ceph v16.
rados:
# Ceph v16 always uses/expects "nfs-ganesha"
pool: nfs-ganesha
# RADOS namespace where NFS client recovery data is stored in the pool.
# fixed value for Ceph v16: the name of this CephNFS object
namespace: my-nfs
# Settings for the NFS server
server:
# the number of active NFS servers
active: 1

View File

@ -0,0 +1,49 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-nfs-sc
provisioner: nfs.csi.ceph.com
parameters:
# (required) Name of the NFS-cluster as managed by Ceph.
nfsCluster: <nfs-cluster>
# (required) Hostname, ip-address or service that points to the Ceph managed
# NFS-server that will be used for mounting the NFS-export.
server: <nfs-server>
#
# The parameters below are standard CephFS options, these are used for
# managing the underlying CephFS volume.
#
# (required) String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use.
# Ensure to create an entry in the configmap named ceph-csi-config, based on
# csi-config-map-sample.yaml, to accompany the string chosen to
# represent the Ceph cluster in clusterID below
clusterID: <cluster-id>
# (required) CephFS filesystem name into which the volume shall be created
# eg: fsName: myfs
fsName: <cephfs-name>
# (optional) Ceph pool into which volume data shall be stored
# pool: <cephfs-data-pool>
# The secrets have to contain user and/or Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
csi.storage.k8s.io/provisioner-secret-namespace: default
csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
csi.storage.k8s.io/controller-expand-secret-namespace: default
csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
csi.storage.k8s.io/node-stage-secret-namespace: default
# (optional) Prefix to use for naming subvolumes.
# If omitted, defaults to "csi-vol-".
volumeNamePrefix: nfs-export-
reclaimPolicy: Delete
allowVolumeExpansion: false

go.mod
View File

@ -4,7 +4,8 @@ go 1.17
require (
github.com/IBM/keyprotect-go-client v0.7.0
github.com/aws/aws-sdk-go v1.43.8
github.com/aws/aws-sdk-go v1.43.22
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.14.0
github.com/container-storage-interface/spec v1.5.0
@ -13,29 +14,29 @@ require (
github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/vault/api v1.4.1
github.com/kubernetes-csi/csi-lib-utils v0.10.0
github.com/hashicorp/vault/api v1.5.0
github.com/kubernetes-csi/csi-lib-utils v0.11.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.18.1
github.com/onsi/gomega v1.19.0
github.com/pborman/uuid v1.2.1
github.com/prometheus/client_golang v1.12.1
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.7.1
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
google.golang.org/grpc v1.44.0
google.golang.org/protobuf v1.27.1
k8s.io/api v0.23.4
k8s.io/apimachinery v0.23.4
google.golang.org/grpc v1.45.0
google.golang.org/protobuf v1.28.0
k8s.io/api v0.23.5
k8s.io/apimachinery v0.23.5
k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.23.4
k8s.io/klog/v2 v2.40.1
k8s.io/cloud-provider v0.23.5
k8s.io/klog/v2 v2.60.1
//
// when updating k8s.io/kubernetes, make sure to update the replace section too
//
k8s.io/kubernetes v1.23.4
k8s.io/mount-utils v0.23.4
k8s.io/kubernetes v1.23.5
k8s.io/mount-utils v0.23.5
k8s.io/utils v0.0.0-20211116205334-6203023598ed
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2
)
@ -43,6 +44,11 @@ require (
require (
github.com/armon/go-metrics v0.3.9 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.15.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 // indirect
github.com/aws/smithy-go v1.11.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.2.0 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
@ -61,7 +67,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
@ -126,7 +132,7 @@ require (
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
@ -139,13 +145,13 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/apiserver v0.23.4 // indirect
k8s.io/component-base v0.23.4 // indirect
k8s.io/component-helpers v0.23.4 // indirect
k8s.io/apiserver v0.23.5 // indirect
k8s.io/component-base v0.23.5 // indirect
k8s.io/component-helpers v0.23.5 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.0.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
@ -160,31 +166,31 @@ replace (
//
// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
//
k8s.io/api => k8s.io/api v0.23.4
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.4
k8s.io/apimachinery => k8s.io/apimachinery v0.23.4
k8s.io/apiserver => k8s.io/apiserver v0.23.4
k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.4
k8s.io/client-go => k8s.io/client-go v0.23.4
k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.4
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.4
k8s.io/code-generator => k8s.io/code-generator v0.23.4
k8s.io/component-base => k8s.io/component-base v0.23.4
k8s.io/component-helpers => k8s.io/component-helpers v0.23.4
k8s.io/controller-manager => k8s.io/controller-manager v0.23.4
k8s.io/cri-api => k8s.io/cri-api v0.23.4
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.4
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.4
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.4
k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.4
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.4
k8s.io/kubectl => k8s.io/kubectl v0.23.4
k8s.io/kubelet => k8s.io/kubelet v0.23.4
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.4
k8s.io/metrics => k8s.io/metrics v0.23.4
k8s.io/mount-utils => k8s.io/mount-utils v0.23.4
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.4
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.4
k8s.io/api => k8s.io/api v0.23.5
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.5
k8s.io/apimachinery => k8s.io/apimachinery v0.23.5
k8s.io/apiserver => k8s.io/apiserver v0.23.5
k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.5
k8s.io/client-go => k8s.io/client-go v0.23.5
k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.5
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.5
k8s.io/code-generator => k8s.io/code-generator v0.23.5
k8s.io/component-base => k8s.io/component-base v0.23.5
k8s.io/component-helpers => k8s.io/component-helpers v0.23.5
k8s.io/controller-manager => k8s.io/controller-manager v0.23.5
k8s.io/cri-api => k8s.io/cri-api v0.23.5
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.5
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.5
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.5
k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.5
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.5
k8s.io/kubectl => k8s.io/kubectl v0.23.5
k8s.io/kubelet => k8s.io/kubelet v0.23.5
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.5
k8s.io/metrics => k8s.io/metrics v0.23.5
k8s.io/mount-utils => k8s.io/mount-utils v0.23.5
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.5
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.5
// layeh.com seems to be misbehaving
layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
)

go.sum
View File

@ -138,8 +138,20 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.43.8 h1:8a/M9C4l5CxFNM6IuNx4F1p+ITJEX12VxWxUQo61cbc=
github.com/aws/aws-sdk-go v1.43.8/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.43.22 h1:QY9/1TZB73UDEVQ68sUVJXf/7QUiHZl7zbbLF1wpqlc=
github.com/aws/aws-sdk-go v1.43.22/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.15.0 h1:f9kWLNfyCzCB43eupDAk3/XgJ2EpgktiySD6leqs0js=
github.com/aws/aws-sdk-go-v2 v1.15.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6 h1:xiGjGVQsem2cxoIX61uRGy+Jux2s9C/kKbTrWLdrU54=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.6/go.mod h1:SSPEdf9spsFgJyhjrXvawfpyzrXHBCUe+2eQ1CjC1Ak=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0 h1:bt3zw79tm209glISdMRCIVRCwvSDXxgAxh5KWe2qHkY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.0/go.mod h1:viTrxhAuejD+LszDahzAE2x40YjYWhMqzHxv2ZiWaME=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0 h1:YQ3fTXACo7xeAqg0NiqcCmBOXJruUfh+4+O2qxF2EjQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.0/go.mod h1:R31ot6BgESRCIoxwfKtIHzZMo/vsZn2un81g9BJ4nmo=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0 h1:0+X/rJ2+DTBKWbUsn7WtF0JvNk/fRf928vkFsXkbbZs=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.0/go.mod h1:+8k4H2ASUZZXmjx/s3DFLo9tGBb44lkz3XcgfypJY7s=
github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -437,8 +449,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@ -635,8 +648,8 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
github.com/hashicorp/vault/api v1.4.1 h1:mWLfPT0RhxBitjKr6swieCEP2v5pp/M//t70S3kMLRo=
github.com/hashicorp/vault/api v1.4.1/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM=
github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28=
github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM=
github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU=
github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU=
@ -729,8 +742,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.10.0 h1:Aqm8X81eCzzfH/bvIEqSWtcbK9HF9NbFk4d+le1snVA=
github.com/kubernetes-csi/csi-lib-utils v0.10.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0=
github.com/kubernetes-csi/csi-lib-utils v0.11.0 h1:FHWOBtAZBA/hVk7v/qaXgG9Sxv0/n06DebPFuDwumqg=
github.com/kubernetes-csi/csi-lib-utils v0.11.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
@ -852,8 +865,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -861,8 +874,8 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@ -1045,8 +1058,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@ -1266,8 +1280,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1619,8 +1634,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
@ -1628,8 +1643,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1690,28 +1706,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E=
k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI=
k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU=
k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g=
k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM=
k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.23.4 h1:zNvQlG+C/ERjuUz4p7eY/0IWHaMixRSBoxgmyIdwo9Y=
k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc=
k8s.io/cli-runtime v0.23.4/go.mod h1:7KywUNTUibmHPqmpDFuRO1kc9RhsufHv2lkjCm2YZyM=
k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU=
k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0=
k8s.io/cloud-provider v0.23.4 h1:Nx42V7+Vpaad3qZE031MpTfCDl3jeQrX6wuwieES/nc=
k8s.io/cloud-provider v0.23.4/go.mod h1:+RFNcj7DczZJE250/l55hh4Be4tlHkNgdtmI4PzxhJ0=
k8s.io/cluster-bootstrap v0.23.4/go.mod h1:H5UZ3a4ZvjyUIgTgW8VdnN1rm3DsRqhotqK9oDMHU1o=
k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/component-base v0.23.4 h1:SziYh48+QKxK+ykJ3Ejqd98XdZIseVBG7sBaNLPqy6M=
k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E=
k8s.io/component-helpers v0.23.4 h1:zCLeBuo3Qs0BqtJu767RXJgs5S9ruFJZcbM1aD+cMmc=
k8s.io/component-helpers v0.23.4/go.mod h1:1Pl7L4zukZ054ElzRbvmZ1FJIU8roBXFOeRFu8zipa4=
k8s.io/controller-manager v0.23.4/go.mod h1:+ednTkO5Z25worecG5ORa7NssZT0cpuVunVHN+24Ccs=
k8s.io/cri-api v0.23.4/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
k8s.io/csi-translation-lib v0.23.4/go.mod h1:hvAm5aoprpfE7p9Xnfe3ObmbhDcYp3U7AZJnVQUlrqw=
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.23.5 h1:2Ly8oUjz5cnZRn1YwYr+aFgDZzUmEVL9RscXbnIeDSE=
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
k8s.io/cli-runtime v0.23.5/go.mod h1:oY6QDF2qo9xndSq32tqcmRp2UyXssdGrLfjAVymgbx4=
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
k8s.io/cloud-provider v0.23.5 h1:cf5Il2oV++RtlqgNesHd+tDFtOp85dG0t9KN/pmb71s=
k8s.io/cloud-provider v0.23.5/go.mod h1:xMZFA6pIYKweqTkWCYVgRSVMAjqOvxVr3u/kmfyxvkU=
k8s.io/cluster-bootstrap v0.23.5/go.mod h1:8/Gz6VTOMmEDDhn8U/nx0McnQR4YETAqiYXIlqR8hdQ=
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
k8s.io/component-helpers v0.23.5 h1:6uTMNP6xxJrSzYTC7BCcH2S/PbSZGxSUZG0PG+nT4tM=
k8s.io/component-helpers v0.23.5/go.mod h1:5riXJgjTIs+ZB8xnf5M2anZ8iQuq37a0B/0BgoPQuSM=
k8s.io/controller-manager v0.23.5/go.mod h1:n/KRlUzAtkFcZodZ/w0GlQdmErVKh7lS/wS0bbo7W4I=
k8s.io/cri-api v0.23.5/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
k8s.io/csi-translation-lib v0.23.5/go.mod h1:8RyFkoHAJrFU7c7MN1ZUjctm3ZhHclKm1FIHNSyGcuw=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@ -1722,28 +1738,28 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4=
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-aggregator v0.23.4/go.mod h1:hpmPi4oaLBe014CkBCqzBYWok64H2C7Ka6FBLJvHgkg=
k8s.io/kube-controller-manager v0.23.4/go.mod h1:r4Cn9Y8t3GyMPrPnOGCDRpeyEKVOITuwHJ7pIWXH0IY=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-aggregator v0.23.5/go.mod h1:3ynYx07Co6dzjpKPgipM+1/Mt2Jcm7dY++cRlKLr5s8=
k8s.io/kube-controller-manager v0.23.5/go.mod h1:Pkg5lIk9YG9Qjj4F7Dn0gi6/k8cEYP63oLdgrlrrtu4=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-proxy v0.23.4/go.mod h1:uZBvTCJYVBqnlyup3JpXaMmqrlkzHjcakHhf7ojYUKk=
k8s.io/kube-scheduler v0.23.4/go.mod h1:KNKYvMZ8dhoMLYygiEMEK+JKFQ2fhW2CLj7B5zEQ/68=
k8s.io/kubectl v0.23.4 h1:mAa+zEOlyZieecEy+xSrhjkpMcukYyHWzcNdX28dzMY=
k8s.io/kubectl v0.23.4/go.mod h1:Dgb0Rvx/8JKS/C2EuvsNiQc6RZnX0SbHJVG3XUzH6ok=
k8s.io/kubelet v0.23.4 h1:yptgklhQ3dtHHIpH/RgI0861XWoJ9/YIBnnxYS6l8VI=
k8s.io/kubelet v0.23.4/go.mod h1:RjbycP9Wnpbw33G8yFt9E23+pFYxzWy1d8qHU0KVUgg=
k8s.io/kubernetes v1.23.4 h1:25dqAMS96u+9L/A7AHdEW7aMTcmHoQMbMPug6Fa61JE=
k8s.io/kubernetes v1.23.4/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io=
k8s.io/legacy-cloud-providers v0.23.4/go.mod h1:dl0qIfmTyeDpRe/gaudDVnLsykKW2DE7oBWbuJl2Gd8=
k8s.io/metrics v0.23.4/go.mod h1:cl6sY9BdVT3DubbpqnkPIKi6mn/F2ltkU4yH1tEJ3Bo=
k8s.io/mount-utils v0.23.4 h1:tWUj5A0DJ29haMiO7F3pNdP2HwyMWczzvqQmikFc9s8=
k8s.io/mount-utils v0.23.4/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98=
k8s.io/pod-security-admission v0.23.4/go.mod h1:cikO3akkUoTZ8uFhkHdlWp0m3XosiOqssTHb+TfCjLw=
k8s.io/sample-apiserver v0.23.4/go.mod h1:ITqvv82GqqeRue7dmsP7A/As/MHE2v1H3vriNRFv+/U=
k8s.io/kube-proxy v0.23.5/go.mod h1:/yCbRrOHgPCb1g1k4XmMJPmNesfdPhZTGrvwNlNgwo8=
k8s.io/kube-scheduler v0.23.5/go.mod h1:IJGf4WngeoAHLj4ms4n3Poa29ttmaxCXxIqpgU0ky7E=
k8s.io/kubectl v0.23.5 h1:DmDULqCaF4qstj0Im143XmncvqWtJxHzK8IrW2BzlU0=
k8s.io/kubectl v0.23.5/go.mod h1:lLgw7cVY8xbd7o637vOXPca/w6HC205KsPCRDYRCxwE=
k8s.io/kubelet v0.23.5 h1:eCGJ7olStiyF7TYHlUTjpXg2ltw7Bs9OPZcch8HP2Go=
k8s.io/kubelet v0.23.5/go.mod h1:M0aj0gaX+rOaGfCfqkV6P7QbwtMwqbL6RdwviHmnehU=
k8s.io/kubernetes v1.23.5 h1:bxpSv2BKc2MqYRfyqQqLVdodLZ2r+NZ/rEdZXyUAvug=
k8s.io/kubernetes v1.23.5/go.mod h1:avI3LUTUYZugxwh52KMVM7v9ZjB5gYJ6D3FIoZ1SHUo=
k8s.io/legacy-cloud-providers v0.23.5/go.mod h1:IENlwY686f1fbakotgNf7gAQuIyCvOUIAXkPPPE/7KU=
k8s.io/metrics v0.23.5/go.mod h1:WNAtV2a5BYbmDS8+7jSqYYV6E3efuGTpIwJ8PTD1wgs=
k8s.io/mount-utils v0.23.5 h1:MOhJKZTpfC21r5OamKYWMdVNtTMDD9wZfTkLOhI5nuE=
k8s.io/mount-utils v0.23.5/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98=
k8s.io/pod-security-admission v0.23.5/go.mod h1:aSyWfjev8Zil5DaZBZ+ICAObZmZlRqhnAZHxA9r71UI=
k8s.io/sample-apiserver v0.23.5/go.mod h1:m4cnT3HgRY5Dt2AjMVKGnb31D6rGY0B+xpKtRJUUC8w=
k8s.io/system-validators v1.6.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@ -1760,8 +1776,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 h1:KQOkVzXrLNb0EP6W0FD6u3CCPAwgXFYwZitbj7K0P0Y=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 h1:dUk62HQ3ZFhD48Qr8MIXCiKA8wInBQCtuE4QGfFW7yA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2 h1:+ReKrjTrd57mtAU19BJkxSAaWRIQkFlaWcO6dGFVP1g=
sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=

View File

@ -27,6 +27,7 @@ import (
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -148,6 +149,35 @@ func checkContentSource(
return nil, nil, nil, status.Errorf(codes.InvalidArgument, "not a proper volume source %v", volumeSource)
}
// checkValidCreateVolumeRequest checks if the request is valid
// CreateVolumeRequest by inspecting the request parameters.
func checkValidCreateVolumeRequest(
vol,
parentVol *store.VolumeOptions,
pvID *store.VolumeIdentifier,
sID *store.SnapshotIdentifier) error {
switch {
case pvID != nil:
if vol.Size < parentVol.Size {
return fmt.Errorf(
"cannot clone from volume %s: volume size %d is smaller than source volume size %d",
pvID.VolumeID,
vol.Size,
parentVol.Size)
}
case sID != nil:
if vol.Size < parentVol.Size {
return fmt.Errorf(
"cannot restore from snapshot %s: volume size %d is smaller than source volume size %d",
sID.SnapshotID,
vol.Size,
parentVol.Size)
}
}
return nil
}
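To make the effect of the new validation concrete, here is a small sketch with made-up sizes and a hypothetical source volume ID, assumed to sit in the same cephfs package so that the store types are in scope; the real values come from the CSI request and the reserved parent volume:

// a sketch, not part of the change set: a clone smaller than its source
// is rejected before any subvolume operation happens
func exampleRejectSmallerClone() error {
	parentVol := &store.VolumeOptions{Size: 2 << 30}            // source subvolume, 2 GiB
	vol := &store.VolumeOptions{Size: 1 << 30}                  // requested clone, 1 GiB
	pvID := &store.VolumeIdentifier{VolumeID: "csi-vol-source"} // hypothetical ID

	// CreateVolume wraps the returned error as codes.InvalidArgument.
	return checkValidCreateVolumeRequest(vol, parentVol, pvID, nil)
}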
// CreateVolume creates a reservation and the volume in backend, if it is not already present.
// nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
func (cs *ControllerServer) CreateVolume(
@ -199,6 +229,11 @@ func (cs *ControllerServer) CreateVolume(
defer parentVol.Destroy()
}
err = checkValidCreateVolumeRequest(volOptions, parentVol, pvID, sID)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
vID, err := store.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
if err != nil {
if cerrors.IsCloneRetryError(err) {
@ -233,7 +268,8 @@ func (cs *ControllerServer) CreateVolume(
}
}
volumeContext := req.GetParameters()
// remove kubernetes csi prefixed parameters.
volumeContext := k8s.RemoveCSIPrefixedParameters(req.GetParameters())
volumeContext["subvolumeName"] = vID.FsSubvolName
volumeContext["subvolumePath"] = volOptions.RootPath
volume := &csi.Volume{
@ -306,7 +342,8 @@ func (cs *ControllerServer) CreateVolume(
log.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
vID.FsSubvolName, requestName)
volumeContext := req.GetParameters()
// remove kubernetes csi prefixed parameters.
volumeContext := k8s.RemoveCSIPrefixedParameters(req.GetParameters())
volumeContext["subvolumeName"] = vID.FsSubvolName
volumeContext["subvolumePath"] = volOptions.RootPath
volume := &csi.Volume{
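Both CreateVolume hunks above replace the raw req.GetParameters() map with a filtered copy before it becomes the VolumeContext. The helper itself lives in internal/util/k8s; a minimal sketch of the idea, assuming it simply drops the csi.storage.k8s.io/ prefixed keys that the Kubernetes sidecars inject (the exact prefix handling in the real helper may differ):

package k8s

import "strings"

// RemoveCSIPrefixedParameters returns a copy of the volume parameters
// without the Kubernetes-internal "csi.storage.k8s.io/" entries, so only
// user supplied StorageClass options end up in the VolumeContext.
func RemoveCSIPrefixedParameters(param map[string]string) map[string]string {
	newParam := map[string]string{}
	for k, v := range param {
		if !strings.HasPrefix(k, "csi.storage.k8s.io/") {
			newParam[k] = v
		}
	}

	return newParam
}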
@ -786,10 +823,10 @@ func (cs *ControllerServer) DeleteSnapshot(
// success as deletion is complete
return &csi.DeleteSnapshotResponse{}, nil
case errors.Is(err, cerrors.ErrSnapNotFound):
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err)
sid.RequestName, sid.FsSnapshotName, err)
return nil, status.Error(codes.Internal, err.Error())
}
@ -799,10 +836,10 @@ func (cs *ControllerServer) DeleteSnapshot(
// if the error is ErrVolumeNotFound, the subvolume is already deleted
// from backend, Hence undo the omap entries and return success
log.ErrorLog(ctx, "Volume not present")
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err)
sid.RequestName, sid.FsSnapshotName, err)
return nil, status.Error(codes.Internal, err.Error())
}
@ -837,7 +874,7 @@ func (cs *ControllerServer) DeleteSnapshot(
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err)

View File

@ -237,7 +237,6 @@ func (s *subVolumeClient) CreateVolume(ctx context.Context) error {
opts.PoolLayout = s.Pool
}
fmt.Println("this is for debugging ")
// FIXME: check if the right credentials are used ("-n", cephEntityClientPrefix + cr.ID)
err = ca.CreateSubVolume(s.FsName, s.SubvolumeGroup, s.VolID, &opts)
if err != nil {

View File

@ -0,0 +1,273 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"context"
"github.com/ceph/ceph-csi/internal/cephfs/mounter"
"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
mountutil "k8s.io/mount-utils"
)
type (
mountState int
)
const (
msUnknown mountState = iota
msNotMounted
msMounted
msCorrupted
// ceph-fuse fsType in /proc/<PID>/mountinfo.
cephFuseFsType = "fuse.ceph-fuse"
)
func (ms mountState) String() string {
return [...]string{
"UNKNOWN",
"NOT_MOUNTED",
"MOUNTED",
"CORRUPTED",
}[int(ms)]
}
func getMountState(path string) (mountState, error) {
isMnt, err := util.IsMountPoint(path)
if err != nil {
if util.IsCorruptedMountError(err) {
return msCorrupted, nil
}
return msUnknown, err
}
if isMnt {
return msMounted, nil
}
return msNotMounted, nil
}
func findMountinfo(mountpoint string, mis []mountutil.MountInfo) int {
for i := range mis {
if mis[i].MountPoint == mountpoint {
return i
}
}
return -1
}
// Ensures that given mountpoint is of specified fstype.
// Returns true if fstype matches, or if no such mountpoint exists.
func validateFsType(mountpoint, fsType string, mis []mountutil.MountInfo) bool {
if idx := findMountinfo(mountpoint, mis); idx >= 0 {
mi := mis[idx]
if mi.FsType != fsType {
return false
}
}
return true
}
// tryRestoreFuseMountsInNodePublish tries to restore staging and publish
// volume mountpoints inside the NodePublishVolume call.
//
// Restoration is performed in following steps:
// 1. Detection: staging target path must be a working mountpoint, and target
// path must not be a corrupted mountpoint (see getMountState()). If either
// of those checks fail, mount recovery is performed.
// 2. Recovery preconditions:
// * NodeStageMountinfo is present for this volume,
// * if staging target path and target path are mountpoints, they must be
// managed by ceph-fuse,
// * VolumeOptions.Mounter must evaluate to "fuse".
// 3. Recovery:
// * staging target path is unmounted and mounted again using ceph-fuse,
// * target path is only unmounted; NodePublishVolume is then expected to
// continue normally.
func (ns *NodeServer) tryRestoreFuseMountsInNodePublish(
ctx context.Context,
volID fsutil.VolumeID,
stagingTargetPath string,
targetPath string,
volContext map[string]string,
) error {
// Check if there is anything to restore.
stagingTargetMs, err := getMountState(stagingTargetPath)
if err != nil {
return err
}
targetMs, err := getMountState(targetPath)
if err != nil {
return err
}
if stagingTargetMs == msMounted && targetMs != msCorrupted {
// Mounts seem to be fine.
return nil
}
// Something is broken. Try to proceed with mount recovery.
log.WarningLog(ctx, "cephfs: mount problem detected when publishing a volume: %s is %s, %s is %s; attempting recovery",
stagingTargetPath, stagingTargetMs, targetPath, targetMs)
// NodeStageMountinfo entry must be present for this volume.
var nsMountinfo *fsutil.NodeStageMountinfo
if nsMountinfo, err = fsutil.GetNodeStageMountinfo(volID); err != nil {
return err
} else if nsMountinfo == nil {
log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery because NodeStageMountinfo record is missing")
return nil
}
// Check that the existing stage and publish mounts for this volume are
// managed by ceph-fuse, and that the mounter is of the FuseMounter type.
// Then try to restore them.
var (
volMounter mounter.VolumeMounter
volOptions *store.VolumeOptions
)
procMountInfo, err := util.ReadMountInfoForProc("self")
if err != nil {
return err
}
if !validateFsType(stagingTargetPath, cephFuseFsType, procMountInfo) ||
!validateFsType(targetPath, cephFuseFsType, procMountInfo) {
// We can't restore mounts not managed by ceph-fuse.
log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery on non-FUSE mountpoints")
return nil
}
volOptions, err = ns.getVolumeOptions(ctx, volID, volContext, nsMountinfo.Secrets)
if err != nil {
return err
}
volMounter, err = mounter.New(volOptions)
if err != nil {
return err
}
if _, ok := volMounter.(*mounter.FuseMounter); !ok {
// We can't restore mounts with non-FUSE mounter.
log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery with non-FUSE mounter")
return nil
}
// Try to restore mount in staging target path.
// Unmount and mount the volume.
if stagingTargetMs != msMounted {
if err := mounter.UnmountVolume(ctx, stagingTargetPath); err != nil {
return err
}
if err := ns.mount(
ctx,
volMounter,
volOptions,
volID,
stagingTargetPath,
nsMountinfo.Secrets,
nsMountinfo.VolumeCapability,
); err != nil {
return err
}
}
// Try to restore mount in target path.
// Only unmount the bind mount. NodePublishVolume should then
// create the bind mount by itself.
if err := mounter.UnmountVolume(ctx, targetPath); err != nil {
return err
}
return nil
}
// Try to restore FUSE mount of the staging target path in NodeStageVolume.
// If corruption is detected, try to only unmount the volume. NodeStageVolume
// should be able to continue with mounting the volume normally afterwards.
func (ns *NodeServer) tryRestoreFuseMountInNodeStage(
ctx context.Context,
mnt mounter.VolumeMounter,
stagingTargetPath string,
) error {
// Check if there is anything to restore.
stagingTargetMs, err := getMountState(stagingTargetPath)
if err != nil {
return err
}
if stagingTargetMs != msCorrupted {
// Mounts seem to be fine.
return nil
}
// Something is broken. Try to proceed with mount recovery.
log.WarningLog(ctx, "cephfs: mountpoint problem detected when staging a volume: %s is %s; attempting recovery",
stagingTargetPath, stagingTargetMs)
// Check that the existing stage mount for this volume is managed by
// ceph-fuse, and that the mounter is FuseMounter. Then try to restore them.
procMountInfo, err := util.ReadMountInfoForProc("self")
if err != nil {
return err
}
if !validateFsType(stagingTargetPath, cephFuseFsType, procMountInfo) {
// We can't restore mounts not managed by ceph-fuse.
log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery on non-FUSE mountpoints")
return nil
}
if _, ok := mnt.(*mounter.FuseMounter); !ok {
// We can't restore mounts with non-FUSE mounter.
log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery with non-FUSE mounter")
return nil
}
// Restoration here means only unmounting the volume.
// NodeStageVolume should take care of the rest.
return mounter.UnmountVolume(ctx, stagingTargetPath)
}

View File

@ -47,11 +47,10 @@ type NodeServer struct {
func getCredentialsForVolume(
volOptions *store.VolumeOptions,
req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
secrets map[string]string) (*util.Credentials, error) {
var (
err error
cr *util.Credentials
secrets = req.GetSecrets()
err error
cr *util.Credentials
)
if volOptions.ProvisionVolume {
@ -64,7 +63,7 @@ func getCredentialsForVolume(
} else {
// The volume is pre-made, credentials are in node stage secrets
cr, err = util.NewUserCredentials(req.GetSecrets())
cr, err = util.NewUserCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %w", err)
}
@ -73,11 +72,38 @@ func getCredentialsForVolume(
return cr, nil
}
func (ns *NodeServer) getVolumeOptions(
ctx context.Context,
volID fsutil.VolumeID,
volContext,
volSecrets map[string]string,
) (*store.VolumeOptions, error) {
volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), volContext, volSecrets)
if err != nil {
if !errors.Is(err, cerrors.ErrInvalidVolID) {
return nil, status.Error(codes.Internal, err.Error())
}
volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), volContext)
if err != nil {
if !errors.Is(err, cerrors.ErrNonStaticVolume) {
return nil, status.Error(codes.Internal, err.Error())
}
volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), volContext, volSecrets)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
}
return volOptions, nil
}
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
var volOptions *store.VolumeOptions
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
@ -94,31 +120,25 @@ func (ns *NodeServer) NodeStageVolume(
}
defer ns.VolumeLocks.Release(req.GetVolumeId())
volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
volOptions, err := ns.getVolumeOptions(ctx, volID, req.GetVolumeContext(), req.GetSecrets())
if err != nil {
if !errors.Is(err, cerrors.ErrInvalidVolID) {
return nil, status.Error(codes.Internal, err.Error())
}
// gets mon IPs from the supplied cluster info
volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
if err != nil {
if !errors.Is(err, cerrors.ErrNonStaticVolume) {
return nil, status.Error(codes.Internal, err.Error())
}
// get mon IPs from the volume context
volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
return nil, status.Error(codes.Internal, err.Error())
}
defer volOptions.Destroy()
mnt, err := mounter.New(volOptions)
if err != nil {
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
// Check if the volume is already mounted
if err = ns.tryRestoreFuseMountInNodeStage(ctx, mnt, stagingTargetPath); err != nil {
return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
}
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
log.ErrorLog(ctx, "stat failed: %v", err)
@ -133,20 +153,53 @@ func (ns *NodeServer) NodeStageVolume(
}
// It's not, mount now
if err = ns.mount(ctx, volOptions, req); err != nil {
if err = ns.mount(
ctx,
mnt,
volOptions,
fsutil.VolumeID(req.GetVolumeId()),
req.GetStagingTargetPath(),
req.GetSecrets(),
req.GetVolumeCapability(),
); err != nil {
return nil, err
}
log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
if _, isFuse := mnt.(*mounter.FuseMounter); isFuse {
// FUSE mount recovery needs NodeStageMountinfo records.
if err = fsutil.WriteNodeStageMountinfo(volID, &fsutil.NodeStageMountinfo{
VolumeCapability: req.GetVolumeCapability(),
Secrets: req.GetSecrets(),
}); err != nil {
log.ErrorLog(ctx, "cephfs: failed to write NodeStageMountinfo for volume %s: %v", volID, err)
// Try to clean node stage mount.
if unmountErr := mounter.UnmountVolume(ctx, stagingTargetPath); unmountErr != nil {
log.ErrorLog(ctx, "cephfs: failed to unmount %s in WriteNodeStageMountinfo clean up: %v",
stagingTargetPath, unmountErr)
}
return nil, status.Error(codes.Internal, err.Error())
}
}
return &csi.NodeStageVolumeResponse{}, nil
}
func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
stagingTargetPath := req.GetStagingTargetPath()
volID := fsutil.VolumeID(req.GetVolumeId())
cr, err := getCredentialsForVolume(volOptions, req)
func (*NodeServer) mount(
ctx context.Context,
mnt mounter.VolumeMounter,
volOptions *store.VolumeOptions,
volID fsutil.VolumeID,
stagingTargetPath string,
secrets map[string]string,
volCap *csi.VolumeCapability,
) error {
cr, err := getCredentialsForVolume(volOptions, secrets)
if err != nil {
log.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)
@ -154,20 +207,13 @@ func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, r
}
defer cr.DeleteCredentials()
m, err := mounter.New(volOptions)
if err != nil {
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.Name())
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name())
readOnly := "ro"
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
switch m.(type) {
if volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
switch mnt.(type) {
case *mounter.FuseMounter:
if !csicommon.MountOptionContains(strings.Split(volOptions.FuseMountOptions, ","), readOnly) {
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, readOnly)
@ -179,7 +225,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, r
}
}
if err = m.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
if err = mnt.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
log.ErrorLog(ctx,
"failed to mount volume %s: %v Check dmesg logs if required.",
volID,
@ -201,8 +247,9 @@ func (ns *NodeServer) NodePublishVolume(
return nil, err
}
stagingTargetPath := req.GetStagingTargetPath()
targetPath := req.GetTargetPath()
volID := req.GetVolumeId()
volID := fsutil.VolumeID(req.GetVolumeId())
// Considering kubelet makes sure the stage and publish operations
// are serialized, we don't need any extra locking in nodePublish
@ -213,12 +260,34 @@ func (ns *NodeServer) NodePublishVolume(
return nil, status.Error(codes.Internal, err.Error())
}
if err := ns.tryRestoreFuseMountsInNodePublish(
ctx,
volID,
stagingTargetPath,
targetPath,
req.GetVolumeContext(),
); err != nil {
return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
}
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability())
// Ensure staging target path is a mountpoint.
if isMnt, err := util.IsMountPoint(stagingTargetPath); err != nil {
log.ErrorLog(ctx, "stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())
} else if !isMnt {
return nil, status.Errorf(
codes.Internal, "staging path %s for volume %s is not a mountpoint", stagingTargetPath, volID,
)
}
// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(targetPath)
@ -238,8 +307,8 @@ func (ns *NodeServer) NodePublishVolume(
if err = mounter.BindMount(
ctx,
req.GetStagingTargetPath(),
req.GetTargetPath(),
stagingTargetPath,
targetPath,
req.GetReadonly(),
mountOptions); err != nil {
log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
@ -260,11 +329,14 @@ func (ns *NodeServer) NodeUnpublishVolume(
if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil {
return nil, err
}
// Considering kubelet makes sure node operations like unpublish/unstage cannot be called
// at the same time, explicit locking at the time of nodeunpublish is not required.
targetPath := req.GetTargetPath()
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
log.ErrorLog(ctx, "stat failed: %v", err)
if os.IsNotExist(err) {
// targetPath has already been deleted
log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
@ -272,7 +344,14 @@ func (ns *NodeServer) NodeUnpublishVolume(
return &csi.NodeUnpublishVolumeResponse{}, nil
}
return nil, status.Error(codes.Internal, err.Error())
if !util.IsCorruptedMountError(err) {
return nil, status.Error(codes.Internal, err.Error())
}
// Corrupted mounts need to be unmounted properly too,
// regardless of the mounter used. Continue as normal.
log.DebugLog(ctx, "cephfs: detected corrupted mount in publish target path %s, trying to unmount anyway", targetPath)
isMnt = true
}
if !isMnt {
if err = os.RemoveAll(targetPath); err != nil {
@ -316,8 +395,16 @@ func (ns *NodeServer) NodeUnstageVolume(
stagingTargetPath := req.GetStagingTargetPath()
if err = fsutil.RemoveNodeStageMountinfo(fsutil.VolumeID(volID)); err != nil {
log.ErrorLog(ctx, "cephfs: failed to remove NodeStageMountinfo for volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
log.ErrorLog(ctx, "stat failed: %v", err)
if os.IsNotExist(err) {
// targetPath has already been deleted
log.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath)
@ -325,7 +412,16 @@ func (ns *NodeServer) NodeUnstageVolume(
return &csi.NodeUnstageVolumeResponse{}, nil
}
return nil, status.Error(codes.Internal, err.Error())
if !util.IsCorruptedMountError(err) {
return nil, status.Error(codes.Internal, err.Error())
}
// Corrupted mounts need to be unmounted properly too,
// regardless of the mounter used. Continue as normal.
log.DebugLog(ctx,
"cephfs: detected corrupted mount in staging target path %s, trying to unmount anyway",
stagingTargetPath)
isMnt = true
}
if !isMnt {
return &csi.NodeUnstageVolumeResponse{}, nil

View File

@ -0,0 +1,149 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"os"
"path"
"github.com/container-storage-interface/spec/lib/go/csi"
// google.golang.org/protobuf/encoding doesn't offer MessageV2().
"github.com/golang/protobuf/proto" // nolint:staticcheck // See comment above.
"google.golang.org/protobuf/encoding/protojson"
)
// This file provides functionality to store various mount information
// in a file. It's currently used to restore ceph-fuse mounts.
// Mount info is stored in `/csi/mountinfo`.
const (
mountinfoDir = "/csi/mountinfo"
)
// nodeStageMountinfoRecord describes a single
// record of mountinfo of a staged volume.
// encoding/json-friendly format.
// Only for internal use for marshaling and unmarshaling.
type nodeStageMountinfoRecord struct {
VolumeCapabilityProtoJSON string `json:",omitempty"`
MountOptions []string `json:",omitempty"`
Secrets map[string]string `json:",omitempty"`
}
// NodeStageMountinfo describes mountinfo of a volume.
type NodeStageMountinfo struct {
VolumeCapability *csi.VolumeCapability
Secrets map[string]string
MountOptions []string
}
func fmtNodeStageMountinfoFilename(volID VolumeID) string {
return path.Join(mountinfoDir, fmt.Sprintf("nodestage-%s.json", volID))
}
func (mi *NodeStageMountinfo) toNodeStageMountinfoRecord() (*nodeStageMountinfoRecord, error) {
bs, err := protojson.Marshal(proto.MessageV2(mi.VolumeCapability))
if err != nil {
return nil, err
}
return &nodeStageMountinfoRecord{
VolumeCapabilityProtoJSON: string(bs),
MountOptions: mi.MountOptions,
Secrets: mi.Secrets,
}, nil
}
func (r *nodeStageMountinfoRecord) toNodeStageMountinfo() (*NodeStageMountinfo, error) {
volCapability := &csi.VolumeCapability{}
if err := protojson.Unmarshal([]byte(r.VolumeCapabilityProtoJSON), proto.MessageV2(volCapability)); err != nil {
return nil, err
}
return &NodeStageMountinfo{
VolumeCapability: volCapability,
MountOptions: r.MountOptions,
Secrets: r.Secrets,
}, nil
}
// WriteNodeStageMountinfo writes mount info to a file.
func WriteNodeStageMountinfo(volID VolumeID, mi *NodeStageMountinfo) error {
// Write NodeStageMountinfo into JSON-formatted byte slice.
r, err := mi.toNodeStageMountinfoRecord()
if err != nil {
return err
}
bs, err := json.Marshal(r)
if err != nil {
return err
}
// Write the byte slice into file.
err = os.WriteFile(fmtNodeStageMountinfoFilename(volID), bs, 0o600)
if os.IsNotExist(err) {
return nil
}
return err
}
// GetNodeStageMountinfo tries to retrieve NodeStageMountinfoRecord for `volID`.
// If it doesn't exist, `(nil, nil)` is returned.
func GetNodeStageMountinfo(volID VolumeID) (*NodeStageMountinfo, error) {
// Read the file.
bs, err := os.ReadFile(fmtNodeStageMountinfoFilename(volID))
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
// Unmarshal JSON-formatted byte slice into NodeStageMountinfo struct.
r := &nodeStageMountinfoRecord{}
if err = json.Unmarshal(bs, r); err != nil {
return nil, err
}
mi, err := r.toNodeStageMountinfo()
if err != nil {
return nil, err
}
return mi, err
}
// RemoveNodeStageMountinfo tries to remove NodeStageMountinfo for `volID`.
// If no such record exists for `volID`, it's considered success too.
func RemoveNodeStageMountinfo(volID VolumeID) error {
if err := os.Remove(fmtNodeStageMountinfoFilename(volID)); err != nil {
if !os.IsNotExist(err) {
return err
}
}
return nil
}
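Taken together, the helpers above form a small write/read/remove lifecycle that the node server drives from NodeStageVolume, NodePublishVolume and NodeUnstageVolume. A standalone sketch of that round trip, with a made-up volume ID and secret key and with error handling trimmed; note that on a host without /csi/mountinfo the write is silently skipped, as the code above shows:

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"

	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
)

func main() {
	// hypothetical volume ID, only for illustration
	volID := fsutil.VolumeID("0001-0009-rook-ceph-0000000000000001")

	// NodeStageVolume records the capability and secrets of a ceph-fuse mount.
	if err := fsutil.WriteNodeStageMountinfo(volID, &fsutil.NodeStageMountinfo{
		VolumeCapability: &csi.VolumeCapability{},
		Secrets:          map[string]string{"userID": "csi-cephfs-node"},
	}); err != nil {
		panic(err)
	}

	// NodePublishVolume reads the record back when it suspects a broken mount.
	mi, err := fsutil.GetNodeStageMountinfo(volID)
	if err != nil {
		panic(err)
	}
	fmt.Printf("restored mountinfo: %+v\n", mi)

	// NodeUnstageVolume drops the record; a missing file is not treated as an error.
	_ = fsutil.RemoveNodeStageMountinfo(volID)
}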

View File

@ -139,6 +139,12 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
if pv.Spec.CSI == nil || pv.Spec.CSI.Driver != r.config.DriverName {
return nil
}
// PV is not attached to any PVC
if pv.Spec.ClaimRef == nil {
return nil
}
pvcNamespace := pv.Spec.ClaimRef.Namespace
requestName := pv.Name
volumeHandler := pv.Spec.CSI.VolumeHandle
secretName := ""
@ -171,7 +177,7 @@ func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.
}
defer cr.DeleteCredentials()
rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, cr)
rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, pvcNamespace, cr)
if err != nil {
log.ErrorLogMsg("failed to regenerate journal %s", err)

View File

@ -45,13 +45,6 @@ func (cs *DefaultControllerServer) ControllerUnpublishVolume(
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerExpandVolume expand volume.
func (cs *DefaultControllerServer) ControllerExpandVolume(
ctx context.Context,
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListVolumes lists volumes.
func (cs *DefaultControllerServer) ListVolumes(
ctx context.Context,
@ -81,20 +74,6 @@ func (cs *DefaultControllerServer) ControllerGetCapabilities(
}, nil
}
// CreateSnapshot creates snapshot.
func (cs *DefaultControllerServer) CreateSnapshot(
ctx context.Context,
req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// DeleteSnapshot deletes snapshot.
func (cs *DefaultControllerServer) DeleteSnapshot(
ctx context.Context,
req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListSnapshots lists snapshots.
func (cs *DefaultControllerServer) ListSnapshots(
ctx context.Context,

View File

@ -32,20 +32,6 @@ type DefaultNodeServer struct {
Type string
}
// NodeStageVolume returns unimplemented response.
func (ns *DefaultNodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// NodeUnstageVolume returns unimplemented response.
func (ns *DefaultNodeServer) NodeUnstageVolume(
ctx context.Context,
req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// NodeExpandVolume returns unimplemented response.
func (ns *DefaultNodeServer) NodeExpandVolume(
ctx context.Context,
@ -88,13 +74,6 @@ func (ns *DefaultNodeServer) NodeGetCapabilities(
}, nil
}
// NodeGetVolumeStats returns volume stats.
func (ns *DefaultNodeServer) NodeGetVolumeStats(
ctx context.Context,
req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ConstructMountOptions returns only unique mount options in slice.
func ConstructMountOptions(mountOptions []string, volCap *csi.VolumeCapability) []string {
if m := volCap.GetMount(); m != nil {

View File

@ -742,6 +742,36 @@ func (conn *Connection) StoreImageID(ctx context.Context, pool, reservedUUID, im
return nil
}
// StoreAttribute stores an attribute (key/value) in omap.
func (conn *Connection) StoreAttribute(ctx context.Context, pool, reservedUUID, attribute, value string) error {
key := conn.config.commonPrefix + attribute
err := setOMapKeys(ctx, conn, pool, conn.config.namespace, conn.config.cephUUIDDirectoryPrefix+reservedUUID,
map[string]string{key: value})
if err != nil {
return fmt.Errorf("failed to set key %q to %q: %w", key, value, err)
}
return nil
}
// FetchAttribute fetches an attribute (key) in omap.
func (conn *Connection) FetchAttribute(ctx context.Context, pool, reservedUUID, attribute string) (string, error) {
key := conn.config.commonPrefix + attribute
values, err := getOMapValues(
ctx, conn, pool, conn.config.namespace, conn.config.cephUUIDDirectoryPrefix+reservedUUID,
conn.config.commonPrefix, []string{key})
if err != nil {
return "", fmt.Errorf("failed to get values for key %q from OMAP: %w", key, err)
}
value, ok := values[key]
if !ok {
return "", fmt.Errorf("failed to find key %q in returned map: %v", key, values)
}
return value, nil
}
// Destroy frees any resources and invalidates the journal connection.
func (conn *Connection) Destroy() {
// invalidate cluster connection metadata

View File

@ -0,0 +1,236 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"github.com/ceph/ceph-csi/internal/util/k8s"
awsSTS "github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/aws-sdk-go/aws"
awsCreds "github.com/aws/aws-sdk-go/aws/credentials"
awsSession "github.com/aws/aws-sdk-go/aws/session"
awsKMS "github.com/aws/aws-sdk-go/service/kms"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
kmsTypeAWSSTSMetadata = "aws-sts-metadata"
// awsRoleSessionName is the name of the role session to connect with aws STS.
awsRoleSessionName = "ceph-csi-aws-sts-metadata"
// awsSTSMetadataDefaultSecretsName is the default name of the Kubernetes Secret
// that contains the credentials to access the Amazon KMS. The name of
// the Secret can be configured by setting the `secretName`
// option.
//
// #nosec:G101, value not credential, just references token.
awsSTSMetadataDefaultSecretsName = "ceph-csi-aws-credentials"
// awsSTSSecretNameKey is the key for the secret name in the config map.
awsSTSSecretNameKey = "secretName"
// The following options are part of the Kubernetes Secrets.
//
// #nosec:G101, value not credential, just configuration keys.
awsSTSRoleARNKey = "awsRoleARN"
awsSTSCMKARNKey = "awsCMKARN"
awsSTSRegionKey = "awsRegion"
// tokenFilePath is the path to the file containing the OIDC token.
//
// #nosec:G101, value not credential, just path to the token.
tokenFilePath = "/run/secrets/tokens/oidc-token"
)
var _ = RegisterProvider(Provider{
UniqueID: kmsTypeAWSSTSMetadata,
Initializer: initAWSSTSMetadataKMS,
})
type awsSTSMetadataKMS struct {
awsMetadataKMS
// AWS STS configuration options
role string
}
func initAWSSTSMetadataKMS(args ProviderInitArgs) (EncryptionKMS, error) {
kms := &awsSTSMetadataKMS{
awsMetadataKMS: awsMetadataKMS{
namespace: args.Tenant,
},
}
// get secret name if set, else use default.
err := setConfigString(&kms.secretName, args.Config, awsSTSSecretNameKey)
if errors.Is(err, errConfigOptionInvalid) {
return nil, err
} else if errors.Is(err, errConfigOptionMissing) {
kms.secretName = awsSTSMetadataDefaultSecretsName
}
// read the Kubernetes Secret with aws region, role & cmk ARN.
secrets, err := kms.getSecrets()
if err != nil {
return nil, fmt.Errorf("failed to get secrets: %w", err)
}
var found bool
kms.role, found = secrets[awsSTSRoleARNKey]
if !found {
return nil, fmt.Errorf("%w: %s", errConfigOptionMissing, awsSTSRoleARNKey)
}
kms.cmk, found = secrets[awsSTSCMKARNKey]
if !found {
return nil, fmt.Errorf("%w: %s", errConfigOptionMissing, awsSTSCMKARNKey)
}
kms.region, found = secrets[awsSTSRegionKey]
if !found {
return nil, fmt.Errorf("%w: %s", errConfigOptionMissing, awsSTSRegionKey)
}
return kms, nil
}
// getSecrets returns required STS configuration options from the Kubernetes Secret.
func (as *awsSTSMetadataKMS) getSecrets() (map[string]string, error) {
c, err := k8s.NewK8sClient()
if err != nil {
return nil, fmt.Errorf("failed to connect to Kubernetes to "+
"get Secret %s/%s: %w", as.namespace, as.secretName, err)
}
secret, err := c.CoreV1().Secrets(as.namespace).Get(context.TODO(),
as.secretName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get Secret %s/%s: %w",
as.namespace, as.secretName, err)
}
config := make(map[string]string)
for k, v := range secret.Data {
switch k {
case awsSTSRoleARNKey, awsSTSRegionKey, awsSTSCMKARNKey:
config[k] = string(v)
default:
return nil, fmt.Errorf("unsupported option for KMS "+
"provider %q: %s", kmsTypeAWSMetadata, k)
}
}
return config, nil
}
// getWebIdentityToken returns the web identity token from the file.
func (as *awsSTSMetadataKMS) getWebIdentityToken() (string, error) {
buf, err := os.ReadFile(tokenFilePath)
if err != nil {
return "", fmt.Errorf("failed to read oidc token file %q: %w",
tokenFilePath, err)
}
return string(buf), nil
}
// getServiceWithSTS returns a new awsSession established with the STS.
func (as *awsSTSMetadataKMS) getServiceWithSTS() (*awsKMS.KMS, error) {
webIdentityToken, err := as.getWebIdentityToken()
if err != nil {
return nil, fmt.Errorf("failed to get web identity token: %w", err)
}
client := awsSTS.New(awsSTS.Options{
Region: as.region,
})
output, err := client.AssumeRoleWithWebIdentity(context.TODO(),
&awsSTS.AssumeRoleWithWebIdentityInput{
RoleArn: aws.String(as.role),
RoleSessionName: aws.String(awsRoleSessionName),
WebIdentityToken: aws.String(webIdentityToken),
})
if err != nil {
return nil, fmt.Errorf("failed to assume role with web identity token: %w", err)
}
creds := awsCreds.NewStaticCredentials(*output.Credentials.AccessKeyId,
*output.Credentials.SecretAccessKey, *output.Credentials.SessionToken)
sess, err := awsSession.NewSessionWithOptions(awsSession.Options{
SharedConfigState: awsSession.SharedConfigDisable,
Config: aws.Config{
Credentials: creds,
Region: &as.region,
},
})
if err != nil {
return nil, fmt.Errorf("failed to create AWS session: %w", err)
}
return awsKMS.New(sess), nil
}
// EncryptDEK uses the Amazon KMS and the configured CMK to encrypt the DEK.
func (as *awsSTSMetadataKMS) EncryptDEK(_, plainDEK string) (string, error) {
svc, err := as.getServiceWithSTS()
if err != nil {
return "", fmt.Errorf("failed to get KMS service: %w", err)
}
result, err := svc.Encrypt(&awsKMS.EncryptInput{
KeyId: aws.String(as.cmk),
Plaintext: []byte(plainDEK),
})
if err != nil {
return "", fmt.Errorf("failed to encrypt DEK: %w", err)
}
// base64 encode the encrypted DEK, so that storing it should not have
// issues
return base64.StdEncoding.EncodeToString(result.CiphertextBlob), nil
}
// DecryptDEK uses the Amazon KMS and the configured CMK to decrypt the DEK.
func (as *awsSTSMetadataKMS) DecryptDEK(_, encryptedDEK string) (string, error) {
svc, err := as.getServiceWithSTS()
if err != nil {
return "", fmt.Errorf("failed to get KMS service: %w", err)
}
ciphertextBlob, err := base64.StdEncoding.DecodeString(encryptedDEK)
if err != nil {
return "", fmt.Errorf("failed to decode base64 cipher: %w",
err)
}
result, err := svc.Decrypt(&awsKMS.DecryptInput{
CiphertextBlob: ciphertextBlob,
})
if err != nil {
return "", fmt.Errorf("failed to decrypt DEK: %w", err)
}
return string(result.Plaintext), nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAWSSTSMetadataKMSRegistered(t *testing.T) {
t.Parallel()
_, ok := kmsManager.providers[kmsTypeAWSSTSMetadata]
assert.True(t, ok)
}
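The test only checks that the provider is registered with the kmsManager. As a usage sketch, once initAWSSTSMetadataKMS has returned an EncryptionKMS (which needs the ceph-csi-aws-credentials Secret and the projected OIDC token described above), encrypting and decrypting a data encryption key is a symmetric round trip. The helper name and the DEK value below are made up, and the snippet assumes it sits in the same kms package so that the EncryptionKMS type and "fmt" are available:

// roundTripDEK is a sketch of how callers exercise the provider; the first
// argument (the key identifier) is ignored by this KMS implementation.
func roundTripDEK(encKMS EncryptionKMS) error {
	encrypted, err := encKMS.EncryptDEK("", "illustrative-plaintext-dek")
	if err != nil {
		return err
	}

	// the ciphertext comes back base64 encoded and is safe to store as-is
	plain, err := encKMS.DecryptDEK("", encrypted)
	if err != nil {
		return err
	}

	if plain != "illustrative-plaintext-dek" {
		return fmt.Errorf("unexpected DEK after round trip: %q", plain)
	}

	return nil
}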

View File

@ -0,0 +1,152 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"github.com/ceph/ceph-csi/internal/cephfs"
"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Server struct of CEPH CSI driver with supported methods of CSI controller
// server spec.
type Server struct {
csi.UnimplementedControllerServer
// backendServer handles the CephFS requests
backendServer *cephfs.ControllerServer
}
// NewControllerServer initializes a controller server for the ceph CSI driver.
func NewControllerServer(d *csicommon.CSIDriver) *Server {
// global instance of the volume journal, yuck
store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(cephfs.CSIInstanceID, fsutil.RadosNamespace)
return &Server{
backendServer: cephfs.NewControllerServer(d),
}
}
// ControllerGetCapabilities uses the CephFS backendServer to return the
// capabilities that were set in the Driver.Run() function.
func (cs *Server) ControllerGetCapabilities(
ctx context.Context,
req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
return cs.backendServer.ControllerGetCapabilities(ctx, req)
}
// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *Server) ValidateVolumeCapabilities(
ctx context.Context,
req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
return cs.backendServer.ValidateVolumeCapabilities(ctx, req)
}
// CreateVolume creates the backing subvolume and on any error cleans up any
// created entities.
func (cs *Server) CreateVolume(
ctx context.Context,
req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
res, err := cs.backendServer.CreateVolume(ctx, req)
if err != nil {
return res, fmt.Errorf("failed to create CephFS volume: %w", err)
}
backend := res.Volume
log.DebugLog(ctx, "CephFS volume created: %s", backend.VolumeId)
secret := req.GetSecrets()
cr, err := util.NewAdminCredentials(secret)
if err != nil {
log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
nfsVolume, err := NewNFSVolume(ctx, backend.VolumeId)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err = nfsVolume.Connect(cr)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to connect: %v", err)
}
defer nfsVolume.Destroy()
err = nfsVolume.CreateExport(backend)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to create export: %v", err)
}
log.DebugLog(ctx, "published NFS-export: %s", nfsVolume)
// volume has been exported over NFS, set the "share" parameter to
// allow mounting
backend.VolumeContext["share"] = nfsVolume.GetExportPath()
return &csi.CreateVolumeResponse{Volume: backend}, nil
}
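After a successful CreateVolume the CO therefore gets back the CephFS VolumeContext with the NFS share path added. Roughly, and with purely illustrative values (the exact keys depend on the StorageClass parameters and on what the CephFS backend fills in):

// a sketch of the VolumeContext returned for an NFS-backed volume;
// every value below is illustrative
var exampleVolumeContext = map[string]string{
	"clusterID":     "my-ceph",                            // StorageClass parameter
	"fsName":        "myfs",                               // StorageClass parameter
	"nfsCluster":    "my-nfs",                             // StorageClass parameter
	"subvolumeName": "csi-vol-0a1b2c",                     // filled in by the CephFS backend
	"subvolumePath": "/volumes/csi/csi-vol-0a1b2c/9f8e7d", // filled in by the CephFS backend
	"share":         "/0001-0009-my-ceph-0000000000000001-0a1b2c", // GetExportPath(), added above
}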
// DeleteVolume deletes the volume in backend and its reservation.
func (cs *Server) DeleteVolume(
ctx context.Context,
req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
secret := req.GetSecrets()
cr, err := util.NewAdminCredentials(secret)
if err != nil {
log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
defer cr.DeleteCredentials()
nfsVolume, err := NewNFSVolume(ctx, req.GetVolumeId())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err = nfsVolume.Connect(cr)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to connect: %v", err)
}
defer nfsVolume.Destroy()
err = nfsVolume.DeleteExport()
// TODO: if the export does not exist, but the backend does, delete the backend
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "failed to delete export: %v", err)
}
log.DebugLog(ctx, "deleted NFS-export: %s", nfsVolume)
return cs.backendServer.DeleteVolume(ctx, req)
}

View File

@ -0,0 +1,317 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"strings"
fscore "github.com/ceph/ceph-csi/internal/cephfs/core"
"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
)
const (
// clusterNameKey is the key in OMAP that contains the name of the
// NFS-cluster. It will be prefixed with the journal configuration.
clusterNameKey = "nfs.cluster"
)
// NFSVolume presents the API for consumption by the CSI-controller to create,
// modify and delete the NFS-exported CephFS volume. Instances of this struct
// are short lived, they only exist as long as a CSI-procedure is active.
type NFSVolume struct {
// ctx is the context for this short living volume object
ctx context.Context
volumeID string
clusterID string
mons string
fscID int64
objectUUID string
// TODO: drop in favor of a go-ceph connection
cr *util.Credentials
connected bool
conn *util.ClusterConnection
}
// NewNFSVolume creates a new NFSVolume instance for the currently executing
// CSI-procedure.
func NewNFSVolume(ctx context.Context, volumeID string) (*NFSVolume, error) {
vi := util.CSIIdentifier{}
err := vi.DecomposeCSIID(volumeID)
if err != nil {
return nil, fmt.Errorf("error decoding volume ID (%s): %w", volumeID, err)
}
return &NFSVolume{
ctx: ctx,
volumeID: volumeID,
clusterID: vi.ClusterID,
fscID: vi.LocationID,
objectUUID: vi.ObjectUUID,
conn: &util.ClusterConnection{},
}, nil
}
// String returns a simple/short representation of the NFSVolume.
func (nv *NFSVolume) String() string {
return nv.volumeID
}
// Connect fetches cluster connection details (like MONs) and connects to the
// Ceph cluster. This uses go-ceph, so after Connect(), Destroy() should be
// called to cleanup resources.
func (nv *NFSVolume) Connect(cr *util.Credentials) error {
if nv.connected {
return nil
}
var err error
nv.mons, err = util.Mons(util.CsiConfigFile, nv.clusterID)
if err != nil {
return fmt.Errorf("failed to get MONs for cluster (%s): %w", nv.clusterID, err)
}
err = nv.conn.Connect(nv.mons, cr)
if err != nil {
return fmt.Errorf("failed to connect to cluster: %w", err)
}
nv.cr = cr
nv.connected = true
return nil
}
// Destroy cleans up resources once the NFSVolume instance is not needed
// anymore.
func (nv *NFSVolume) Destroy() {
if nv.connected {
nv.conn.Destroy()
nv.connected = false
}
}
// GetExportPath returns the path on the NFS-server that can be used for
// mounting.
func (nv *NFSVolume) GetExportPath() string {
return "/" + nv.volumeID
}
// CreateExport takes the (CephFS) CSI-volume and instructs Ceph Mgr to create
// a new NFS-export for the volume on the Ceph managed NFS-server.
func (nv *NFSVolume) CreateExport(backend *csi.Volume) error {
if !nv.connected {
return fmt.Errorf("can not created export for %q: not connected", nv)
}
fs := backend.VolumeContext["fsName"]
nfsCluster := backend.VolumeContext["nfsCluster"]
path := backend.VolumeContext["subvolumePath"]
err := nv.setNFSCluster(nfsCluster)
if err != nil {
return fmt.Errorf("failed to set NFS-cluster: %w", err)
}
// TODO: use new go-ceph API, see ceph/ceph-csi#2977
// new versions of Ceph use a different command, and the go-ceph API
// also seems to be different :-/
//
// run the new command, but fall back to the previous one in case of an
// error
cmds := [][]string{
// ceph nfs export create cephfs --cluster-id <cluster_id>
// --pseudo-path <pseudo_path> --fsname <fsname>
// [--readonly] [--path=/path/in/cephfs]
nv.createExportCommand("--cluster-id="+nfsCluster,
"--fsname="+fs, "--pseudo-path="+nv.GetExportPath(),
"--path="+path),
// ceph nfs export create cephfs ${FS} ${NFS} /${EXPORT} ${SUBVOL_PATH}
nv.createExportCommand(nfsCluster, fs, nv.GetExportPath(), path),
}
stderr, err := nv.retryIfInvalid(cmds)
if err != nil {
return fmt.Errorf("failed to create export %q in NFS-cluster %q"+
"(%v): %s", nv, nfsCluster, err, stderr)
}
return nil
}
// retryIfInvalid executes the "ceph" command, and falls back to the next cmd
// in case the error is EINVAL.
func (nv *NFSVolume) retryIfInvalid(cmds [][]string) (string, error) {
var (
stderr string
err error
)
for _, cmd := range cmds {
_, stderr, err = util.ExecCommand(nv.ctx, "ceph", cmd...)
// in case of an invalid command, fallback to the next one
if strings.Contains(stderr, "Error EINVAL: invalid command") {
continue
}
// If we get here, either no error, or an unexpected error
// happened. There is no need to retry another command.
break
}
return stderr, err
}
// createExportCommand returns the "ceph nfs export create ..." command
// arguments (without "ceph"). The order of the parameters matches old Ceph
// releases, new Ceph releases added --option formats, which can be added when
// passing the parameters to this function.
func (nv *NFSVolume) createExportCommand(nfsCluster, fs, export, path string) []string {
return []string{
"--id", nv.cr.ID,
"--keyfile=" + nv.cr.KeyFile,
"-m", nv.mons,
"nfs",
"export",
"create",
"cephfs",
fs,
nfsCluster,
export,
path,
}
}
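// For illustration (hedged, assuming the usual "ceph" CLI syntax): when
// CreateExport above calls this function with the flag-style arguments for a
// newer Ceph release, the assembled invocation corresponds to
//
//	ceph --id <id> --keyfile=<keyfile> -m <mons> \
//	  nfs export create cephfs \
//	  --fsname=<fsName> --cluster-id=<nfsCluster> \
//	  --pseudo-path=/<volumeID> --path=<subvolumePath>
//
// while older releases receive the same values positionally, as in the
// fallback command shown in CreateExport.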
// DeleteExport removes the NFS-export from the Ceph managed NFS-server.
func (nv *NFSVolume) DeleteExport() error {
if !nv.connected {
return fmt.Errorf("can not delete export for %q: not connected", nv)
}
nfsCluster, err := nv.getNFSCluster()
if err != nil {
return fmt.Errorf("failed to identify NFS cluster: %w", err)
}
// TODO: use new go-ceph API, see ceph/ceph-csi#2977
// new versions of Ceph use a different command, and the go-ceph API
// also seems to be different :-/
//
// run the new command, but fall back to the previous one in case of an
// error
cmds := [][]string{
// ceph nfs export rm <cluster_id> <pseudo_path>
nv.deleteExportCommand("rm", nfsCluster),
// ceph nfs export delete <cluster_id> <pseudo_path>
nv.deleteExportCommand("delete", nfsCluster),
}
stderr, err := nv.retryIfInvalid(cmds)
if err != nil {
return fmt.Errorf("failed to delete export %q from NFS-cluster"+
"%q (%v): %s", nv, nfsCluster, err, stderr)
}
return nil
}
// deleteExportCommand returns the "ceph nfs export delete ..." command
// arguments (without "ceph"). Old releases of Ceph expect "delete" as cmd,
// newer releases use "rm".
func (nv *NFSVolume) deleteExportCommand(cmd, nfsCluster string) []string {
return []string{
"--id", nv.cr.ID,
"--keyfile=" + nv.cr.KeyFile,
"-m", nv.mons,
"nfs",
"export",
cmd,
nfsCluster,
nv.GetExportPath(),
}
}
// getNFSCluster fetches the NFS-cluster name from the CephFS journal.
func (nv *NFSVolume) getNFSCluster() (string, error) {
if !nv.connected {
return "", fmt.Errorf("can not get NFS-cluster for %q: not connected", nv)
}
fs := fscore.NewFileSystem(nv.conn)
fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
if err != nil {
return "", fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
}
mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
if err != nil {
return "", fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
}
// Connect to cephfs' default radosNamespace (csi)
j, err := store.VolJournal.Connect(nv.mons, fsutil.RadosNamespace, nv.cr)
if err != nil {
return "", fmt.Errorf("failed to connect to journal: %w", err)
}
defer j.Destroy()
clusterName, err := j.FetchAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey)
if err != nil {
return "", fmt.Errorf("failed to get cluster name: %w", err)
}
return clusterName, nil
}
// setNFSCluster stores the NFS-cluster name in the CephFS journal.
func (nv *NFSVolume) setNFSCluster(clusterName string) error {
if !nv.connected {
return fmt.Errorf("can not set NFS-cluster for %q: not connected", nv)
}
fs := fscore.NewFileSystem(nv.conn)
fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
if err != nil {
return fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
}
mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
if err != nil {
return fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
}
// Connect to cephfs' default radosNamespace (csi)
j, err := store.VolJournal.Connect(nv.mons, fsutil.RadosNamespace, nv.cr)
if err != nil {
return fmt.Errorf("failed to connect to journal: %w", err)
}
defer j.Destroy()
err = j.StoreAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey, clusterName)
if err != nil {
return fmt.Errorf("failed to store cluster name: %w", err)
}
return nil
}

View File

@ -0,0 +1,77 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/nfs/controller"
"github.com/ceph/ceph-csi/internal/nfs/identity"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/container-storage-interface/spec/lib/go/csi"
)
// Driver contains the default identity and controller struct.
type Driver struct{}
// NewDriver returns a new ceph CSI NFS driver.
func NewDriver() *Driver {
return &Driver{}
}
// Run starts a non-blocking gRPC controller and identity server for the
// ceph CSI driver, which can serve multiple requests in parallel.
func (fs *Driver) Run(conf *util.Config) {
// Initialize default library driver
cd := csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
if cd == nil {
log.FatalLogMsg("failed to initialize CSI driver")
}
cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
})
// VolumeCapabilities are validated by the CephFS Controller
cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
})
// Create gRPC servers
server := csicommon.NewNonBlockingGRPCServer()
srv := csicommon.Servers{
IS: identity.NewIdentityServer(cd),
CS: controller.NewControllerServer(cd),
}
server.Start(conf.Endpoint, conf.HistogramOption, srv, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
log.WarningLogMsg("EnableGRPCMetrics is deprecated")
go util.StartMetricsServer(conf)
}
if conf.EnableProfiling {
if !conf.EnableGRPCMetrics {
go util.StartMetricsServer(conf)
}
log.DebugLogMsg("Registering profiling handler")
go util.EnableProfiling()
}
server.Wait()
}

View File

@ -0,0 +1,55 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package identity
import (
"context"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/container-storage-interface/spec/lib/go/csi"
)
// Server is the ceph CSI driver identity server, implementing the supported
// methods of the CSI identity server spec.
type Server struct {
*csicommon.DefaultIdentityServer
}
// NewIdentityServer initializes an identity server for the ceph CSI driver.
func NewIdentityServer(d *csicommon.CSIDriver) *Server {
return &Server{
DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
}
}
// GetPluginCapabilities returns available capabilities of the ceph driver.
func (is *Server) GetPluginCapabilities(
ctx context.Context,
req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{
Type: &csi.PluginCapability_Service_{
Service: &csi.PluginCapability_Service{
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
},
},
},
},
}, nil
}

View File

@ -56,23 +56,12 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
if err != nil {
switch {
case errors.Is(err, ErrSnapNotFound):
// check temporary image needs flatten, if yes add task to flatten the
// temporary clone
err = tempClone.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return false, err
}
// as the snapshot is not present, create new snapshot, clone and
// delete the temporary snapshot
err = createRBDClone(ctx, tempClone, rv, snap)
if err != nil {
return false, err
}
// check image needs flatten, if yes add task to flatten the clone
err = rv.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return false, err
}
return true, nil
@ -114,11 +103,6 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
return false, err
}
// check image needs flatten, if yes add task to flatten the clone
err = rv.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
return false, err
}
return true, nil
}
@ -186,10 +170,7 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
}
func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) error {
var (
errClone error
errFlatten error
)
var errClone error
// generate temp cloned volume
tempClone := rv.generateTempClone()
@ -218,62 +199,22 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
}
}
if err != nil || errFlatten != nil {
if !errors.Is(errFlatten, ErrFlattenInProgress) {
// cleanup snapshot
cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone)
if cErr != nil {
log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempSnap, tempClone, cErr)
}
if err != nil {
// cleanup snapshot
cErr := cleanUpSnapshot(ctx, parentVol, tempSnap, tempClone)
if cErr != nil {
log.ErrorLog(ctx, "failed to cleanup image %s or snapshot %s: %v", tempClone, tempSnap, cErr)
}
}
}()
// flatten clone
errFlatten = tempClone.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if errFlatten != nil {
return errFlatten
}
// create snap of temp clone from temporary cloned image
// create final clone
// delete snap of temp clone
errClone = createRBDClone(ctx, tempClone, rv, cloneSnap)
if errClone != nil {
// set errFlatten error to cleanup temporary snapshot and temporary clone
errFlatten = errors.New("failed to create user requested cloned image")
return errClone
}
return nil
}
func (rv *rbdVolume) flattenCloneImage(ctx context.Context) error {
tempClone := rv.generateTempClone()
// reducing the limit for cloned images to make sure the limit is in range,
// If the intermediate clone reaches the depth we may need to return ABORT
// error message as it need to be flatten before continuing, this may leak
// omap entries and stale temporary snapshots in corner cases, if we reduce
// the limit and check for the depth of the parent image clain itself we
// can flatten the parent images before used to avoid the stale omap entries.
hardLimit := rbdHardMaxCloneDepth
softLimit := rbdSoftMaxCloneDepth
// choosing 2 so that we don't need to flatten the image in the request.
const depthToAvoidFlatten = 2
if rbdHardMaxCloneDepth > depthToAvoidFlatten {
hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
}
if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
}
err := tempClone.getImageInfo()
if err == nil {
return tempClone.flattenRbdImage(ctx, false, hardLimit, softLimit)
}
if !errors.Is(err, ErrImageNotFound) {
return err
}
return rv.flattenRbdImage(ctx, false, hardLimit, softLimit)
}

View File

@ -22,6 +22,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"
librbd "github.com/ceph/go-ceph/rbd"
@ -123,13 +124,27 @@ func (cs *ControllerServer) parseVolCreateRequest(
rbdVol, err := genVolFromVolumeOptions(
ctx,
req.GetParameters(),
req.GetSecrets(),
isMultiWriter && isBlock,
false)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
// if the KMS is of type VaultToken, additional metadata is needed;
// depending on the tenant, the KMS can be configured with other
// options.
// FIXME: this works only on Kubernetes, how do other COs supply metadata?
// The namespace is derived from the `csi.storage.k8s.io/pvc/namespace`
// parameter.
// get the owner of the PVC, which is required for a few encryption-related
// operations
rbdVol.Owner = k8s.GetOwner(req.GetParameters())
err = rbdVol.initKMS(ctx, req.GetParameters(), req.GetSecrets())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
rbdVol.RequestName = req.GetName()
// Volume Size - Default is 1 GiB
@ -159,7 +174,8 @@ func (cs *ControllerServer) parseVolCreateRequest(
}
func buildCreateVolumeResponse(req *csi.CreateVolumeRequest, rbdVol *rbdVolume) *csi.CreateVolumeResponse {
volumeContext := req.GetParameters()
// remove kubernetes csi prefixed parameters.
volumeContext := k8s.RemoveCSIPrefixedParameters(req.GetParameters())
volumeContext["pool"] = rbdVol.Pool
volumeContext["journalPool"] = rbdVol.JournalPool
volumeContext["imageName"] = rbdVol.RbdImageName
@ -286,7 +302,7 @@ func (cs *ControllerServer) CreateVolume(
return nil, err
}
err = flattenParentImage(ctx, parentVol, cr)
err = flattenParentImage(ctx, parentVol, rbdSnap, cr)
if err != nil {
return nil, err
}
@ -297,11 +313,9 @@ func (cs *ControllerServer) CreateVolume(
}
defer func() {
if err != nil {
if !errors.Is(err, ErrFlattenInProgress) {
errDefer := undoVolReservation(ctx, rbdVol, cr)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
}
errDefer := undoVolReservation(ctx, rbdVol, cr)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", req.GetName(), errDefer)
}
}
}()
@ -318,12 +332,38 @@ func (cs *ControllerServer) CreateVolume(
return buildCreateVolumeResponse(req, rbdVol), nil
}
func flattenParentImage(ctx context.Context, rbdVol *rbdVolume, cr *util.Credentials) error {
// flattenParentImage is to be called before proceeding with creating a volume
// from a data source. It flattens the parent image as needed so that no
// flattening is required during or after the new volume creation.
// For a parent volume, its parent (temp clone or snapshot) is flattened.
// For a parent snapshot, the snapshot itself is flattened.
func flattenParentImage(
ctx context.Context,
rbdVol *rbdVolume,
rbdSnap *rbdSnapshot,
cr *util.Credentials) error {
// flatten the image's parent before the reservation to avoid stale
// entries after creation if we return an ABORT error and the
// DeleteVolume RPC is not called.
// Reduce the limit for cloned images to make sure the limit stays in range:
// if the intermediate clone reaches the depth limit we may need to return an
// ABORT error, as it needs to be flattened before continuing. This may leak
// omap entries and stale temporary snapshots in corner cases; by reducing
// the limit and checking the depth of the parent image chain itself, we
// can flatten the parent images up front and avoid the stale omap entries.
hardLimit := rbdHardMaxCloneDepth
softLimit := rbdSoftMaxCloneDepth
if rbdVol != nil {
// flatten the image or its parent before the reservation to avoid
// stale entries in post creation if we return ABORT error and the
// delete volume is not called
err := rbdVol.flattenCloneImage(ctx)
// choosing 2, since cloning an image creates a temp clone and a final clone,
// which add a total depth of 2.
const depthToAvoidFlatten = 2
if rbdHardMaxCloneDepth > depthToAvoidFlatten {
hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
}
if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
}
err := rbdVol.flattenParent(ctx, hardLimit, softLimit)
if err != nil {
return getGRPCErrorForCreateVolume(err)
}
@ -335,6 +375,32 @@ func flattenParentImage(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
return err
}
}
if rbdSnap != nil {
err := rbdSnap.Connect(cr)
if err != nil {
return getGRPCErrorForCreateVolume(err)
}
// in case of any error call Destroy for cleanup.
defer func() {
if err != nil {
rbdSnap.Destroy()
}
}()
// choosing 1, since restoring from a snapshot adds one level of depth.
const depthToAvoidFlatten = 1
if rbdHardMaxCloneDepth > depthToAvoidFlatten {
hardLimit = rbdHardMaxCloneDepth - depthToAvoidFlatten
}
if rbdSoftMaxCloneDepth > depthToAvoidFlatten {
softLimit = rbdSoftMaxCloneDepth - depthToAvoidFlatten
}
err = rbdSnap.flattenRbdImage(ctx, false, hardLimit, softLimit)
if err != nil {
return getGRPCErrorForCreateVolume(err)
}
}
return nil
}
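// A worked example of the limit arithmetic above (the values are illustrative,
// not the configured defaults): with rbdHardMaxCloneDepth=8 and
// rbdSoftMaxCloneDepth=4, a clone-from-volume request flattens the parent once
// the chain depth reaches hardLimit=6 (softLimit=2), while a
// restore-from-snapshot request flattens the snapshot at hardLimit=7
// (softLimit=3), leaving head-room for the temporary and final clones created
// afterwards.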
@ -574,10 +640,8 @@ func (cs *ControllerServer) createBackingImage(
defer func() {
if err != nil {
if !errors.Is(err, ErrFlattenInProgress) {
if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
}
if deleteErr := rbdVol.deleteImage(ctx); deleteErr != nil {
log.ErrorLog(ctx, "failed to delete rbd image: %s with error: %v", rbdVol, deleteErr)
}
}
}()
@ -586,15 +650,6 @@ func (cs *ControllerServer) createBackingImage(
return status.Error(codes.Internal, err.Error())
}
if rbdSnap != nil {
err = rbdVol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
if err != nil {
log.ErrorLog(ctx, "failed to flatten image %s: %v", rbdVol, err)
return err
}
}
return nil
}

View File

@ -265,22 +265,14 @@ func (ri *rbdImage) initKMS(ctx context.Context, volOptions, credentials map[str
}
// ParseEncryptionOpts returns kmsID and sets Owner attribute.
func (ri *rbdImage) ParseEncryptionOpts(ctx context.Context, volOptions map[string]string) (string, error) {
func (ri *rbdImage) ParseEncryptionOpts(
ctx context.Context,
volOptions map[string]string) (string, error) {
var (
err error
ok bool
encrypted, kmsID string
)
// if the KMS is of type VaultToken, additional metadata is needed
// depending on the tenant, the KMS can be configured with other
// options
// FIXME: this works only on Kubernetes, how do other CO supply metadata?
ri.Owner, ok = volOptions["csi.storage.k8s.io/pvc/namespace"]
if !ok {
log.DebugLog(ctx, "could not detect owner for %s", ri)
}
encrypted, ok = volOptions["encrypted"]
if !ok {
return "", nil

View File

@ -88,8 +88,8 @@ func (ri *rbdImage) promoteImage(force bool) error {
return nil
}
// forcePromoteImage promotes image to primary with force option with 1 minute
// timeout. If there is no response within 1 minute,the rbd CLI process will be
// forcePromoteImage promotes image to primary with force option with 2 minutes
// timeout. If there is no response within 2 minutes, the rbd CLI process will be
// killed and an error is returned.
func (rv *rbdVolume) forcePromoteImage(cr *util.Credentials) error {
promoteArgs := []string{
@ -102,7 +102,8 @@ func (rv *rbdVolume) forcePromoteImage(cr *util.Credentials) error {
}
_, stderr, err := util.ExecCommandWithTimeout(
context.TODO(),
time.Minute,
// 2 minutes timeout as the Replication RPC timeout is 2.5 minutes.
2*time.Minute,
"rbd",
promoteArgs...,
)

View File

@ -147,8 +147,7 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
func populateRbdVol(
ctx context.Context,
req *csi.NodeStageVolumeRequest,
cr *util.Credentials,
secrets map[string]string) (*rbdVolume, error) {
cr *util.Credentials) (*rbdVolume, error) {
var err error
var j *journal.Connection
volID := req.GetVolumeId()
@ -173,7 +172,7 @@ func populateRbdVol(
disableInUseChecks = true
}
rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), secrets, disableInUseChecks, true)
rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), disableInUseChecks, true)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@ -213,6 +212,8 @@ func populateRbdVol(
return nil, status.Error(codes.Internal, err.Error())
}
rv.RbdImageName = imageAttributes.ImageName
// set owner after extracting the owner name from the journal
rv.Owner = imageAttributes.Owner
}
err = rv.Connect(cr)
@ -235,6 +236,11 @@ func populateRbdVol(
return nil, status.Error(codes.Internal, err.Error())
}
err = rv.initKMS(ctx, req.GetVolumeContext(), req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if req.GetVolumeContext()["mounter"] == rbdDefaultMounter &&
!isKrbdFeatureSupported(ctx, strings.Join(rv.ImageFeatureSet.Names(), ",")) {
if !parseBoolOption(ctx, req.GetVolumeContext(), tryOtherMounters, false) {
@ -320,7 +326,7 @@ func (ns *NodeServer) NodeStageVolume(
}
isStaticVol := parseBoolOption(ctx, req.GetVolumeContext(), staticVol, false)
rv, err := populateRbdVol(ctx, req, cr, req.GetSecrets())
rv, err := populateRbdVol(ctx, req, cr)
if err != nil {
return nil, err
}

View File

@ -228,21 +228,19 @@ func setRbdNbdToolFeatures() {
// returns mounter specific options.
func parseMapOptions(mapOptions string) (string, string, error) {
var krbdMapOptions, nbdMapOptions string
const (
noKeyLength = 1
validLength = 2
)
for _, item := range strings.Split(mapOptions, ";") {
var mounter, options string
if item == "" {
continue
}
s := strings.Split(item, ":")
switch len(s) {
case noKeyLength:
s := strings.SplitN(item, ":", 2)
if len(s) == 1 {
options = strings.TrimSpace(s[0])
krbdMapOptions = options
case validLength:
} else {
// options might also contain values delimited with ":", in this
// case mounter type MUST be specified.
// ex: krbd:read_from_replica=localize,crush_location=zone:zone1;
mounter = strings.TrimSpace(s[0])
options = strings.TrimSpace(s[1])
switch strings.ToLower(mounter) {
@ -251,10 +249,8 @@ func parseMapOptions(mapOptions string) (string, string, error) {
case accessTypeNbd:
nbdMapOptions = options
default:
return "", "", fmt.Errorf("unknown mounter type: %q", mounter)
return "", "", fmt.Errorf("unknown mounter type: %q, please specify mounter type", mounter)
}
default:
return "", "", fmt.Errorf("badly formatted map/unmap options: %q", mapOptions)
}
}

View File

@ -60,18 +60,25 @@ func TestParseMapOptions(t *testing.T) {
expectErr: "",
},
{
name: "unknown mounter used",
mapOption: "xyz:xOp1,xOp2",
name: "with `:` delimiter used with in the options",
mapOption: "krbd:kOp1,kOp2=kOp21:kOp22;nbd:nOp1,nOp2=nOp21:nOp22",
expectKrbdOptions: "kOp1,kOp2=kOp21:kOp22",
expectNbdOptions: "nOp1,nOp2=nOp21:nOp22",
expectErr: "",
},
{
name: "with `:` delimiter used with in the options, without mounter label",
mapOption: "kOp1,kOp2=kOp21:kOp22;nbd:nOp1,nOp2",
expectKrbdOptions: "",
expectNbdOptions: "",
expectErr: "unknown mounter type",
},
{
name: "bad formatted options",
mapOption: "nbd:nOp1:nOp2;",
name: "unknown mounter used",
mapOption: "xyz:xOp1,xOp2",
expectKrbdOptions: "",
expectNbdOptions: "",
expectErr: "badly formatted map/unmap options",
expectErr: "unknown mounter type",
},
}
for _, tt := range tests {

View File

@ -267,7 +267,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
rv.RbdImageName = imageData.ImageAttributes.ImageName
rv.ImageID = imageData.ImageAttributes.ImageID
// check if topology constraints match what is found
rv.Topology, err = util.MatchTopologyForPool(rv.TopologyPools, rv.TopologyRequirement,
_, _, rv.Topology, err = util.MatchPoolAndTopology(rv.TopologyPools, rv.TopologyRequirement,
imageData.ImagePool)
if err != nil {
// TODO check if need any undo operation here, or ErrVolNameConflict
@ -303,7 +303,6 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
return false, err
}
// TODO: check image needs flattening and completed?
err = rv.repairImageID(ctx, j, false)
if err != nil {
@ -413,7 +412,10 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
var err error
if rbdSnap != nil {
// check if topology constraints matches snapshot pool
rbdVol.Topology, err = util.MatchTopologyForPool(rbdVol.TopologyPools,
var poolName string
var dataPoolName string
poolName, dataPoolName, rbdVol.Topology, err = util.MatchPoolAndTopology(rbdVol.TopologyPools,
rbdVol.TopologyRequirement, rbdSnap.Pool)
if err != nil {
return err
@ -421,7 +423,8 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
// update Pool, if it was topology constrained
if rbdVol.Topology != nil {
rbdVol.Pool = rbdSnap.Pool
rbdVol.Pool = poolName
rbdVol.DataPool = dataPoolName
}
return nil
@ -532,7 +535,7 @@ func undoVolReservation(ctx context.Context, rbdVol *rbdVolume, cr *util.Credent
// which are not same across clusters.
func RegenerateJournal(
volumeAttributes map[string]string,
volumeID, requestName string,
volumeID, requestName, owner string,
cr *util.Credentials) (string, error) {
ctx := context.Background()
var (
@ -552,6 +555,8 @@ func RegenerateJournal(
ErrInvalidVolID, err, rbdVol.VolID)
}
rbdVol.Owner = owner
kmsID, err = rbdVol.ParseEncryptionOpts(ctx, volumeAttributes)
if err != nil {
return "", err

View File

@ -1142,7 +1142,7 @@ func generateVolumeFromMapping(
func genVolFromVolumeOptions(
ctx context.Context,
volOptions, credentials map[string]string,
volOptions map[string]string,
disableInUseChecks, checkClusterIDMapping bool) (*rbdVolume, error) {
var (
ok bool
@ -1195,11 +1195,6 @@ func genVolFromVolumeOptions(
rbdVol.Mounter)
rbdVol.DisableInUseChecks = disableInUseChecks
err = rbdVol.initKMS(ctx, volOptions, credentials)
if err != nil {
return nil, err
}
return rbdVol, nil
}
@ -1440,6 +1435,47 @@ func (ri *rbdImage) getImageInfo() error {
return nil
}
// getParent returns parent image if it exists.
func (ri *rbdImage) getParent() (*rbdImage, error) {
err := ri.getImageInfo()
if err != nil {
return nil, err
}
if ri.ParentName == "" {
return nil, nil
}
parentImage := rbdImage{}
parentImage.conn = ri.conn.Copy()
parentImage.ClusterID = ri.ClusterID
parentImage.Monitors = ri.Monitors
parentImage.Pool = ri.ParentPool
parentImage.RadosNamespace = ri.RadosNamespace
parentImage.RbdImageName = ri.ParentName
err = parentImage.getImageInfo()
if err != nil {
return nil, err
}
return &parentImage, nil
}
// flattenParent flattens the given image's parent, if it exists, according to
// the hard and soft limits.
func (ri *rbdImage) flattenParent(ctx context.Context, hardLimit, softLimit uint) error {
parentImage, err := ri.getParent()
if err != nil {
return err
}
if parentImage == nil {
return nil
}
return parentImage.flattenRbdImage(ctx, false, hardLimit, softLimit)
}
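// Hedged illustration of the intended call pattern (mirroring the
// controller-side flattenParentImage shown earlier in this change): the caller
// reduces the configured limits before delegating, e.g.
//
//	// leave head-room of 2 in the clone chain for the temp + final clone
//	err := rv.flattenParent(ctx, rbdHardMaxCloneDepth-2, rbdSoftMaxCloneDepth-2)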
/*
checkSnapExists queries rbd about the snapshots of the given image and returns
ErrImageNotFound if provided image is not found, and ErrSnapNotFound if

View File

@ -0,0 +1,45 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
"strings"
)
// CSI Parameters prefixed with csiParameterPrefix are passed through
// to the driver on CreateVolumeRequest/CreateSnapshotRequest calls.
const (
csiParameterPrefix = "csi.storage.k8s.io/"
pvcNamespaceKey = "csi.storage.k8s.io/pvc/namespace"
)
// RemoveCSIPrefixedParameters removes parameters prefixed with csiParameterPrefix.
func RemoveCSIPrefixedParameters(param map[string]string) map[string]string {
newParam := map[string]string{}
for k, v := range param {
if !strings.HasPrefix(k, csiParameterPrefix) {
// add the parameter to the new map if it does not have the prefix
newParam[k] = v
}
}
return newParam
}
// GetOwner returns the PVC namespace name from the parameters.
func GetOwner(param map[string]string) string {
return param[pvcNamespaceKey]
}

View File

@ -0,0 +1,95 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import (
"reflect"
"testing"
)
func TestRemoveCSIPrefixedParameters(t *testing.T) {
t.Parallel()
tests := []struct {
name string
param map[string]string
want map[string]string
}{
{
name: "without csi.storage.k8s.io prefix",
param: map[string]string{
"foo": "bar",
},
want: map[string]string{
"foo": "bar",
},
},
{
name: "with csi.storage.k8s.io prefix",
param: map[string]string{
"foo": "bar",
"csi.storage.k8s.io/pvc/name": "foo",
"csi.storage.k8s.io/pvc/namespace": "bar",
"csi.storage.k8s.io/pv/name": "baz",
},
want: map[string]string{
"foo": "bar",
},
},
}
for _, tt := range tests {
ts := tt
t.Run(ts.name, func(t *testing.T) {
t.Parallel()
got := RemoveCSIPrefixedParameters(ts.param)
if !reflect.DeepEqual(got, ts.want) {
t.Errorf("RemoveCSIPrefixedParameters() = %v, want %v", got, ts.want)
}
})
}
}
func TestGetOwner(t *testing.T) {
t.Parallel()
tests := []struct {
name string
args map[string]string
want string
}{
{
name: "namespace is not present in the parameters",
args: map[string]string{
"foo": "bar",
},
want: "",
},
{
name: "namespace is present in the parameters",
args: map[string]string{
"csi.storage.k8s.io/pvc/namespace": "bar",
},
want: "bar",
},
}
for _, tt := range tests {
ts := tt
t.Run(ts.name, func(t *testing.T) {
t.Parallel()
if got := GetOwner(ts.args); got != ts.want {
t.Errorf("GetOwner() = %v, want %v", got, ts.want)
}
})
}
}

View File

@ -0,0 +1,85 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
goerrors "errors"
"fmt"
"github.com/ceph/go-ceph/rados"
"golang.org/x/sys/unix"
)
// ErrObjectOutOfDate is an error returned by RADOS read/write ops whose
// rados_*_op_assert_version failed.
var ErrObjectOutOfDate = goerrors.New("object is out of date since the last time it was read, try again later")
// UnexpectedReadSize formats an error message for a failure due to bad read
// size.
func UnexpectedReadSize(expectedBytes, actualBytes int) error {
return fmt.Errorf("unexpected size read: expected %d bytes, got %d",
expectedBytes, actualBytes)
}
// UnknownObjectVersion formats an error message for a failure due to unknown
// reftracker object version.
func UnknownObjectVersion(unknownVersion uint32) error {
return fmt.Errorf("unknown reftracker version %d", unknownVersion)
}
// FailedObjectRead formats an error message for a failed RADOS read op.
func FailedObjectRead(cause error) error {
if cause != nil {
return fmt.Errorf("failed to read object: %w", TryRADOSAborted(cause))
}
return nil
}
// FailedObjectWrite formats an error message for a failed RADOS write op.
func FailedObjectWrite(cause error) error {
if cause != nil {
return fmt.Errorf("failed to write object: %w", TryRADOSAborted(cause))
}
return nil
}
// TryRADOSAborted tries to extract rados_*_op_assert_version from opErr.
func TryRADOSAborted(opErr error) error {
if opErr == nil {
return nil
}
var radosOpErr rados.OperationError
if !goerrors.As(opErr, &radosOpErr) {
return opErr
}
// nolint:errorlint // Can't use errors.As() because rados.radosError is private.
errnoErr, ok := radosOpErr.OpError.(interface{ ErrorCode() int })
if !ok {
return opErr
}
errno := errnoErr.ErrorCode()
if errno == -int(unix.EOVERFLOW) || errno == -int(unix.ERANGE) {
return ErrObjectOutOfDate
}
return nil
}
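// A short usage sketch (illustrative only, the surrounding variables are
// assumed): callers wrap the result of a RADOS write/read op with the helpers
// above, so that assert_version failures surface as ErrObjectOutOfDate and can
// be retried:
//
//	if err := wop.Operate(oid); err != nil {
//		return errors.FailedObjectWrite(err)
//	}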

View File

@ -0,0 +1,551 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package radoswrapper
import (
"fmt"
"github.com/ceph/go-ceph/rados"
"golang.org/x/sys/unix"
)
type (
FakeObj struct {
Oid string
Ver uint64
Xattrs map[string][]byte
Omap map[string][]byte
Data []byte
}
FakeRados struct {
Objs map[string]*FakeObj
}
FakeIOContext struct {
LastObjVersion uint64
Rados *FakeRados
}
FakeWriteOp struct {
IoCtx *FakeIOContext
steps map[fakeWriteOpStepExecutorIdx]fakeWriteOpStepExecutor
oid string
}
FakeReadOp struct {
IoCtx *FakeIOContext
steps map[fakeReadOpStepExecutorIdx]fakeReadOpStepExecutor
oid string
}
fakeWriteOpStepExecutorIdx int
fakeReadOpStepExecutorIdx int
fakeWriteOpStepExecutor interface {
operate(w *FakeWriteOp) error
}
fakeReadOpStepExecutor interface {
operate(r *FakeReadOp) error
}
fakeRadosError int
)
const (
fakeWriteOpAssertVersionExecutorIdx fakeWriteOpStepExecutorIdx = iota
fakeWriteOpRemoveExecutorIdx
fakeWriteOpCreateExecutorIdx
fakeWriteOpSetXattrExecutorIdx
fakeWriteOpWriteFullExecutorIdx
fakeWriteOpRmOmapKeysExecutorIdx
fakeWriteOpSetOmapExecutorIdx
fakeReadOpAssertVersionExecutorIdx fakeReadOpStepExecutorIdx = iota
fakeReadOpReadExecutorIdx
fakeReadOpGetOmapValuesByKeysExecutorIdx
)
var (
_ IOContextW = &FakeIOContext{}
// fakeWriteOpStepExecutorOrder defines fixed order in which the write ops are performed.
fakeWriteOpStepExecutorOrder = []fakeWriteOpStepExecutorIdx{
fakeWriteOpAssertVersionExecutorIdx,
fakeWriteOpRemoveExecutorIdx,
fakeWriteOpCreateExecutorIdx,
fakeWriteOpSetXattrExecutorIdx,
fakeWriteOpWriteFullExecutorIdx,
fakeWriteOpRmOmapKeysExecutorIdx,
fakeWriteOpSetOmapExecutorIdx,
}
// fakeReadOpStepExecutorOrder defines fixed order in which the read ops are performed.
fakeReadOpStepExecutorOrder = []fakeReadOpStepExecutorIdx{
fakeReadOpAssertVersionExecutorIdx,
fakeReadOpReadExecutorIdx,
fakeReadOpGetOmapValuesByKeysExecutorIdx,
}
)
func NewFakeRados() *FakeRados {
return &FakeRados{
Objs: make(map[string]*FakeObj),
}
}
func NewFakeIOContext(fakeRados *FakeRados) *FakeIOContext {
return &FakeIOContext{
Rados: fakeRados,
}
}
func (e fakeRadosError) Error() string {
return fmt.Sprintf("FakeRados errno=%d", int(e))
}
func (e fakeRadosError) ErrorCode() int {
return int(e)
}
func (o *FakeObj) String() string {
return fmt.Sprintf("%s{Ver=%d, Xattrs(%d)=%+v, OMap(%d)=%+v, Data(%d)=%+v}",
o.Oid, o.Ver, len(o.Xattrs), o.Xattrs, len(o.Omap), o.Omap, len(o.Data), o.Data)
}
func (c *FakeIOContext) GetLastVersion() (uint64, error) {
return c.LastObjVersion, nil
}
func (c *FakeIOContext) getObj(oid string) (*FakeObj, error) {
obj, ok := c.Rados.Objs[oid]
if !ok {
return nil, rados.ErrNotFound
}
return obj, nil
}
func (c *FakeIOContext) GetXattr(oid, key string, data []byte) (int, error) {
obj, ok := c.Rados.Objs[oid]
if !ok {
return 0, rados.ErrNotFound
}
xattr, ok := obj.Xattrs[key]
if !ok {
return 0, fakeRadosError(-int(unix.ENODATA))
}
copy(data, xattr)
return len(xattr), nil
}
func (c *FakeIOContext) CreateWriteOp() WriteOpW {
return &FakeWriteOp{
IoCtx: c,
steps: make(map[fakeWriteOpStepExecutorIdx]fakeWriteOpStepExecutor),
}
}
func (w *FakeWriteOp) Operate(oid string) error {
if len(w.steps) == 0 {
return nil
}
w.oid = oid
for _, writeOpExecutorIdx := range fakeWriteOpStepExecutorOrder {
e, ok := w.steps[writeOpExecutorIdx]
if !ok {
continue
}
if err := e.operate(w); err != nil {
return err
}
}
if obj, err := w.IoCtx.getObj(oid); err == nil {
obj.Ver++
w.IoCtx.LastObjVersion = obj.Ver
}
return nil
}
func (w *FakeWriteOp) Release() {}
func (c *FakeIOContext) CreateReadOp() ReadOpW {
return &FakeReadOp{
IoCtx: c,
steps: make(map[fakeReadOpStepExecutorIdx]fakeReadOpStepExecutor),
}
}
func (r *FakeReadOp) Operate(oid string) error {
r.oid = oid
for _, readOpExecutorIdx := range fakeReadOpStepExecutorOrder {
e, ok := r.steps[readOpExecutorIdx]
if !ok {
continue
}
if err := e.operate(r); err != nil {
return err
}
}
if obj, err := r.IoCtx.getObj(oid); err == nil {
r.IoCtx.LastObjVersion = obj.Ver
}
return nil
}
func (r *FakeReadOp) Release() {}
// WriteOp Create
type fakeWriteOpCreateExecutor struct {
exclusive rados.CreateOption
}
func (e *fakeWriteOpCreateExecutor) operate(w *FakeWriteOp) error {
if e.exclusive == rados.CreateExclusive {
if _, exists := w.IoCtx.Rados.Objs[w.oid]; exists {
return rados.ErrObjectExists
}
}
w.IoCtx.Rados.Objs[w.oid] = &FakeObj{
Oid: w.oid,
Omap: make(map[string][]byte),
Xattrs: make(map[string][]byte),
}
return nil
}
func (w *FakeWriteOp) Create(exclusive rados.CreateOption) {
w.steps[fakeWriteOpCreateExecutorIdx] = &fakeWriteOpCreateExecutor{
exclusive: exclusive,
}
}
// WriteOp Remove
type fakeWriteOpRemoveExecutor struct{}
func (e *fakeWriteOpRemoveExecutor) operate(w *FakeWriteOp) error {
if _, err := w.IoCtx.getObj(w.oid); err != nil {
return err
}
delete(w.IoCtx.Rados.Objs, w.oid)
return nil
}
func (w *FakeWriteOp) Remove() {
w.steps[fakeWriteOpRemoveExecutorIdx] = &fakeWriteOpRemoveExecutor{}
}
// WriteOp SetXattr
type fakeWriteOpSetXattrExecutor struct {
name string
value []byte
}
func (e *fakeWriteOpSetXattrExecutor) operate(w *FakeWriteOp) error {
obj, err := w.IoCtx.getObj(w.oid)
if err != nil {
return err
}
obj.Xattrs[e.name] = e.value
return nil
}
func (w *FakeWriteOp) SetXattr(name string, value []byte) {
valueCopy := append([]byte(nil), value...)
w.steps[fakeWriteOpSetXattrExecutorIdx] = &fakeWriteOpSetXattrExecutor{
name: name,
value: valueCopy,
}
}
// WriteOp WriteFull
type fakeWriteOpWriteFullExecutor struct {
data []byte
}
func (e *fakeWriteOpWriteFullExecutor) operate(w *FakeWriteOp) error {
obj, err := w.IoCtx.getObj(w.oid)
if err != nil {
return err
}
obj.Data = e.data
return nil
}
func (w *FakeWriteOp) WriteFull(b []byte) {
bCopy := append([]byte(nil), b...)
w.steps[fakeWriteOpWriteFullExecutorIdx] = &fakeWriteOpWriteFullExecutor{
data: bCopy,
}
}
// WriteOp SetOmap
type fakeWriteOpSetOmapExecutor struct {
pairs map[string][]byte
}
func (e *fakeWriteOpSetOmapExecutor) operate(w *FakeWriteOp) error {
obj, err := w.IoCtx.getObj(w.oid)
if err != nil {
return err
}
for k, v := range e.pairs {
obj.Omap[k] = v
}
return nil
}
func (w *FakeWriteOp) SetOmap(pairs map[string][]byte) {
pairsCopy := make(map[string][]byte, len(pairs))
for k, v := range pairs {
vCopy := append([]byte(nil), v...)
pairsCopy[k] = vCopy
}
w.steps[fakeWriteOpSetOmapExecutorIdx] = &fakeWriteOpSetOmapExecutor{
pairs: pairsCopy,
}
}
// WriteOp RmOmapKeys
type fakeWriteOpRmOmapKeysExecutor struct {
keys []string
}
func (e *fakeWriteOpRmOmapKeysExecutor) operate(w *FakeWriteOp) error {
obj, err := w.IoCtx.getObj(w.oid)
if err != nil {
return err
}
for _, k := range e.keys {
delete(obj.Omap, k)
}
return nil
}
func (w *FakeWriteOp) RmOmapKeys(keys []string) {
keysCopy := append([]string(nil), keys...)
w.steps[fakeWriteOpRmOmapKeysExecutorIdx] = &fakeWriteOpRmOmapKeysExecutor{
keys: keysCopy,
}
}
// WriteOp AssertVersion
type fakeWriteOpAssertVersionExecutor struct {
version uint64
}
func (e *fakeWriteOpAssertVersionExecutor) operate(w *FakeWriteOp) error {
obj, err := w.IoCtx.getObj(w.oid)
if err != nil {
return err
}
return validateObjVersion(obj.Ver, e.version)
}
func (w *FakeWriteOp) AssertVersion(v uint64) {
w.steps[fakeWriteOpAssertVersionExecutorIdx] = &fakeWriteOpAssertVersionExecutor{
version: v,
}
}
// ReadOp Read
type fakeReadOpReadExecutor struct {
offset int
buffer []byte
step *rados.ReadOpReadStep
}
func (e *fakeReadOpReadExecutor) operate(r *FakeReadOp) error {
obj, err := r.IoCtx.getObj(r.oid)
if err != nil {
return err
}
if e.offset > len(obj.Data) {
// RADOS just returns zero bytes read.
return nil
}
end := e.offset + len(e.buffer)
if end > len(obj.Data) {
end = len(obj.Data)
}
nbytes := end - e.offset
e.step.BytesRead = int64(nbytes)
copy(e.buffer, obj.Data[e.offset:])
return nil
}
func (r *FakeReadOp) Read(offset uint64, buffer []byte) *rados.ReadOpReadStep {
s := &rados.ReadOpReadStep{}
r.steps[fakeReadOpReadExecutorIdx] = &fakeReadOpReadExecutor{
offset: int(offset),
buffer: buffer,
step: s,
}
return s
}
// ReadOp GetOmapValuesByKeys
type (
fakeReadOpGetOmapValuesByKeysExecutor struct {
keys []string
step *FakeReadOpOmapGetValsByKeysStep
}
FakeReadOpOmapGetValsByKeysStep struct {
pairs []rados.OmapKeyValue
idx int
canIterate bool
}
)
func (e *fakeReadOpGetOmapValuesByKeysExecutor) operate(r *FakeReadOp) error {
obj, err := r.IoCtx.getObj(r.oid)
if err != nil {
return err
}
var pairs []rados.OmapKeyValue
for _, key := range e.keys {
val, ok := obj.Omap[key]
if !ok {
continue
}
pairs = append(pairs, rados.OmapKeyValue{
Key: key,
Value: val,
})
}
e.step.pairs = pairs
e.step.canIterate = true
return nil
}
func (s *FakeReadOpOmapGetValsByKeysStep) Next() (*rados.OmapKeyValue, error) {
if !s.canIterate {
return nil, rados.ErrOperationIncomplete
}
if s.idx >= len(s.pairs) {
return nil, nil
}
omapKeyValue := &s.pairs[s.idx]
s.idx++
return omapKeyValue, nil
}
func (r *FakeReadOp) GetOmapValuesByKeys(keys []string) ReadOpOmapGetValsByKeysStepW {
keysCopy := append([]string(nil), keys...)
s := &FakeReadOpOmapGetValsByKeysStep{}
r.steps[fakeReadOpGetOmapValuesByKeysExecutorIdx] = &fakeReadOpGetOmapValuesByKeysExecutor{
keys: keysCopy,
step: s,
}
return s
}
// ReadOp AssertVersion
type fakeReadOpAssertVersionExecutor struct {
version uint64
}
func (e *fakeReadOpAssertVersionExecutor) operate(r *FakeReadOp) error {
obj, err := r.IoCtx.getObj(r.oid)
if err != nil {
return err
}
return validateObjVersion(obj.Ver, e.version)
}
func (r *FakeReadOp) AssertVersion(v uint64) {
r.steps[fakeReadOpAssertVersionExecutorIdx] = &fakeReadOpAssertVersionExecutor{
version: v,
}
}
func validateObjVersion(expected, actual uint64) error {
// See librados docs for returning error codes in rados_*_op_assert_version:
// https://docs.ceph.com/en/latest/rados/api/librados/?#c.rados_write_op_assert_version
// https://docs.ceph.com/en/latest/rados/api/librados/?#c.rados_read_op_assert_version
if expected > actual {
return rados.OperationError{
OpError: fakeRadosError(-int(unix.ERANGE)),
}
}
if expected < actual {
return rados.OperationError{
OpError: fakeRadosError(-int(unix.EOVERFLOW)),
}
}
return nil
}

View File

@ -0,0 +1,106 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package radoswrapper
import (
"github.com/ceph/go-ceph/rados"
)
// These interfaces are just wrappers around some of go-ceph's rados pkg
// structures and functions. They have two implementations: the "real" one
// (that simply uses go-ceph), and a fake one, used in unit tests.
// IOContextW is a wrapper around rados.IOContext.
type IOContextW interface {
// GetLastVersion will return the version number of the last object read or
// written to.
GetLastVersion() (uint64, error)
// GetXattr gets the xattr with the given key; it returns the number of
// bytes read into data, or an error if not successful.
GetXattr(oid string, key string, data []byte) (int, error)
// CreateWriteOp returns a newly constructed write operation.
CreateWriteOp() WriteOpW
// CreateReadOp returns a newly constructed read operation.
CreateReadOp() ReadOpW
}
// WriteOpW is a wrapper around rados.WriteOp interface.
type WriteOpW interface {
// Create a rados object.
Create(exclusive rados.CreateOption)
// Remove object.
Remove()
// SetXattr sets an xattr.
SetXattr(name string, value []byte)
// WriteFull writes a given byte slice as the whole object,
// atomically replacing it.
WriteFull(b []byte)
// SetOmap appends the map `pairs` to the omap `oid`.
SetOmap(pairs map[string][]byte)
// RmOmapKeys removes the specified `keys` from the omap `oid`.
RmOmapKeys(keys []string)
// AssertVersion ensures that the object exists and that its internal version
// number is equal to "ver" before writing. "ver" should be a version number
// previously obtained with IOContext.GetLastVersion().
AssertVersion(ver uint64)
// Operate will perform the operation(s).
Operate(oid string) error
// Release the resources associated with this write operation.
Release()
}
// ReadOpW is a wrapper around rados.ReadOp.
type ReadOpW interface {
// Read bytes from offset into buffer.
// len(buffer) is the maximum number of bytes read from the object.
// buffer[:ReadOpReadStep.BytesRead] then contains object data.
Read(offset uint64, buffer []byte) *rados.ReadOpReadStep
// GetOmapValuesByKeys starts iterating over specific key/value pairs.
GetOmapValuesByKeys(keys []string) ReadOpOmapGetValsByKeysStepW
// AssertVersion ensures that the object exists and that its internal version
// number is equal to "ver" before reading. "ver" should be a version number
// previously obtained with IOContext.GetLastVersion().
AssertVersion(ver uint64)
// Operate will perform the operation(s).
Operate(oid string) error
// Release the resources associated with this read operation.
Release()
}
// ReadOpOmapGetValsByKeysStepW is a wrapper around rados.ReadOpOmapGetValsByKeysStep.
type ReadOpOmapGetValsByKeysStepW interface {
// Next gets the next omap key/value pair referenced by
// ReadOpOmapGetValsByKeysStep's internal iterator.
// If there are no more elements to retrieve, (nil, nil) is returned.
// May be called only after Operate() finished.
Next() (*rados.OmapKeyValue, error)
}
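// A hedged sketch of the optimistic-concurrency pattern these wrappers enable
// (ioctx and oid are assumed to come from the caller):
//
//	// read the object first so GetLastVersion() reflects its generation
//	ver, _ := ioctx.GetLastVersion()
//	w := ioctx.CreateWriteOp()
//	defer w.Release()
//	w.AssertVersion(ver) // fails with -ERANGE/-EOVERFLOW if the object changed
//	w.SetOmap(map[string][]byte{"key": []byte("value")})
//	err := w.Operate(oid)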

View File

@ -0,0 +1,133 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package radoswrapper
import (
"github.com/ceph/go-ceph/rados"
)
type (
IOContext struct {
*rados.IOContext
}
WriteOp struct {
IoCtx *rados.IOContext
*rados.WriteOp
}
ReadOp struct {
IoCtx *rados.IOContext
*rados.ReadOp
}
ReadOpOmapGetValsByKeysStep struct {
*rados.ReadOpOmapGetValsByKeysStep
}
)
var _ IOContextW = &IOContext{}
func NewIOContext(ioctx *rados.IOContext) IOContextW {
return &IOContext{
IOContext: ioctx,
}
}
func (c *IOContext) GetLastVersion() (uint64, error) {
return c.IOContext.GetLastVersion()
}
func (c *IOContext) GetXattr(oid, key string, data []byte) (int, error) {
return c.IOContext.GetXattr(oid, key, data)
}
func (c *IOContext) CreateWriteOp() WriteOpW {
return &WriteOp{
IoCtx: c.IOContext,
WriteOp: rados.CreateWriteOp(),
}
}
func (c *IOContext) CreateReadOp() ReadOpW {
return &ReadOp{
IoCtx: c.IOContext,
ReadOp: rados.CreateReadOp(),
}
}
func (w *WriteOp) Create(exclusive rados.CreateOption) {
w.WriteOp.Create(exclusive)
}
func (w *WriteOp) Remove() {
w.WriteOp.Remove()
}
func (w *WriteOp) SetXattr(name string, value []byte) {
w.WriteOp.SetXattr(name, value)
}
func (w *WriteOp) WriteFull(b []byte) {
w.WriteOp.WriteFull(b)
}
func (w *WriteOp) SetOmap(pairs map[string][]byte) {
w.WriteOp.SetOmap(pairs)
}
func (w *WriteOp) RmOmapKeys(keys []string) {
w.WriteOp.RmOmapKeys(keys)
}
func (w *WriteOp) AssertVersion(v uint64) {
w.WriteOp.AssertVersion(v)
}
func (w *WriteOp) Operate(oid string) error {
return w.WriteOp.Operate(w.IoCtx, oid, rados.OperationNoFlag)
}
func (w *WriteOp) Release() {
w.WriteOp.Release()
}
func (r *ReadOp) Read(offset uint64, buffer []byte) *rados.ReadOpReadStep {
return r.ReadOp.Read(offset, buffer)
}
func (r *ReadOp) GetOmapValuesByKeys(keys []string) ReadOpOmapGetValsByKeysStepW {
return &ReadOpOmapGetValsByKeysStep{
ReadOpOmapGetValsByKeysStep: r.ReadOp.GetOmapValuesByKeys(keys),
}
}
func (r *ReadOp) AssertVersion(v uint64) {
r.ReadOp.AssertVersion(v)
}
func (r *ReadOp) Operate(oid string) error {
return r.ReadOp.Operate(r.IoCtx, oid, rados.OperationNoFlag)
}
func (r *ReadOp) Release() {
r.ReadOp.Release()
}
func (s *ReadOpOmapGetValsByKeysStep) Next() (*rados.OmapKeyValue, error) {
return s.ReadOpOmapGetValsByKeysStep.Next()
}

View File

@ -0,0 +1,248 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reftracker
import (
goerrors "errors"
"fmt"
"github.com/ceph/ceph-csi/internal/util/reftracker/errors"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
"github.com/ceph/ceph-csi/internal/util/reftracker/reftype"
v1 "github.com/ceph/ceph-csi/internal/util/reftracker/v1"
"github.com/ceph/ceph-csi/internal/util/reftracker/version"
"github.com/ceph/go-ceph/rados"
)
// reftracker is a key-based implementation of a reference counter.
//
// Unlike an integer-based counter, reftracker counts references by tracking
// unique keys. This allows accounting in situations where idempotency must be
// preserved. It guarantees there will be no duplicate increments or decrements
// of the counter.
//
// It is stored persistently as a RADOS object, and is safe to use with
// multiple concurrent writers, even across different nodes of a cluster.
//
// Example:
//
// created, err := Add(
// ioctx,
// "my-reftracker",
// map[string]struct{}{
// "ref-key-1": {},
// "ref-key-2": {},
// },
// )
//
// Since this is a new reftracker object, `created` is `true`.
//
// "my-reftracker" now holds:
// ["ref-key-1":reftype.Normal, "ref-key-2":reftype.Normal]
// The reference count is 2.
//
// created, err := Add(
// ioctx,
// "my-reftracker",
// map[string]struct{}{
// "ref-key-1": {},
// "ref-key-2": {},
// "ref-key-3": {},
// },
// )
//
// Reftracker named "my-reftracker" already exists, so `created` is now
// `false`. Since "ref-key-1" and "ref-key-2" keys are already tracked,
// only "ref-key-3" is added.
//
// "my-reftracker" now holds:
// ["ref-key-1":reftype.Normal, "ref-key-2":reftype.Normal,
// "ref-key-3":reftype.Normal]
// The reference count is 3.
//
// deleted, err := Remove(
// ioctx,
// "my-reftracker",
// map[string]reftype.RefType{
// "ref-key-1": reftype.Normal,
// "ref-key-2": reftype.Mask,
// },
// )
//
// "my-reftracker" now holds:
// ["ref-key-2":reftype.Mask, "ref-key-3":reftype.Normal]
// The reference count is 1.
//
// Since the reference count is greater than zero, `deleted` is `false`.
// "ref-key-1" was removed, and so is not listed among tracked references.
// "ref-key-2" was only masked, so it's been kept. However, masked references
// don't contribute to overall reference count, so the resulting refcount
// after this Remove() call is 1.
//
// created, err := Add(
// ioctx,
// "my-reftracker",
// map[string]struct{}{
// "ref-key-2": {},
// },
// )
//
// "my-reftracker" now holds:
// ["ref-key-2":reftype.Mask, "ref-key-3":reftype.Normal]
// The reference count is 1.
//
// "ref-key-2" is already tracked, so it will not be added again. Since it
// remains masked, it won't contribute to the reference count.
//
// deleted, err := Remove(
// ioctx,
// "my-reftracker",
// map[string]reftype.RefType{
// "ref-key-3": reftype.Normal,
// },
// )
//
// "ref-key-3" was the only tracked key that contributed to reference count.
// After this Remove() call it's now removed. As a result, the reference count
// dropped down to zero, and the whole object has been deleted too.
// `deleted` is `true`.
// Add atomically adds references to `rtName` reference tracker.
// If the reftracker object doesn't exist yet, it is created and `true` is
// returned. If some keys in `refs` map are already tracked by this reftracker
// object, they will not be added again.
func Add(
ioctx radoswrapper.IOContextW,
rtName string,
refs map[string]struct{},
) (bool, error) {
if err := validateAddInput(rtName, refs); err != nil {
return false, err
}
// Read reftracker version.
rtVer, err := version.Read(ioctx, rtName)
if err != nil {
if goerrors.Is(err, rados.ErrNotFound) {
// This is a new reftracker. Initialize it with `refs`.
if err = v1.Init(ioctx, rtName, refs); err != nil {
return false, fmt.Errorf("failed to initialize reftracker: %w", err)
}
return true, nil
}
return false, fmt.Errorf("failed to read reftracker version: %w", err)
}
// Add references to reftracker object.
gen, err := ioctx.GetLastVersion()
if err != nil {
return false, fmt.Errorf("failed to get RADOS object version: %w", err)
}
switch rtVer {
case v1.Version:
err = v1.Add(ioctx, rtName, gen, refs)
if err != nil {
err = fmt.Errorf("failed to add refs: %w", err)
}
default:
err = errors.UnknownObjectVersion(rtVer)
}
return false, err
}
// Remove atomically removes references from `rtName` reference tracker.
// If the reftracker object holds no references after this removal, the whole
// object is deleted too, and `true` is returned. If the reftracker object
// doesn't exist, (true, nil) is returned.
func Remove(
ioctx radoswrapper.IOContextW,
rtName string,
refs map[string]reftype.RefType,
) (bool, error) {
if err := validateRemoveInput(rtName, refs); err != nil {
return false, err
}
// Read reftracker version.
rtVer, err := version.Read(ioctx, rtName)
if err != nil {
if goerrors.Is(err, rados.ErrNotFound) {
// This reftracker doesn't exist. Assume it was already deleted.
return true, nil
}
return false, fmt.Errorf("failed to read reftracker version: %w", err)
}
// Remove references from reftracker.
gen, err := ioctx.GetLastVersion()
if err != nil {
return false, fmt.Errorf("failed to get RADOS object version: %w", err)
}
var deleted bool
switch rtVer {
case v1.Version:
deleted, err = v1.Remove(ioctx, rtName, gen, refs)
if err != nil {
err = fmt.Errorf("failed to remove refs: %w", err)
}
default:
err = errors.UnknownObjectVersion(rtVer)
}
return deleted, err
}
var (
errNoRTName = goerrors.New("missing reftracker name")
errNoRefs = goerrors.New("missing refs")
)
func validateAddInput(rtName string, refs map[string]struct{}) error {
if rtName == "" {
return errNoRTName
}
if len(refs) == 0 {
return errNoRefs
}
return nil
}
func validateRemoveInput(rtName string, refs map[string]reftype.RefType) error {
if rtName == "" {
return errNoRTName
}
if len(refs) == 0 {
return errNoRefs
}
return nil
}
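// ExampleAddRemove is a hedged, self-contained sketch of the Add/Remove flow,
// exercised against the in-memory fake RADOS backend that the unit tests also
// use; it is illustrative and not wired into any production path.
func ExampleAddRemove() {
	ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())

	// The first Add creates the reftracker object, so created == true.
	created, err := Add(ioctx, "example-rt", map[string]struct{}{
		"ref-key-1": {},
		"ref-key-2": {},
	})
	fmt.Println(created, err)

	// Removing the last normal references deletes the object, deleted == true.
	deleted, err := Remove(ioctx, "example-rt", map[string]reftype.RefType{
		"ref-key-1": reftype.Normal,
		"ref-key-2": reftype.Normal,
	})
	fmt.Println(deleted, err)
}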

View File

@ -0,0 +1,491 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reftracker
import (
"testing"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
"github.com/ceph/ceph-csi/internal/util/reftracker/reftype"
"github.com/stretchr/testify/assert"
)
const rtName = "hello-rt"
func TestRTAdd(t *testing.T) {
t.Parallel()
// Verify input validation for reftracker name.
t.Run("AddNoName", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, "", nil)
assert.Error(ts, err)
assert.False(ts, created)
})
// Verify input validation for nil and empty refs.
t.Run("AddNoRefs", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
refs := []map[string]struct{}{
nil,
make(map[string]struct{}),
}
for _, ref := range refs {
created, err := Add(ioctx, rtName, ref)
assert.Error(ts, err)
assert.False(ts, created)
}
})
// Add multiple refs in a single Add().
t.Run("AddBulk", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
})
// Add refs where each Add() has some of the refs overlapping
// with the previous call.
t.Run("AddOverlapping", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
refsTable := []map[string]struct{}{
{"ref2": {}, "ref3": {}},
{"ref3": {}, "ref4": {}},
{"ref4": {}, "ref5": {}},
}
for _, refs := range refsTable {
created, err = Add(ioctx, rtName, refs)
assert.NoError(ts, err)
assert.False(ts, created)
}
})
}
func TestRTRemove(t *testing.T) {
t.Parallel()
// Verify input validation for nil and empty refs.
t.Run("RemoveNoRefs", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
refs := []map[string]reftype.RefType{
nil,
make(map[string]reftype.RefType),
}
for _, ref := range refs {
created, err := Remove(ioctx, rtName, ref)
assert.Error(ts, err)
assert.False(ts, created)
}
})
// Attempting to remove refs from a non-existent reftracker object should
// succeed, with deleted=true, err=nil.
t.Run("RemoveNotExists", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
deleted, err := Remove(ioctx, "xxx", map[string]reftype.RefType{
"ref1": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Removing only non-existent refs should not result in reftracker object
// deletion.
t.Run("RemoveNonExistentRefs", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"refX": reftype.Normal,
"refY": reftype.Normal,
"refZ": reftype.Normal,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
})
// Removing all refs plus some surplus should result in reftracker object
// deletion.
t.Run("RemoveNonExistentRefs", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"refX": reftype.Normal,
"refY": reftype.Normal,
"ref": reftype.Normal,
"refZ": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Bulk removal of all refs should result in reftracker object deletion.
t.Run("RemoveBulk", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
keys := []string{"ref1", "ref2", "ref3"}
refsToAdd := make(map[string]struct{})
refsToRemove := make(map[string]reftype.RefType)
for _, k := range keys {
refsToAdd[k] = struct{}{}
refsToRemove[k] = reftype.Normal
}
created, err := Add(ioctx, rtName, refsToAdd)
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, refsToRemove)
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Removal of all refs one-by-one should result in reftracker object deletion
// in the last Remove() call.
t.Run("RemoveSingle", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
for _, k := range []string{"ref3", "ref2"} {
deleted, errRemove := Remove(ioctx, rtName, map[string]reftype.RefType{
k: reftype.Normal,
})
assert.NoError(ts, errRemove)
assert.False(ts, deleted)
}
// Remove the last reference. It should remove the whole reftracker object too.
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Cycle through reftracker object twice.
t.Run("AddRemoveAddRemove", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
refsToAdd := map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
}
refsToRemove := map[string]reftype.RefType{
"ref1": reftype.Normal,
"ref2": reftype.Normal,
"ref3": reftype.Normal,
}
for i := 0; i < 2; i++ {
created, err := Add(ioctx, rtName, refsToAdd)
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, refsToRemove)
assert.NoError(ts, err)
assert.True(ts, deleted)
}
})
// Verify idempotency: multiple Add() calls with overlapping keys must not
// inflate the refcount, so removing each distinct key once deletes the object.
t.Run("AddOverlappingRemoveBulk", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
})
assert.True(ts, created)
assert.NoError(ts, err)
refsTable := []map[string]struct{}{
{"ref2": {}, "ref3": {}},
{"ref3": {}, "ref4": {}},
{"ref4": {}, "ref5": {}},
}
for _, refs := range refsTable {
created, err = Add(ioctx, rtName, refs)
assert.False(ts, created)
assert.NoError(ts, err)
}
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Normal,
"ref2": reftype.Normal,
"ref3": reftype.Normal,
"ref4": reftype.Normal,
"ref5": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
}
func TestRTMask(t *testing.T) {
t.Parallel()
// Bulk masking all refs should result in reftracker object deletion.
t.Run("MaskAllBulk", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
keys := []string{"ref1", "ref2", "ref3"}
refsToAdd := make(map[string]struct{})
refsToRemove := make(map[string]reftype.RefType)
for _, k := range keys {
refsToAdd[k] = struct{}{}
refsToRemove[k] = reftype.Mask
}
created, err := Add(ioctx, rtName, refsToAdd)
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, refsToRemove)
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Masking all refs one-by-one should result in reftracker object deletion in
// the last Remove() call.
t.Run("RemoveSingle", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
for _, k := range []string{"ref3", "ref2"} {
deleted, errRemove := Remove(ioctx, rtName, map[string]reftype.RefType{
k: reftype.Mask,
})
assert.NoError(ts, errRemove)
assert.False(ts, deleted)
}
// Remove the last reference. It should delete the whole reftracker object
// too.
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Mask,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Bulk removing two (out of 3) refs and then masking the ref that's left
// should result in reftracker object deletion in the last Remove() call.
t.Run("RemoveBulkMaskSingle", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Normal,
"ref2": reftype.Normal,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{
"ref3": reftype.Mask,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Bulk masking two (out of 3) refs and then removing the ref that's left
// should result in reftracker object deletion in the last Remove() call.
t.Run("MaskSingleRemoveBulk", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Mask,
"ref2": reftype.Mask,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{
"ref3": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Verify that masking refs hides them from future Add()s.
t.Run("MaskAndAdd", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Mask,
"ref2": reftype.Mask,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
created, err = Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
})
assert.NoError(ts, err)
assert.False(ts, created)
deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{
"ref3": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
// Verify that masked refs may be removed with reftype.Normal and re-added.
t.Run("MaskRemoveAdd", func(ts *testing.T) {
ts.Parallel()
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
created, err := Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
"ref3": {},
})
assert.NoError(ts, err)
assert.True(ts, created)
deleted, err := Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Mask,
"ref2": reftype.Mask,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Normal,
"ref2": reftype.Normal,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
created, err = Add(ioctx, rtName, map[string]struct{}{
"ref1": {},
"ref2": {},
})
assert.NoError(ts, err)
assert.False(ts, created)
deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{
"ref3": reftype.Normal,
})
assert.NoError(ts, err)
assert.False(ts, deleted)
deleted, err = Remove(ioctx, rtName, map[string]reftype.RefType{
"ref1": reftype.Normal,
"ref2": reftype.Normal,
})
assert.NoError(ts, err)
assert.True(ts, deleted)
})
}

View File

@ -0,0 +1,63 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reftype
import (
"fmt"
"github.com/ceph/ceph-csi/internal/util/reftracker/errors"
)
// RefType describes the type of a reftracker reference.
type RefType int8
const (
refTypeSize = 1
// Unknown is a reftype used to signal an error state.
Unknown RefType = 0
// Normal type tags the reference to have normal effect on the reference
// count. Adding Normal reference increments the reference count. Removing
// Normal reference decrements the reference count.
//
// It may be converted to a Mask if it is removed with Mask reftype.
Normal RefType = 1
// Mask type tags the reference to be masked, making it not contribute to the
// overall reference count. The reference will be ignored by all future Add()
// calls until it is removed with Normal reftype.
Mask RefType = 2
)
// ToBytes encodes a RefType into its single-byte representation.
func ToBytes(t RefType) []byte {
return []byte{byte(t)}
}
// FromBytes decodes a RefType from its single-byte representation.
func FromBytes(bs []byte) (RefType, error) {
if len(bs) != refTypeSize {
return Unknown, errors.UnexpectedReadSize(refTypeSize, len(bs))
}
num := RefType(bs[0])
switch num { // nolint:exhaustive // reftype.Unknown is handled in default case.
case Normal, Mask:
return num, nil
default:
return Unknown, fmt.Errorf("unknown reftype %d", num)
}
}

View File

@ -0,0 +1,63 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reftype
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestRefTypeBytes(t *testing.T) {
t.Parallel()
var (
refTypeNormalBytes = []byte{1}
refTypeMaskBytes = []byte{2}
expectedBytes = [][]byte{refTypeNormalBytes, refTypeMaskBytes}
refTypes = []RefType{Normal, Mask}
refTypeInvalidBytes = []byte{0xFF}
refTypeWrongSizeBytes = []byte{0, 0, 0, 0, 1}
)
t.Run("ToBytes", func(ts *testing.T) {
ts.Parallel()
for i := range expectedBytes {
bs := ToBytes(refTypes[i])
assert.Equal(ts, expectedBytes[i], bs)
}
})
t.Run("FromBytes", func(ts *testing.T) {
ts.Parallel()
for i := range refTypes {
refType, err := FromBytes(expectedBytes[i])
assert.NoError(ts, err)
assert.Equal(ts, refTypes[i], refType)
}
_, err := FromBytes(refTypeInvalidBytes)
assert.Error(ts, err)
_, err = FromBytes(refTypeWrongSizeBytes)
assert.Error(ts, err)
})
}

View File

@ -0,0 +1,47 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"encoding/binary"
"github.com/ceph/ceph-csi/internal/util/reftracker/errors"
)
// refCount represents the number of references a reftracker object holds.
type refCount uint32
const (
Version = 1
refCountSize = 4
)
func (rc refCount) toBytes() []byte {
bs := make([]byte, refCountSize)
binary.BigEndian.PutUint32(bs, uint32(rc))
return bs
}
func refCountFromBytes(bs []byte) (refCount, error) {
if len(bs) != refCountSize {
return 0, errors.UnexpectedReadSize(refCountSize, len(bs))
}
return refCount(binary.BigEndian.Uint32(bs)), nil
}

View File

@ -0,0 +1,51 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestV1RefCountBytes(t *testing.T) {
t.Parallel()
var (
refCountBytes = []byte{0x0, 0x0, 0x0, 0x7B}
refCountValue = refCount(123)
wrongSizeRefCountBytes = []byte{0, 0, 1}
)
t.Run("ToBytes", func(ts *testing.T) {
ts.Parallel()
bs := refCountValue.toBytes()
assert.Equal(ts, refCountBytes, bs)
})
t.Run("FromBytes", func(ts *testing.T) {
ts.Parallel()
rc, err := refCountFromBytes(refCountBytes)
assert.NoError(ts, err)
assert.Equal(ts, refCountValue, rc)
_, err = refCountFromBytes(wrongSizeRefCountBytes)
assert.Error(ts, err)
})
}

View File

@ -0,0 +1,314 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
goerrors "errors"
"fmt"
"github.com/ceph/ceph-csi/internal/util/reftracker/errors"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
"github.com/ceph/ceph-csi/internal/util/reftracker/reftype"
"github.com/ceph/ceph-csi/internal/util/reftracker/version"
"github.com/ceph/go-ceph/rados"
)
/*
Version 1 layout:
-----------------
If not specified otherwise, all values are stored in big-endian order.
byte idx type name
-------- ------ ------
0 .. 3 uint32 refcount
`refcount`: Number of references held by the reftracker object. The actual
reference keys are stored in an OMap of the RADOS object.
OMap entry layout:
Key:
reftracker key.
Value:
byte idx type name
-------- ------ ------
0        int8   type
`type`: reference type defined in reftracker/reftype.
*/
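// As an illustrative example (hypothetical object, not part of the format
// definition): a reftracker holding "ref1" as a Normal ref and "ref2" as a
// Mask ref would be stored as
//
//	object data: 0x00 0x00 0x00 0x01   (refcount = 1, Mask refs are not counted)
//	OMap:        "ref1" -> 0x01        (reftype.Normal)
//	             "ref2" -> 0x02        (reftype.Mask)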
type readResult struct {
// Total number of references held by the reftracker object.
total refCount
// Refs whose keys matched the request.
foundRefs map[string]reftype.RefType
}
// Atomically initializes a new reftracker object.
func Init(
ioctx radoswrapper.IOContextW,
rtName string,
refs map[string]struct{},
) error {
// Prepare refcount and OMap key-value pairs.
refsToAddBytes := make(map[string][]byte, len(refs))
for ref := range refs {
refsToAddBytes[ref] = reftype.ToBytes(reftype.Normal)
}
// Perform the write.
w := ioctx.CreateWriteOp()
defer w.Release()
w.Create(rados.CreateExclusive)
w.SetXattr(version.XattrName, version.ToBytes(Version))
w.SetOmap(refsToAddBytes)
w.WriteFull(refCount(len(refsToAddBytes)).toBytes())
return errors.FailedObjectWrite(w.Operate(rtName))
}
// Atomically adds refs to an existing reftracker object.
func Add(
ioctx radoswrapper.IOContextW,
rtName string,
gen uint64,
refs map[string]struct{},
) error {
// Read the reftracker object to figure out which refs to add.
readRes, err := readObjectByKeys(ioctx, rtName, gen, refsMapToKeysSlice(refs))
if err != nil {
return errors.FailedObjectRead(err)
}
// Build list of refs to add.
// Add only refs that are missing in the reftracker object.
refsToAdd := make(map[string][]byte)
for ref := range refs {
if _, found := readRes.foundRefs[ref]; !found {
refsToAdd[ref] = reftype.ToBytes(reftype.Normal)
}
}
if len(refsToAdd) == 0 {
// Nothing to do.
return nil
}
// Calculate new refcount.
rcToAdd := refCount(len(refsToAdd))
newRC := readRes.total + rcToAdd
if newRC < readRes.total {
return goerrors.New("addition would overflow uint32 refcount")
}
// Write the data.
w := ioctx.CreateWriteOp()
defer w.Release()
w.AssertVersion(gen)
w.WriteFull(newRC.toBytes())
w.SetOmap(refsToAdd)
return errors.FailedObjectWrite(w.Operate(rtName))
}
// Atomically removes refs from reftracker object. If the object wouldn't hold
// any references after the removal, the whole object is deleted instead.
func Remove(
ioctx radoswrapper.IOContextW,
rtName string,
gen uint64,
refs map[string]reftype.RefType,
) (bool, error) {
// Read the reftracker object to figure out which refs to remove.
readRes, err := readObjectByKeys(ioctx, rtName, gen, typedRefsMapToKeysSlice(refs))
if err != nil {
return false, errors.FailedObjectRead(err)
}
// Build lists of refs to remove, replace, and add.
// There are three cases that need to be handled:
// (1) removing reftype.Normal refs,
// (2) converting refs that were reftype.Normal into reftype.Mask,
// (3) adding a new reftype.Mask key.
var (
refsToRemove []string
refsToSet = make(map[string][]byte)
rcToSubtract refCount
)
for ref, refType := range refs {
if matchedRefType, found := readRes.foundRefs[ref]; found {
if refType == reftype.Normal {
// Case (1): regular removal of Normal ref.
refsToRemove = append(refsToRemove, ref)
if matchedRefType == reftype.Normal {
// If matchedRef was reftype.Mask, it would have already been
// subtracted from the refcount.
rcToSubtract++
}
} else if refType == reftype.Mask && matchedRefType == reftype.Normal {
// Case (2): convert Normal ref to Mask.
// Since this ref is now reftype.Mask, rcToSubtract needs to be adjusted
// too -- so that this ref is not counted in.
refsToSet[ref] = reftype.ToBytes(reftype.Mask)
rcToSubtract++
}
} else {
if refType == reftype.Mask {
// Case (3): add a new Mask ref.
// reftype.Mask doesn't contribute refcount so no change to rcToSubtract.
refsToSet[ref] = reftype.ToBytes(reftype.Mask)
} // else: No such ref was found, so there's nothing to remove.
}
}
if len(refsToRemove) == 0 && len(refsToSet) == 0 {
// Nothing to do.
return false, nil
}
// Calculate new refcount.
if rcToSubtract > readRes.total {
// BUG: this should never happen!
return false, fmt.Errorf("refcount underflow, reftracker object corrupted")
}
newRC := readRes.total - rcToSubtract
// If newRC is zero, it means all refs that the reftracker object held will be
// now gone, and the object must be deleted.
deleted := newRC == 0
// Write the data.
w := ioctx.CreateWriteOp()
defer w.Release()
w.AssertVersion(gen)
if deleted {
w.Remove()
} else {
w.WriteFull(newRC.toBytes())
w.RmOmapKeys(refsToRemove)
w.SetOmap(refsToSet)
}
if err := w.Operate(rtName); err != nil {
return false, errors.FailedObjectWrite(err)
}
return deleted, nil
}
// Tries to find `keys` in reftracker object and returns the result. Failing to
// find any particular key does not result in an error.
func readObjectByKeys(
ioctx radoswrapper.IOContextW,
rtName string,
gen uint64,
keys []string,
) (*readResult, error) {
// Read data from object.
rcBytes := make([]byte, refCountSize)
r := ioctx.CreateReadOp()
defer r.Release()
r.AssertVersion(gen)
r.Read(0, rcBytes)
s := r.GetOmapValuesByKeys(keys)
if err := r.Operate(rtName); err != nil {
return nil, errors.TryRADOSAborted(err)
}
// Convert it from byte slices to type-safe values.
var (
rc refCount
refs = make(map[string]reftype.RefType)
err error
)
rc, err = refCountFromBytes(rcBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse refcount: %w", err)
}
for {
kvPair, err := s.Next()
if err != nil {
return nil, fmt.Errorf("failed to iterate over OMap: %w", err)
}
if kvPair == nil {
break
}
refType, err := reftype.FromBytes(kvPair.Value)
if err != nil {
return nil, fmt.Errorf("failed to parse reftype: %w", err)
}
refs[kvPair.Key] = refType
}
return &readResult{
total: rc,
foundRefs: refs,
}, nil
}
func refsMapToKeysSlice(m map[string]struct{}) []string {
s := make([]string, 0, len(m))
for k := range m {
s = append(s, k)
}
return s
}
func typedRefsMapToKeysSlice(m map[string]reftype.RefType) []string {
s := make([]string, 0, len(m))
for k := range m {
s = append(s, k)
}
return s
}

View File

@ -0,0 +1,423 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
goerrors "errors"
"testing"
"github.com/ceph/ceph-csi/internal/util/reftracker/errors"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
"github.com/ceph/ceph-csi/internal/util/reftracker/reftype"
"github.com/stretchr/testify/assert"
)
func TestV1Read(t *testing.T) {
t.Parallel()
const rtName = "hello-rt"
var (
gen = uint64(0)
validObj = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Oid: rtName,
Data: []byte{0, 0, 0, 0},
Omap: make(map[string][]byte),
},
},
})
invalidObjs = []*radoswrapper.FakeIOContext{
// Missing object.
radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()),
// Bad generation number.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Ver: 123,
Oid: rtName,
Data: []byte{0, 0, 0, 0},
},
},
}),
// Refcount overflow.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Oid: rtName,
Data: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
},
}),
}
refsToAdd = map[string]struct{}{"ref1": {}}
)
err := Add(validObj, rtName, gen, refsToAdd)
assert.NoError(t, err)
for i := range invalidObjs {
err = Add(invalidObjs[i], rtName, gen, refsToAdd)
assert.Error(t, err)
}
// Check for correct error type for wrong gen num.
err = Add(invalidObjs[1], rtName, gen, refsToAdd)
assert.Error(t, err)
assert.True(t, goerrors.Is(err, errors.ErrObjectOutOfDate))
}
func TestV1Init(t *testing.T) {
t.Parallel()
const rtName = "hello-rt"
var (
emptyRados = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{},
})
alreadyExists = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {},
},
})
refsToInit = map[string]struct{}{"ref1": {}}
)
err := Init(emptyRados, rtName, refsToInit)
assert.NoError(t, err)
err = Init(alreadyExists, rtName, refsToInit)
assert.Error(t, err)
}
func TestV1Add(t *testing.T) {
t.Parallel()
const rtName = "hello-rt"
var (
shouldSucceed = []struct {
before *radoswrapper.FakeObj
refsToAdd map[string]struct{}
after *radoswrapper.FakeObj
}{
// Add a new ref.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToAdd: map[string]struct{}{
"ref2": {},
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 1,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Normal),
},
Data: refCount(2).toBytes(),
},
},
// Try to add a ref that's already tracked.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToAdd: map[string]struct{}{
"ref1": {},
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
},
// Try to add a ref that's masked.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Mask),
},
Data: refCount(1).toBytes(),
},
refsToAdd: map[string]struct{}{
"ref1": {},
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Mask),
},
Data: refCount(1).toBytes(),
},
},
}
shouldFail = []*radoswrapper.FakeIOContext{
// Missing object.
radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados()),
// Bad generation number.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Ver: 123,
Oid: rtName,
Data: []byte{0, 0, 0, 0},
},
},
}),
// Refcount overflow.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Oid: rtName,
Data: []byte{0xFF, 0xFF, 0xFF, 0xFF},
},
},
}),
}
)
for i := range shouldSucceed {
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
ioctx.Rados.Objs[rtName] = shouldSucceed[i].before
err := Add(ioctx, rtName, 0, shouldSucceed[i].refsToAdd)
assert.NoError(t, err)
assert.Equal(t, shouldSucceed[i].after, ioctx.Rados.Objs[rtName])
}
for i := range shouldFail {
err := Add(shouldFail[i], rtName, 0, map[string]struct{}{"ref1": {}})
assert.Error(t, err)
}
// Check for correct error type for wrong gen num.
err := Add(shouldFail[1], rtName, 0, map[string]struct{}{"ref1": {}})
assert.Error(t, err)
assert.True(t, goerrors.Is(err, errors.ErrObjectOutOfDate))
}
func TestV1Remove(t *testing.T) {
t.Parallel()
const rtName = "hello-rt"
var (
shouldSucceed = []struct {
before *radoswrapper.FakeObj
refsToRemove map[string]reftype.RefType
after *radoswrapper.FakeObj
deleted bool
}{
// Remove without deleting the reftracker object.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Normal),
},
Data: refCount(2).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref1": reftype.Normal,
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 1,
Omap: map[string][]byte{
"ref2": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
deleted: false,
},
// Remove and delete the reftracker object.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref1": reftype.Normal,
},
after: nil,
deleted: true,
},
// Remove and delete the reftracker object.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref1": reftype.Normal,
},
after: nil,
deleted: true,
},
// Mask a ref without deleting reftracker object.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Normal),
},
Data: refCount(2).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref2": reftype.Mask,
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 1,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Mask),
},
Data: refCount(1).toBytes(),
},
deleted: false,
},
// Mask a ref and delete reftracker object.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref1": reftype.Mask,
},
after: nil,
deleted: true,
},
// Add a masking ref.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref2": reftype.Mask,
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 1,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
"ref2": reftype.ToBytes(reftype.Mask),
},
Data: refCount(1).toBytes(),
},
deleted: false,
},
// Try to remove non-existent ref.
{
before: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
refsToRemove: map[string]reftype.RefType{
"ref2": reftype.Normal,
},
after: &radoswrapper.FakeObj{
Oid: rtName,
Ver: 0,
Omap: map[string][]byte{
"ref1": reftype.ToBytes(reftype.Normal),
},
Data: refCount(1).toBytes(),
},
deleted: false,
},
}
// Bad generation number.
badGen = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Ver: 123,
},
},
})
)
for i := range shouldSucceed {
ioctx := radoswrapper.NewFakeIOContext(radoswrapper.NewFakeRados())
ioctx.Rados.Objs[rtName] = shouldSucceed[i].before
deleted, err := Remove(ioctx, rtName, 0, shouldSucceed[i].refsToRemove)
assert.NoError(t, err)
assert.Equal(t, shouldSucceed[i].deleted, deleted)
assert.Equal(t, shouldSucceed[i].after, ioctx.Rados.Objs[rtName])
}
_, err := Remove(badGen, rtName, 0, map[string]reftype.RefType{"ref": reftype.Normal})
assert.Error(t, err)
assert.True(t, goerrors.Is(err, errors.ErrObjectOutOfDate))
}

View File

@ -0,0 +1,64 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"encoding/binary"
"github.com/ceph/ceph-csi/internal/util/reftracker/errors"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
)
// reftracker objects are versioned, should the object layout need to change.
// Version is stored in its underlying RADOS object xattr as uint32.
const (
// Name of the xattr entry in the RADOS object.
XattrName = "csi.ceph.com/rt-version"
// SizeBytes is the size of version in bytes.
SizeBytes = 4
)
func ToBytes(v uint32) []byte {
bs := make([]byte, SizeBytes)
binary.BigEndian.PutUint32(bs, v)
return bs
}
func FromBytes(bs []byte) (uint32, error) {
if len(bs) != SizeBytes {
return 0, errors.UnexpectedReadSize(SizeBytes, len(bs))
}
return binary.BigEndian.Uint32(bs), nil
}
func Read(ioctx radoswrapper.IOContextW, rtName string) (uint32, error) {
verBytes := make([]byte, SizeBytes)
readSize, err := ioctx.GetXattr(rtName, XattrName, verBytes)
if err != nil {
return 0, err
}
if readSize != SizeBytes {
return 0, errors.UnexpectedReadSize(SizeBytes, readSize)
}
return FromBytes(verBytes)
}

View File

@ -0,0 +1,111 @@
/*
Copyright 2022 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"testing"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
"github.com/stretchr/testify/assert"
)
var (
v1Bytes = []byte{0, 0, 0, 1}
v1Value = uint32(1)
wrongSizeVersionBytes = []byte{0, 0, 1}
)
func TestVersionBytes(t *testing.T) {
t.Parallel()
t.Run("ToBytes", func(ts *testing.T) {
ts.Parallel()
bs := ToBytes(v1Value)
assert.Equal(ts, v1Bytes, bs)
})
t.Run("FromBytes", func(ts *testing.T) {
ts.Parallel()
ver, err := FromBytes(v1Bytes)
assert.NoError(ts, err)
assert.Equal(ts, v1Value, ver)
_, err = FromBytes(wrongSizeVersionBytes)
assert.Error(ts, err)
})
}
func TestVersionRead(t *testing.T) {
t.Parallel()
const rtName = "hello-rt"
var (
validObj = radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Oid: rtName,
Xattrs: map[string][]byte{
XattrName: v1Bytes,
},
},
},
})
invalidObjs = []*radoswrapper.FakeIOContext{
// Missing object.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{},
}),
// Missing xattr.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Oid: rtName,
Xattrs: map[string][]byte{
"some-other-xattr": v1Bytes,
},
},
},
}),
// Wrongly sized version value.
radoswrapper.NewFakeIOContext(&radoswrapper.FakeRados{
Objs: map[string]*radoswrapper.FakeObj{
rtName: {
Oid: rtName,
Xattrs: map[string][]byte{
XattrName: wrongSizeVersionBytes,
},
},
},
}),
}
)
ver, err := Read(validObj, rtName)
assert.NoError(t, err)
assert.Equal(t, v1Value, ver)
for i := range invalidObjs {
_, err = Read(invalidObjs[i], rtName)
assert.Error(t, err)
}
}

View File

@ -168,14 +168,14 @@ func GetTopologyFromRequest(
return &topologyPools, accessibilityRequirements, nil
}
// MatchTopologyForPool returns the topology map, if the passed in pool matches any
// MatchPoolAndTopology returns the topology map, if the passed in pool matches any
// passed in accessibility constraints.
func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
accessibilityRequirements *csi.TopologyRequirement, poolName string) (map[string]string, error) {
func MatchPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
accessibilityRequirements *csi.TopologyRequirement, poolName string) (string, string, map[string]string, error) {
var topologyPool []TopologyConstrainedPool
if topologyPools == nil || accessibilityRequirements == nil {
return nil, nil
return "", "", nil, nil
}
// find the pool in the list of topology based pools
@ -187,13 +187,11 @@ func MatchTopologyForPool(topologyPools *[]TopologyConstrainedPool,
}
}
if len(topologyPool) == 0 {
return nil, fmt.Errorf("none of the configured topology pools (%+v) matched passed in pool name (%s)",
return "", "", nil, fmt.Errorf("none of the configured topology pools (%+v) matched passed in pool name (%s)",
topologyPools, poolName)
}
_, _, topology, err := FindPoolAndTopology(&topologyPool, accessibilityRequirements)
return topology, err
return FindPoolAndTopology(&topologyPool, accessibilityRequirements)
}
// FindPoolAndTopology loops through passed in "topologyPools" and also related

View File

@ -37,7 +37,7 @@ func checkAndReportError(t *testing.T, msg string, err error) {
}
}
// TestFindPoolAndTopology also tests MatchTopologyForPool.
// TestFindPoolAndTopology also tests MatchPoolAndTopology.
func TestFindPoolAndTopology(t *testing.T) {
t.Parallel()
var err error
@ -319,15 +319,15 @@ func TestFindPoolAndTopology(t *testing.T) {
t.Errorf("expected data pool to be named ec-%s, got %s", poolName, dataPoolName)
}
// TEST: MatchTopologyForPool
// TEST: MatchPoolAndTopology
// check for non-existent pool
_, err = MatchTopologyForPool(&validMultipleTopoPools, &validAccReq, pool1+"fuzz")
_, _, _, err = MatchPoolAndTopology(&validMultipleTopoPools, &validAccReq, pool1+"fuzz")
if err == nil {
t.Errorf("expected failure due to non-existent pool name (%s) got success", pool1+"fuzz")
}
// check for existing pool
topoSegment, err = MatchTopologyForPool(&validMultipleTopoPools, &validAccReq, pool1)
_, _, topoSegment, err = MatchPoolAndTopology(&validMultipleTopoPools, &validAccReq, pool1)
err = checkOutput(err, pool1, topoSegment)
checkAndReportError(t, "expected success got:", err)
}

View File

@ -308,6 +308,18 @@ func IsMountPoint(p string) (bool, error) {
return !notMnt, nil
}
// IsCorruptedMountError checks if the given error is a result of a corrupted
// mountpoint.
func IsCorruptedMountError(err error) bool {
return mount.IsCorruptedMnt(err)
}
// ReadMountInfoForProc reads /proc/<PID>/mountinfo and parses it into
// MountInfo structs.
func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) {
return mount.ParseMountInfo(fmt.Sprintf("/proc/%s/mountinfo", proc))
}
// Mount mounts the source to target path.
func Mount(source, target, fstype string, options []string) error {
dummyMount := mount.New("")

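A minimal sketch of how the two helpers above could be combined; the target path is illustrative, the import path of the util package is assumed, and the MountPoint/FsType field names come from the MountInfo struct in k8s.io/mount-utils:

package main

import (
	"fmt"
	"os"

	"github.com/ceph/ceph-csi/internal/util"
)

func main() {
	target := "/var/lib/kubelet/plugins/kubernetes.io/csi/some-volume/mount" // illustrative

	if _, err := os.Stat(target); err != nil && util.IsCorruptedMountError(err) {
		// The mountpoint looks corrupted; dump this process' mount table to
		// aid debugging before attempting cleanup.
		infos, err := util.ReadMountInfoForProc("self")
		if err != nil {
			fmt.Println("failed to read mountinfo:", err)
			return
		}
		for _, mi := range infos {
			fmt.Println(mi.MountPoint, mi.FsType)
		}
	}
}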
View File

@ -29,6 +29,8 @@ RUN dnf -y install \
librados-devel \
librbd-devel \
&& dnf -y update \
&& dnf clean all \
&& rm -rf /var/cache/yum \
&& true
WORKDIR "/go/src/github.com/ceph/ceph-csi"

View File

@ -6,7 +6,7 @@
# options for analysis running
run:
build-tags:
- @@CEPH_VERSION@@
@@BUILD_TAGS@@
# default concurrency is a available CPU number
concurrency: 4
@ -169,7 +169,11 @@ linters:
- funlen
- testpackage
- exhaustivestruct
# TODO: enable goerr113, see: https://github.com/ceph/ceph-csi/issues/1227
# Enabling this linter requires adding unnecessary boilerplate code, so we
# prefer to keep it disabled. It can be enabled once there is a better
# approach. For more details check the
# issue below.
# see: https://github.com/ceph/ceph-csi/issues/1227
- goerr113
- forbidigo
# TODO: enable gomoddirectives

View File

@ -124,6 +124,21 @@ function validate_container_cmd() {
fi
}
# validate csi sidecar image version
function validate_sidecar() {
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
# shellcheck disable=SC1091
source "${SCRIPT_DIR}/../build.env"
sidecars=(CSI_ATTACHER_VERSION CSI_SNAPSHOTTER_VERSION CSI_PROVISIONER_VERSION CSI_RESIZER_VERSION CSI_NODE_DRIVER_REGISTRAR_VERSION)
for sidecar in "${sidecars[@]}"; do
if [[ -z "${!sidecar}" ]]; then
echo "${sidecar}" version is empty, make sure build.env has set this sidecar version
exit 1
fi
done
}
# Storage providers and the default storage class is not needed for Ceph-CSI
# testing. In order to reduce resources and potential conflicts between storage
# plugins, disable them.
@ -161,13 +176,6 @@ else
DISK_CONFIG=""
fi
#configure csi sidecar version
CSI_ATTACHER_VERSION=${CSI_ATTACHER_VERSION:-"v3.2.1"}
CSI_SNAPSHOTTER_VERSION=${CSI_SNAPSHOTTER_VERSION:-"v4.1.1"}
CSI_PROVISIONER_VERSION=${CSI_PROVISIONER_VERSION:-"v2.2.2"}
CSI_RESIZER_VERSION=${CSI_RESIZER_VERSION:-"v1.2.0"}
CSI_NODE_DRIVER_REGISTRAR_VERSION=${CSI_NODE_DRIVER_REGISTRAR_VERSION:-"v2.2.0"}
# configure csi image version
CSI_IMAGE_VERSION=${CSI_IMAGE_VERSION:-"canary"}
@ -290,6 +298,8 @@ cephcsi)
copy_image_to_cluster "${CEPHCSI_IMAGE_REPO}"/cephcsi:"${CSI_IMAGE_VERSION}" "${CEPHCSI_IMAGE_REPO}"/cephcsi:"${CSI_IMAGE_VERSION}"
;;
k8s-sidecar)
echo "validating sidecar's image version"
validate_sidecar
echo "copying the kubernetes sidecar images"
copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-attacher:${CSI_ATTACHER_VERSION}" "${K8S_IMAGE_REPO}/csi-attacher:${CSI_ATTACHER_VERSION}"
copy_image_to_cluster "${K8S_IMAGE_REPO}/csi-snapshotter:${CSI_SNAPSHOTTER_VERSION}" "${K8S_IMAGE_REPO}/csi-snapshotter:${CSI_SNAPSHOTTER_VERSION}"

View File

@ -19,8 +19,10 @@ package main
import (
"fmt"
"os"
"path"
"reflect"
"github.com/ceph/ceph-csi/api/deploy/kubernetes/nfs"
"github.com/ceph/ceph-csi/api/deploy/kubernetes/rbd"
"github.com/ceph/ceph-csi/api/deploy/ocp"
)
@ -46,6 +48,11 @@ var yamlArtifacts = []deploymentArtifact{
reflect.ValueOf(ocp.NewSecurityContextConstraintsYAML),
reflect.ValueOf(ocp.SecurityContextConstraintsDefaults),
},
{
"../deploy/nfs/kubernetes/csidriver.yaml",
reflect.ValueOf(nfs.NewCSIDriverYAML),
reflect.ValueOf(nfs.CSIDriverDefaults),
},
{
"../deploy/rbd/kubernetes/csidriver.yaml",
reflect.ValueOf(rbd.NewCSIDriverYAML),
@ -67,6 +74,15 @@ func main() {
func writeArtifact(artifact deploymentArtifact) {
fmt.Printf("creating %q...", artifact.filename)
dir := path.Dir(artifact.filename)
_, err := os.Stat(dir)
if os.IsNotExist(err) {
err = os.MkdirAll(dir, 0o775)
if err != nil {
panic(fmt.Sprintf("failed to create directory %q: %v", dir, err))
}
}
f, err := os.Create(artifact.filename)
if err != nil {
panic(fmt.Sprintf("failed to create file %q: %v", artifact.filename, err))

202
vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

3
vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt generated vendored Normal file
View File

@ -0,0 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

166
vendor/github.com/aws/aws-sdk-go-v2/aws/config.go generated vendored Normal file
View File

@ -0,0 +1,166 @@
package aws
import (
"net/http"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
// HTTPClient provides the interface to provide custom HTTPClients. Generally
// *http.Client is sufficient for most use cases. The HTTPClient should not
// follow redirects.
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
// A Config provides service configuration for service clients.
type Config struct {
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for
// information on AWS regions.
Region string
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials CredentialsProvider
// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
// The SDK defaults to a BuildableClient allowing API clients to create
// copies of the HTTP Client for service specific customizations.
//
// Use a (*http.Client) for custom behavior. Using a custom http.Client
// will prevent the SDK from modifying the HTTP client.
HTTPClient HTTPClient
// An endpoint resolver that can be used to provide or override an endpoint
// for the given service and region.
//
// See the `aws.EndpointResolver` documentation for additional usage
// information.
//
// Deprecated: See Config.EndpointResolverWithOptions
EndpointResolver EndpointResolver
// An endpoint resolver that can be used to provide or override an endpoint
// for the given service and region.
//
// When EndpointResolverWithOptions is specified, it will be used by a
// service client rather than using EndpointResolver if also specified.
//
// See the `aws.EndpointResolverWithOptions` documentation for additional
// usage information.
EndpointResolverWithOptions EndpointResolverWithOptions
// RetryMaxAttempts specifies the maximum number of attempts an API client
// will make when calling an operation that fails with a retryable error.
//
// API Clients will only use this value to construct a retryer if the
// Config.Retryer member is nil. This value will be ignored if
// Retryer is not nil.
RetryMaxAttempts int
// RetryMode specifies the retry model the API client will be created with.
//
// API Clients will only use this value to construct a retryer if the
// Config.Retryer member is nil. This value will be ignored if
// Retryer is not nil.
RetryMode RetryMode
// Retryer is a function that provides a Retryer implementation. A Retryer
// guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
//
// In general, the provider function should return a new instance of a
// Retryer if you are attempting to provide a consistent Retryer
// configuration across all clients. This will ensure that each client will
// be provided a new instance of the Retryer implementation, and will avoid
// issues such as sharing the same retry token bucket across services.
//
// If not nil, RetryMaxAttempts, and RetryMode will be ignored by API
// clients.
Retryer func() Retryer
// ConfigSources are the sources that were used to construct the Config.
// Allows for additional configuration to be loaded by clients.
ConfigSources []interface{}
// APIOptions provides the set of middleware mutations that modify how the API
// client requests will be handled. This is useful for adding additional
// tracing data to a request, or changing behavior of the SDK's client.
APIOptions []func(*middleware.Stack) error
// The logger writer interface to write logging messages to. Defaults to
// standard error.
Logger logging.Logger
// Configures the events that will be sent to the configured logger. This
// can be used to configure the logging of signing, retries, request, and
// responses of the SDK clients.
//
// See the ClientLogMode type documentation for the complete set of logging
// modes and available configuration.
ClientLogMode ClientLogMode
// The configured DefaultsMode. If not specified, service clients will
// default to legacy.
//
// Supported modes are: auto, cross-region, in-region, legacy, mobile,
// standard
DefaultsMode DefaultsMode
// The RuntimeEnvironment configuration, only populated if the DefaultsMode
// is set to DefaultsModeAuto and is initialized by
// `config.LoadDefaultConfig`. You should not populate this structure
// programmatically, or rely on the values here within your applications.
RuntimeEnvironment RuntimeEnvironment
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
func NewConfig() *Config {
return &Config{}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c Config) Copy() Config {
cp := c
return cp
}
// EndpointDiscoveryEnableState indicates if endpoint discovery is
// enabled, disabled, auto or unset state.
//
// Default behavior (Auto or Unset) indicates operations that require endpoint
// discovery will use Endpoint Discovery by default. Operations that
// optionally use Endpoint Discovery will not use Endpoint Discovery
// unless EndpointDiscovery is explicitly enabled.
type EndpointDiscoveryEnableState uint
// Enumeration values for EndpointDiscoveryEnableState
const (
// EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset.
// Users do not need to use this value explicitly. The behavior for unset
// is the same as for EndpointDiscoveryAuto.
EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota
// EndpointDiscoveryAuto represents an AUTO state that allows endpoint
// discovery only when required by the API. This is the default
// configuration resolved by the client if endpoint discovery is neither
// enabled nor disabled.
EndpointDiscoveryAuto // default state
// EndpointDiscoveryDisabled indicates client MUST not perform endpoint
// discovery even when required.
EndpointDiscoveryDisabled
// EndpointDiscoveryEnabled indicates client MUST always perform endpoint
// discovery if supported for the operation.
EndpointDiscoveryEnabled
)
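
A minimal sketch (not part of this diff) of filling in a Config like the one above by hand; the region value is an illustrative assumption, and in real applications the config.LoadDefaultConfig helper mentioned in the comments is the usual entry point:

package example

import (
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func exampleConfig() aws.Config {
	// NewConfig returns an empty *Config that can be filled in directly.
	cfg := aws.NewConfig()
	cfg.Region = "us-east-1" // assumed region, required by most services

	// Any type with a Do(*http.Request) (*http.Response, error) method
	// satisfies aws.HTTPClient; a plain *http.Client works.
	cfg.HTTPClient = &http.Client{}

	// Copy returns a shallow copy, so later changes to cfg do not leak
	// into the returned value.
	return cfg.Copy()
}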

22
vendor/github.com/aws/aws-sdk-go-v2/aws/context.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
package aws
import (
"context"
"time"
)
type suppressedContext struct {
context.Context
}
func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false
}
func (s *suppressedContext) Done() <-chan struct{} {
return nil
}
func (s *suppressedContext) Err() error {
return nil
}
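
The suppressedContext type above keeps a parent context's values while hiding its deadline and cancellation, so a shared credential refresh is not torn down when a single caller's context is cancelled. A minimal sketch of the same pattern with an exported name (suppressedContext itself is unexported):

package example

import (
	"context"
	"time"
)

// detachedContext mirrors suppressedContext: it carries the parent's values
// but reports no deadline, no Done channel, and no error.
type detachedContext struct{ context.Context }

func (detachedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (detachedContext) Done() <-chan struct{}       { return nil }
func (detachedContext) Err() error                  { return nil }

func demo() {
	parent, cancel := context.WithCancel(context.Background())
	cancel() // the parent is cancelled...

	d := detachedContext{parent}
	_ = d.Err() // ...but the detached context still reports nil here
}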

View File

@ -0,0 +1,139 @@
package aws
import (
"context"
"sync/atomic"
"time"
sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
)
// CredentialsCacheOptions are the options for configuring a CredentialsCache.
type CredentialsCacheOptions struct {
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// An ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired. This can cause an
// increased number of requests to refresh the credentials to occur.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// ExpiryWindowJitterFrac provides a mechanism for randomizing the expiration of credentials
// within the configured ExpiryWindow by a random percentage. Valid values are between 0.0 and 1.0.
//
// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac is 0.5 then credentials will be set to
// expire between 30 to 60 seconds prior to their actual expiration time.
//
// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
ExpiryWindowJitterFrac float64
}
// CredentialsCache provides caching and concurrency safe credentials retrieval
// via the provider's retrieve method.
type CredentialsCache struct {
// provider is the CredentialProvider implementation to be wrapped by the CredentialCache.
provider CredentialsProvider
options CredentialsCacheOptions
creds atomic.Value
sf singleflight.Group
}
// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider is expected to not be nil. A variadic
// list of one or more functions can be provided to modify the CredentialsCache configuration. This allows for
// configuration of credential expiry window and jitter.
func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
options := CredentialsCacheOptions{}
for _, fn := range optFns {
fn(&options)
}
if options.ExpiryWindow < 0 {
options.ExpiryWindow = 0
}
if options.ExpiryWindowJitterFrac < 0 {
options.ExpiryWindowJitterFrac = 0
} else if options.ExpiryWindowJitterFrac > 1 {
options.ExpiryWindowJitterFrac = 1
}
return &CredentialsCache{
provider: provider,
options: options,
}
}
// Retrieve returns the credentials. If the credentials have already been
// retrieved and have not expired, the cached credentials will be returned. If
// the credentials have not been retrieved yet, or have expired, the provider's
// Retrieve method will be called.
//
// Returns an error if the provider's Retrieve method returns an error.
func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
if creds := p.getCreds(); creds != nil {
return *creds, nil
}
resCh := p.sf.DoChan("", func() (interface{}, error) {
return p.singleRetrieve(&suppressedContext{ctx})
})
select {
case res := <-resCh:
return res.Val.(Credentials), res.Err
case <-ctx.Done():
return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
}
}
func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
if creds := p.getCreds(); creds != nil {
return *creds, nil
}
creds, err := p.provider.Retrieve(ctx)
if err == nil {
if creds.CanExpire {
randFloat64, err := sdkrand.CryptoRandFloat64()
if err != nil {
return Credentials{}, err
}
jitter := time.Duration(randFloat64 * p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
creds.Expires = creds.Expires.Add(-(p.options.ExpiryWindow - jitter))
}
p.creds.Store(&creds)
}
return creds, err
}
func (p *CredentialsCache) getCreds() *Credentials {
v := p.creds.Load()
if v == nil {
return nil
}
c := v.(*Credentials)
if c != nil && c.HasKeys() && !c.Expired() {
return c
}
return nil
}
// Invalidate will invalidate the cached credentials. The next call to Retrieve
// will cause the provider's Retrieve method to be called.
func (p *CredentialsCache) Invalidate() {
p.creds.Store((*Credentials)(nil))
}
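
A sketch of wrapping a provider with the cache above; the static keys and the ten second expiry window are made-up values for illustration:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func cachedProvider() *aws.CredentialsCache {
	// Any CredentialsProvider can be wrapped; a CredentialsProviderFunc
	// keeps the sketch self-contained.
	base := aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
		return aws.Credentials{
			AccessKeyID:     "AKIDEXAMPLE",   // assumed placeholder key
			SecretAccessKey: "secretEXAMPLE", // assumed placeholder secret
			Source:          "ExampleProvider",
			CanExpire:       true,
			Expires:         time.Now().Add(15 * time.Minute),
		}, nil
	})

	// Refresh up to 10s before the reported expiry, with up to 50% jitter.
	return aws.NewCredentialsCache(base, func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 10 * time.Second
		o.ExpiryWindowJitterFrac = 0.5
	})
}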

127
vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go generated vendored Normal file
View File

@ -0,0 +1,127 @@
package aws
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
)
// AnonymousCredentials provides a sentinel CredentialsProvider that should be
// used to instruct the SDK's signing middleware to not sign the request.
//
// Using `nil` credentials when configuring an API client will achieve the same
// result. The AnonymousCredentials type allows you to configure the SDK's
// external config loading to not attempt to source credentials from the shared
// config or environment.
//
// For example you can use this CredentialsProvider with an API client's
// Options to instruct the client not to sign a request for accessing public
// S3 bucket objects.
//
// The following example demonstrates using the AnonymousCredentials to prevent
// the SDK's external config loading from attempting to resolve credentials.
//
// cfg, err := config.LoadDefaultConfig(context.TODO(),
// config.WithCredentialsProvider(aws.AnonymousCredentials{}),
// )
// if err != nil {
// log.Fatalf("failed to load config, %v", err)
// }
//
// client := s3.NewFromConfig(cfg)
//
// Alternatively you can leave the API client Options' `Credentials` member as
// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
// the `Credentials` member to nil, if the external config resolved a
// credential provider.
//
// client := s3.New(s3.Options{
// // Credentials defaults to a nil value.
// })
//
// This can also be configured for specific operation calls too.
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// log.Fatalf("failed to load config, %v", err)
// }
//
// client := s3.NewFromConfig(cfg)
//
// result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
// Bucket: aws.String("example-bucket"),
// Key: aws.String("example-key"),
// }, func(o *s3.Options) {
// o.Credentials = nil
// // Or
// o.Credentials = aws.AnonymousCredentials{}
// })
type AnonymousCredentials struct{}
// Retrieve implements the CredentialsProvider interface, but will always
// return an error, and cannot be used to sign a request. The AnonymousCredentials
// type is used as a sentinel type instructing the AWS request signing
// middleware to not sign a request.
func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
return Credentials{Source: "AnonymousCredentials"},
fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
}
// A Credentials is the AWS credentials value for individual credential fields.
type Credentials struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Source of the credentials
Source string
// States if the credentials can expire.
CanExpire bool
// Time the credentials will expire at. Should be ignored if CanExpire is false.
Expires time.Time
}
// Expired returns if the credentials have expired.
func (v Credentials) Expired() bool {
if v.CanExpire {
// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
// time is always based on reported wall-clock time.
return !v.Expires.After(sdk.NowTime().Round(0))
}
return false
}
// HasKeys returns if the credentials keys are set.
func (v Credentials) HasKeys() bool {
return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
}
// A CredentialsProvider is the interface for any component that will provide
// Credentials values. A CredentialsProvider is required to manage its own
// expired state, and what being expired means.
//
// A credentials provider implementation can be wrapped with a CredentialsCache
// to cache the credential value retrieved. Without the cache the SDK will
// attempt to retrieve the credentials for every request.
type CredentialsProvider interface {
// Retrieve returns nil if it successfully retrieved the value.
// An error is returned if the value was not obtainable, or was empty.
Retrieve(ctx context.Context) (Credentials, error)
}
// CredentialsProviderFunc provides a helper wrapping a function value to
// satisfy the CredentialsProvider interface.
type CredentialsProviderFunc func(context.Context) (Credentials, error)
// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
return fn(ctx)
}
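
A short sketch of inspecting the Credentials value a provider returns; any CredentialsProvider works here, including the cache and the function adapter defined above:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func inspect(ctx context.Context, p aws.CredentialsProvider) error {
	// Calling this with aws.AnonymousCredentials{} always returns an error,
	// since that type only serves as a "do not sign" sentinel.
	creds, err := p.Retrieve(ctx)
	if err != nil {
		return err
	}
	// HasKeys reports whether both the access key ID and secret are set;
	// Expired is always false when CanExpire is false.
	fmt.Println("source:", creds.Source, "hasKeys:", creds.HasKeys(), "expired:", creds.Expired())
	return nil
}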

View File

@ -0,0 +1,38 @@
package defaults
import (
"github.com/aws/aws-sdk-go-v2/aws"
"runtime"
"strings"
)
var getGOOS = func() string {
return runtime.GOOS
}
// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
// is set to aws.DefaultsModeAuto.
func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
goos := getGOOS()
if goos == "android" || goos == "ios" {
return aws.DefaultsModeMobile
}
var currentRegion string
if len(environment.EnvironmentIdentifier) > 0 {
currentRegion = environment.Region
}
if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
currentRegion = environment.EC2InstanceMetadataRegion
}
if len(region) > 0 && len(currentRegion) > 0 {
if strings.EqualFold(region, currentRegion) {
return aws.DefaultsModeInRegion
}
return aws.DefaultsModeCrossRegion
}
return aws.DefaultsModeStandard
}
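
A sketch of resolving the effective mode for a given region; the import path for this defaults package is assumed from the surrounding vendor layout:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults" // assumed import path
)

func resolveMode() {
	env := aws.RuntimeEnvironment{
		// Only the region hints matter for this sketch; other fields stay zero.
		EC2InstanceMetadataRegion: "us-west-2",
	}
	// Both regions match, so auto resolution picks the in-region mode.
	mode := defaults.ResolveDefaultsModeAuto("us-west-2", env)
	fmt.Println(mode) // in-region
}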

View File

@ -0,0 +1,43 @@
package defaults
import (
"time"
"github.com/aws/aws-sdk-go-v2/aws"
)
// Configuration is the set of SDK configuration options that are determined based
// on the configured DefaultsMode.
type Configuration struct {
// RetryMode is the configuration's default retry mode API clients should
// use for constructing a Retryer.
RetryMode aws.RetryMode
// ConnectTimeout is the maximum amount of time a dial will wait for
// a connect to complete.
//
// See https://pkg.go.dev/net#Dialer.Timeout
ConnectTimeout *time.Duration
// TLSNegotiationTimeout specifies the maximum amount of time to wait
// for a TLS handshake to complete.
//
// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
TLSNegotiationTimeout *time.Duration
}
// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
if c.ConnectTimeout == nil {
return 0, false
}
return *c.ConnectTimeout, true
}
// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set.
func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
if c.TLSNegotiationTimeout == nil {
return 0, false
}
return *c.TLSNegotiationTimeout, true
}

View File

@ -0,0 +1,50 @@
// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
package defaults
import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"time"
)
// GetModeConfiguration returns the default Configuration descriptor for the given mode.
//
// Supports the following modes: cross-region, in-region, mobile, standard
func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
var mv aws.DefaultsMode
mv.SetFromString(string(mode))
switch mv {
case aws.DefaultsModeCrossRegion:
settings := Configuration{
ConnectTimeout: aws.Duration(3100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeInRegion:
settings := Configuration{
ConnectTimeout: aws.Duration(1100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeMobile:
settings := Configuration{
ConnectTimeout: aws.Duration(30000 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
}
return settings, nil
case aws.DefaultsModeStandard:
settings := Configuration{
ConnectTimeout: aws.Duration(3100 * time.Millisecond),
RetryMode: aws.RetryMode("standard"),
TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
}
return settings, nil
default:
return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
}
}
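
A sketch of looking up the packaged defaults for one mode and reading an optional timeout back out (same assumed import path as above):

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults" // assumed import path
)

func printDefaults() error {
	cfg, err := defaults.GetModeConfiguration(aws.DefaultsModeInRegion)
	if err != nil {
		return err
	}
	// The getters return (0, false) when the underlying pointer field is unset.
	if d, ok := cfg.GetConnectTimeout(); ok {
		fmt.Println("connect timeout:", d) // 1.1s for in-region per the generated values above
	}
	return nil
}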

View File

@ -0,0 +1,2 @@
// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
package defaults

View File

@ -0,0 +1,95 @@
// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
package aws
import (
"strings"
)
// DefaultsMode is the SDK defaults mode setting.
type DefaultsMode string
// The DefaultsMode constants.
const (
// DefaultsModeAuto is an experimental mode that builds on the standard mode.
// The SDK will attempt to discover the execution environment to determine the
// appropriate settings automatically.
//
// Note that the auto detection is heuristics-based and does not guarantee 100%
// accuracy. STANDARD mode will be used if the execution environment cannot
// be determined. The auto detection might query EC2 Instance Metadata service
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
// which might introduce latency. Therefore we recommend choosing an explicit
// defaults_mode instead if startup latency is critical to your application
DefaultsModeAuto DefaultsMode = "auto"
// DefaultsModeCrossRegion builds on the standard mode and includes optimization
// tailored for applications which call AWS services in a different region
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeCrossRegion DefaultsMode = "cross-region"
// DefaultsModeInRegion builds on the standard mode and includes optimization
// tailored for applications which call AWS services from within the same AWS
// region
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeInRegion DefaultsMode = "in-region"
// DefaultsModeLegacy provides default settings that vary per SDK and were used
// prior to establishment of defaults_mode
DefaultsModeLegacy DefaultsMode = "legacy"
// DefaultsModeMobile builds on the standard mode and includes optimization
// tailored for mobile applications
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeMobile DefaultsMode = "mobile"
// DefaultsModeStandard provides the latest recommended default values that
// should be safe to run in most scenarios
//
// Note that the default values vended from this mode might change as best practices
// may evolve. As a result, it is encouraged to perform tests when upgrading
// the SDK
DefaultsModeStandard DefaultsMode = "standard"
)
// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
// the provided string when compared using EqualFold. If the value does not match a known
// constant it will be set as-is and the function will return false. As a special case, if the
// provided value is a zero-length string, the mode will be set to DefaultsModeLegacy.
func (d *DefaultsMode) SetFromString(v string) (ok bool) {
switch {
case strings.EqualFold(v, string(DefaultsModeAuto)):
*d = DefaultsModeAuto
ok = true
case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
*d = DefaultsModeCrossRegion
ok = true
case strings.EqualFold(v, string(DefaultsModeInRegion)):
*d = DefaultsModeInRegion
ok = true
case strings.EqualFold(v, string(DefaultsModeLegacy)):
*d = DefaultsModeLegacy
ok = true
case strings.EqualFold(v, string(DefaultsModeMobile)):
*d = DefaultsModeMobile
ok = true
case strings.EqualFold(v, string(DefaultsModeStandard)):
*d = DefaultsModeStandard
ok = true
case len(v) == 0:
*d = DefaultsModeLegacy
ok = true
default:
*d = DefaultsMode(v)
}
return ok
}
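
A sketch of parsing a mode string, for example one read from an environment variable; the fallback behavior is only what SetFromString documents above:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func parseMode(raw string) aws.DefaultsMode {
	var mode aws.DefaultsMode
	// Matching is case-insensitive; an empty string maps to the legacy mode
	// and an unknown string is kept as-is with ok == false.
	if ok := mode.SetFromString(raw); !ok {
		fmt.Printf("unknown defaults mode %q, passing it through\n", raw)
	}
	return mode
}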

62
vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Package aws provides the core SDK's utilities and shared types. Use this package's
// utilities to simplify setting and reading API operations parameters.
//
// Value and Pointer Conversion Utilities
//
// This package includes a helper conversion utility for each scalar type the SDK's
// API use. These utilities make getting a pointer of the scalar, and dereferencing
// a pointer easier.
//
// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
// The Pointer to value will safely dereference the pointer and return its value.
// If the pointer was nil, the scalar's zero value will be returned.
//
// The value to pointer functions will be named after the scalar type. So to get
// a *string from a string value, use the "String" function. This makes it easy
// to get a pointer to a literal string value, because getting the address of a
// literal requires assigning the value to a variable first.
//
// var strPtr *string
//
// // Without the SDK's conversion functions
// str := "my string"
// strPtr = &str
//
// // With the SDK's conversion functions
// strPtr = aws.String("my string")
//
// // Convert *string to string value
// str = aws.ToString(strPtr)
//
// In addition to scalars the aws package also includes conversion utilities for
// maps and slices of commonly used types in API parameters. The map and slice
// conversion functions use a similar naming pattern to the scalar conversion
// functions.
//
// var strPtrs []*string
// var strs []string = []string{"Go", "Gophers", "Go"}
//
// // Convert []string to []*string
// strPtrs = aws.StringSlice(strs)
//
// // Convert []*string to []string
// strs = aws.ToStringSlice(strPtrs)
//
// SDK Default HTTP Client
//
// The SDK will use the http.DefaultClient if an HTTP client is not provided to
// the SDK's Session, or service client constructor. This means that if the
// http.DefaultClient is modified by other components of your application the
// modifications will be picked up by the SDK as well.
//
// In some cases this might be intended, but it is a better practice to create
// a custom HTTP Client to share explicitly through your application. You can
// configure the SDK to use the custom HTTP Client by setting the HTTPClient
// value of the SDK's Config type when creating a Session or service client.
package aws
// generate.go uses a build tag of "ignore", go run doesn't need to specify
// this because go run ignores all build flags when running a go file directly.
//go:generate go run -tags codegen generate.go
//go:generate go run -tags codegen logging_generate.go
//go:generate gofmt -w -s .
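
A short sketch of the value/pointer helpers the package comment above describes; the bucket name is an illustrative placeholder:

package example

import "github.com/aws/aws-sdk-go-v2/aws"

func pointerHelpers() {
	// Value to pointer, handy for optional API input fields.
	name := aws.String("example-bucket") // *string

	// Pointer to value; a nil pointer yields the zero value instead of panicking.
	_ = aws.ToString(name) // "example-bucket"
	_ = aws.ToString(nil)  // ""

	// Slice helpers follow the same naming pattern.
	_ = aws.StringSlice([]string{"Go", "Gophers"}) // []*string
	_ = aws.ToStringSlice(nil)                     // nil []string
}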

229
vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
package aws
import (
"fmt"
)
// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
type DualStackEndpointState uint
const (
// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
DualStackEndpointStateUnset DualStackEndpointState = iota
// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
DualStackEndpointStateEnabled
// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
DualStackEndpointStateDisabled
)
// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState.
func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
type iface interface {
GetUseDualStackEndpoint() DualStackEndpointState
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetUseDualStackEndpoint()
found = true
break
}
}
return value, found
}
// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
type FIPSEndpointState uint
const (
// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
FIPSEndpointStateUnset FIPSEndpointState = iota
// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
FIPSEndpointStateEnabled
// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
FIPSEndpointStateDisabled
)
// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState.
func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
type iface interface {
GetUseFIPSEndpoint() FIPSEndpointState
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetUseFIPSEndpoint()
found = true
break
}
}
return value, found
}
// Endpoint represents the endpoint a service client should make API operation
// calls to.
//
// The SDK will automatically resolve these endpoints per API client using an
// internal endpoint resolvers. If you'd like to provide custom endpoint
// resolving behavior you can implement the EndpointResolver interface.
type Endpoint struct {
// The base URL endpoint the SDK API clients will use to make API calls to.
// The SDK will suffix URI path and query elements to this endpoint.
URL string
// Specifies if the endpoint's hostname can be modified by the SDK's API
// client.
//
// If the hostname is mutable the SDK API clients may modify any part of
// the hostname based on the requirements of the API, (e.g. adding, or
// removing content in the hostname). Such as, Amazon S3 API client
// prefixing "bucketname" to the hostname, or changing the
// hostname service name component from "s3." to "s3-accesspoint.dualstack."
// for the dualstack endpoint of an S3 Accesspoint resource.
//
// Care should be taken when providing a custom endpoint for an API. If the
// endpoint hostname is mutable, and the client cannot modify the endpoint
// correctly, the operation call will most likely fail, or have undefined
// behavior.
//
// If hostname is immutable, the SDK API clients will not modify the
// hostname of the URL. This may cause the API client not to function
// correctly if the API requires the operation specific hostname values
// to be used by the client.
//
// This flag does not modify the API client's behavior if this endpoint
// will be used instead of Endpoint Discovery, or if the endpoint will be
// used to perform Endpoint Discovery. That behavior is configured via the
// API Client's Options.
HostnameImmutable bool
// The AWS partition the endpoint belongs to.
PartitionID string
// The service name that should be used for signing the requests to the
// endpoint.
SigningName string
// The region that should be used for signing the request to the endpoint.
SigningRegion string
// The signing method that should be used for signing the requests to the
// endpoint.
SigningMethod string
// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
// If source is not provided when providing a custom endpoint, the SDK may not
// perform required host mutations correctly. Source should be used along with
// HostnameImmutable property as per the usage requirement.
Source EndpointSource
}
// EndpointSource is the endpoint source type.
type EndpointSource int
const (
// EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
EndpointSourceServiceMetadata EndpointSource = iota
// EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
// user provides a custom endpoint to be used by the SDK.
EndpointSourceCustom
)
// EndpointNotFoundError is a sentinel error to indicate that the
// EndpointResolver implementation was unable to resolve an endpoint for the
// given service and region. Resolvers should use this to indicate that an API
// client should fall back and attempt to use its internal default resolver to
// resolve the endpoint.
type EndpointNotFoundError struct {
Err error
}
// Error is the error message.
func (e *EndpointNotFoundError) Error() string {
return fmt.Sprintf("endpoint not found, %v", e.Err)
}
// Unwrap returns the underlying error.
func (e *EndpointNotFoundError) Unwrap() error {
return e.Err
}
// EndpointResolver is an endpoint resolver that can be used to provide or
// override an endpoint for the given service and region. API clients will
// attempt to use the EndpointResolver first to resolve an endpoint if
// available. If the EndpointResolver returns an EndpointNotFoundError error,
// API clients will fallback to attempting to resolve the endpoint using its
// internal default endpoint resolver.
//
// Deprecated: See EndpointResolverWithOptions
type EndpointResolver interface {
ResolveEndpoint(service, region string) (Endpoint, error)
}
// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
//
// Deprecated: See EndpointResolverWithOptionsFunc
type EndpointResolverFunc func(service, region string) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
//
// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
return e(service, region)
}
// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
// API clients will fall back to attempting to resolve the endpoint using its
// internal default endpoint resolver.
type EndpointResolverWithOptions interface {
ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
}
// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
return e(service, region, options...)
}
// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
// Returns boolean false if the provided options does not have a method to retrieve the DisableHTTPS.
func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
type iface interface {
GetDisableHTTPS() bool
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetDisableHTTPS()
found = true
break
}
}
return value, found
}
// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
// Returns boolean false if the provided options does not have a method to retrieve the ResolvedRegion.
func GetResolvedRegion(options ...interface{}) (value string, found bool) {
type iface interface {
GetResolvedRegion() string
}
for _, option := range options {
if i, ok := option.(iface); ok {
value = i.GetResolvedRegion()
found = true
break
}
}
return value, found
}
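
A sketch of a resolver that pins one service to a local endpoint and defers everything else to the SDK's built-in resolution; the URL and the "S3" service identifier are illustrative assumptions. Such a value would typically be assigned to Config.EndpointResolverWithOptions:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
)

// localResolver sends the assumed "S3" service to a local test endpoint and
// returns EndpointNotFoundError for everything else, so API clients fall back
// to their internal default resolver.
var localResolver = aws.EndpointResolverWithOptionsFunc(
	func(service, region string, options ...interface{}) (aws.Endpoint, error) {
		if service == "S3" { // assumed service identifier
			return aws.Endpoint{
				URL:               "http://localhost:9000", // assumed local endpoint
				HostnameImmutable: true,
				Source:            aws.EndpointSourceCustom,
				SigningRegion:     region,
			}, nil
		}
		return aws.Endpoint{}, &aws.EndpointNotFoundError{}
	})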

9
vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
package aws
// MissingRegionError is an error that is returned if a region configuration
// value was not found.
type MissingRegionError struct{}
func (*MissingRegionError) Error() string {
return "an AWS region is required, but was not found"
}
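
A small sketch of detecting this sentinel error with errors.As:

package example

import (
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func isMissingRegion(err error) bool {
	var mre *aws.MissingRegionError
	return errors.As(err, &mre)
}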

365
vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go generated vendored Normal file
View File

@ -0,0 +1,365 @@
// Code generated by aws/generate.go DO NOT EDIT.
package aws
import (
"github.com/aws/smithy-go/ptr"
"time"
)
// ToBool returns bool value dereferenced if the passed
// in pointer was not nil. Returns a bool zero value if the
// pointer was nil.
func ToBool(p *bool) (v bool) {
return ptr.ToBool(p)
}
// ToBoolSlice returns a slice of bool values, that are
// dereferenced if the passed in pointer was not nil. Returns a bool
// zero value if the pointer was nil.
func ToBoolSlice(vs []*bool) []bool {
return ptr.ToBoolSlice(vs)
}
// ToBoolMap returns a map of bool values, that are
// dereferenced if the passed in pointer was not nil. The bool
// zero value is used if the pointer was nil.
func ToBoolMap(vs map[string]*bool) map[string]bool {
return ptr.ToBoolMap(vs)
}
// ToByte returns byte value dereferenced if the passed
// in pointer was not nil. Returns a byte zero value if the
// pointer was nil.
func ToByte(p *byte) (v byte) {
return ptr.ToByte(p)
}
// ToByteSlice returns a slice of byte values, that are
// dereferenced if the passed in pointer was not nil. Returns a byte
// zero value if the pointer was nil.
func ToByteSlice(vs []*byte) []byte {
return ptr.ToByteSlice(vs)
}
// ToByteMap returns a map of byte values, that are
// dereferenced if the passed in pointer was not nil. The byte
// zero value is used if the pointer was nil.
func ToByteMap(vs map[string]*byte) map[string]byte {
return ptr.ToByteMap(vs)
}
// ToString returns string value dereferenced if the passed
// in pointer was not nil. Returns a string zero value if the
// pointer was nil.
func ToString(p *string) (v string) {
return ptr.ToString(p)
}
// ToStringSlice returns a slice of string values, that are
// dereferenced if the passed in pointer was not nil. Returns a string
// zero value if the pointer was nil.
func ToStringSlice(vs []*string) []string {
return ptr.ToStringSlice(vs)
}
// ToStringMap returns a map of string values, that are
// dereferenced if the passed in pointer was not nil. The string
// zero value is used if the pointer was nil.
func ToStringMap(vs map[string]*string) map[string]string {
return ptr.ToStringMap(vs)
}
// ToInt returns int value dereferenced if the passed
// in pointer was not nil. Returns a int zero value if the
// pointer was nil.
func ToInt(p *int) (v int) {
return ptr.ToInt(p)
}
// ToIntSlice returns a slice of int values, that are
// dereferenced if the passed in pointer was not nil. Returns a int
// zero value if the pointer was nil.
func ToIntSlice(vs []*int) []int {
return ptr.ToIntSlice(vs)
}
// ToIntMap returns a map of int values, that are
// dereferenced if the passed in pointer was not nil. The int
// zero value is used if the pointer was nil.
func ToIntMap(vs map[string]*int) map[string]int {
return ptr.ToIntMap(vs)
}
// ToInt8 returns int8 value dereferenced if the passed
// in pointer was not nil. Returns a int8 zero value if the
// pointer was nil.
func ToInt8(p *int8) (v int8) {
return ptr.ToInt8(p)
}
// ToInt8Slice returns a slice of int8 values, that are
// dereferenced if the passed in pointer was not nil. Returns a int8
// zero value if the pointer was nil.
func ToInt8Slice(vs []*int8) []int8 {
return ptr.ToInt8Slice(vs)
}
// ToInt8Map returns a map of int8 values, that are
// dereferenced if the passed in pointer was not nil. The int8
// zero value is used if the pointer was nil.
func ToInt8Map(vs map[string]*int8) map[string]int8 {
return ptr.ToInt8Map(vs)
}
// ToInt16 returns int16 value dereferenced if the passed
// in pointer was not nil. Returns a int16 zero value if the
// pointer was nil.
func ToInt16(p *int16) (v int16) {
return ptr.ToInt16(p)
}
// ToInt16Slice returns a slice of int16 values, that are
// dereferenced if the passed in pointer was not nil. Returns a int16
// zero value if the pointer was nil.
func ToInt16Slice(vs []*int16) []int16 {
return ptr.ToInt16Slice(vs)
}
// ToInt16Map returns a map of int16 values, that are
// dereferenced if the passed in pointer was not nil. The int16
// zero value is used if the pointer was nil.
func ToInt16Map(vs map[string]*int16) map[string]int16 {
return ptr.ToInt16Map(vs)
}
// ToInt32 returns int32 value dereferenced if the passed
// in pointer was not nil. Returns a int32 zero value if the
// pointer was nil.
func ToInt32(p *int32) (v int32) {
return ptr.ToInt32(p)
}
// ToInt32Slice returns a slice of int32 values, that are
// dereferenced if the passed in pointer was not nil. Returns a int32
// zero value if the pointer was nil.
func ToInt32Slice(vs []*int32) []int32 {
return ptr.ToInt32Slice(vs)
}
// ToInt32Map returns a map of int32 values, that are
// dereferenced if the passed in pointer was not nil. The int32
// zero value is used if the pointer was nil.
func ToInt32Map(vs map[string]*int32) map[string]int32 {
return ptr.ToInt32Map(vs)
}
// ToInt64 returns int64 value dereferenced if the passed
// in pointer was not nil. Returns a int64 zero value if the
// pointer was nil.
func ToInt64(p *int64) (v int64) {
return ptr.ToInt64(p)
}
// ToInt64Slice returns a slice of int64 values, that are
// dereferenced if the passed in pointer was not nil. Returns a int64
// zero value if the pointer was nil.
func ToInt64Slice(vs []*int64) []int64 {
return ptr.ToInt64Slice(vs)
}
// ToInt64Map returns a map of int64 values, that are
// dereferenced if the passed in pointer was not nil. The int64
// zero value is used if the pointer was nil.
func ToInt64Map(vs map[string]*int64) map[string]int64 {
return ptr.ToInt64Map(vs)
}
// ToUint returns uint value dereferenced if the passed
// in pointer was not nil. Returns a uint zero value if the
// pointer was nil.
func ToUint(p *uint) (v uint) {
return ptr.ToUint(p)
}
// ToUintSlice returns a slice of uint values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint
// zero value if the pointer was nil.
func ToUintSlice(vs []*uint) []uint {
return ptr.ToUintSlice(vs)
}
// ToUintMap returns a map of uint values, that are
// dereferenced if the passed in pointer was not nil. The uint
// zero value is used if the pointer was nil.
func ToUintMap(vs map[string]*uint) map[string]uint {
return ptr.ToUintMap(vs)
}
// ToUint8 returns uint8 value dereferenced if the passed
// in pointer was not nil. Returns a uint8 zero value if the
// pointer was nil.
func ToUint8(p *uint8) (v uint8) {
return ptr.ToUint8(p)
}
// ToUint8Slice returns a slice of uint8 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint8
// zero value if the pointer was nil.
func ToUint8Slice(vs []*uint8) []uint8 {
return ptr.ToUint8Slice(vs)
}
// ToUint8Map returns a map of uint8 values, that are
// dereferenced if the passed in pointer was not nil. The uint8
// zero value is used if the pointer was nil.
func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
return ptr.ToUint8Map(vs)
}
// ToUint16 returns uint16 value dereferenced if the passed
// in pointer was not nil. Returns a uint16 zero value if the
// pointer was nil.
func ToUint16(p *uint16) (v uint16) {
return ptr.ToUint16(p)
}
// ToUint16Slice returns a slice of uint16 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint16
// zero value if the pointer was nil.
func ToUint16Slice(vs []*uint16) []uint16 {
return ptr.ToUint16Slice(vs)
}
// ToUint16Map returns a map of uint16 values, that are
// dereferenced if the passed in pointer was not nil. The uint16
// zero value is used if the pointer was nil.
func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
return ptr.ToUint16Map(vs)
}
// ToUint32 returns uint32 value dereferenced if the passed
// in pointer was not nil. Returns a uint32 zero value if the
// pointer was nil.
func ToUint32(p *uint32) (v uint32) {
return ptr.ToUint32(p)
}
// ToUint32Slice returns a slice of uint32 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint32
// zero value if the pointer was nil.
func ToUint32Slice(vs []*uint32) []uint32 {
return ptr.ToUint32Slice(vs)
}
// ToUint32Map returns a map of uint32 values, that are
// dereferenced if the passed in pointer was not nil. The uint32
// zero value is used if the pointer was nil.
func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
return ptr.ToUint32Map(vs)
}
// ToUint64 returns uint64 value dereferenced if the passed
// in pointer was not nil. Returns a uint64 zero value if the
// pointer was nil.
func ToUint64(p *uint64) (v uint64) {
return ptr.ToUint64(p)
}
// ToUint64Slice returns a slice of uint64 values, that are
// dereferenced if the passed in pointer was not nil. Returns a uint64
// zero value if the pointer was nil.
func ToUint64Slice(vs []*uint64) []uint64 {
return ptr.ToUint64Slice(vs)
}
// ToUint64Map returns a map of uint64 values, that are
// dereferenced if the passed in pointer was not nil. The uint64
// zero value is used if the pointer was nil.
func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
return ptr.ToUint64Map(vs)
}
// ToFloat32 returns float32 value dereferenced if the passed
// in pointer was not nil. Returns a float32 zero value if the
// pointer was nil.
func ToFloat32(p *float32) (v float32) {
return ptr.ToFloat32(p)
}
// ToFloat32Slice returns a slice of float32 values, that are
// dereferenced if the passed in pointer was not nil. Returns a float32
// zero value if the pointer was nil.
func ToFloat32Slice(vs []*float32) []float32 {
return ptr.ToFloat32Slice(vs)
}
// ToFloat32Map returns a map of float32 values, that are
// dereferenced if the passed in pointer was not nil. The float32
// zero value is used if the pointer was nil.
func ToFloat32Map(vs map[string]*float32) map[string]float32 {
return ptr.ToFloat32Map(vs)
}
// ToFloat64 returns float64 value dereferenced if the passed
// in pointer was not nil. Returns a float64 zero value if the
// pointer was nil.
func ToFloat64(p *float64) (v float64) {
return ptr.ToFloat64(p)
}
// ToFloat64Slice returns a slice of float64 values, that are
// dereferenced if the passed in pointer was not nil. Returns a float64
// zero value if the pointer was nil.
func ToFloat64Slice(vs []*float64) []float64 {
return ptr.ToFloat64Slice(vs)
}
// ToFloat64Map returns a map of float64 values, that are
// dereferenced if the passed in pointer was not nil. The float64
// zero value is used if the pointer was nil.
func ToFloat64Map(vs map[string]*float64) map[string]float64 {
return ptr.ToFloat64Map(vs)
}
// ToTime returns time.Time value dereferenced if the passed
// in pointer was not nil. Returns a time.Time zero value if the
// pointer was nil.
func ToTime(p *time.Time) (v time.Time) {
return ptr.ToTime(p)
}
// ToTimeSlice returns a slice of time.Time values, that are
// dereferenced if the passed in pointer was not nil. Returns a time.Time
// zero value if the pointer was nil.
func ToTimeSlice(vs []*time.Time) []time.Time {
return ptr.ToTimeSlice(vs)
}
// ToTimeMap returns a map of time.Time values, that are
// dereferenced if the passed in pointer was not nil. The time.Time
// zero value is used if the pointer was nil.
func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
return ptr.ToTimeMap(vs)
}
// ToDuration returns time.Duration value dereferenced if the passed
// in pointer was not nil. Returns a time.Duration zero value if the
// pointer was nil.
func ToDuration(p *time.Duration) (v time.Duration) {
return ptr.ToDuration(p)
}
// ToDurationSlice returns a slice of time.Duration values, that are
// dereferenced if the passed in pointer was not nil. Returns a time.Duration
// zero value if the pointer was nil.
func ToDurationSlice(vs []*time.Duration) []time.Duration {
return ptr.ToDurationSlice(vs)
}
// ToDurationMap returns a map of time.Duration values, that are
// dereferenced if the passed in pointer was not nil. The time.Duration
// zero value is used if the pointer was nil.
func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
return ptr.ToDurationMap(vs)
}
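
A short sketch of the dereferencing helpers in this file applied to time and map values; both inputs may be nil and still yield usable zero values:

package example

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func dereference(expires *time.Time, tags map[string]*string) (time.Time, map[string]string) {
	// A nil *time.Time becomes the zero time.Time; nil map entries become "".
	return aws.ToTime(expires), aws.ToStringMap(tags)
}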

Some files were not shown because too many files have changed in this diff.