Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-05-28 17:46:41 +00:00
Merge pull request #35 from ceph/devel
Sync rhs/ceph-csi:devel with ceph/ceph-csi:devel
This commit is contained in: commit f0b4ccffd3
4 .github/ISSUE_TEMPLATE/bug_report.md vendored
@@ -13,7 +13,7 @@ A clear and concise description of what the bug is.
- Image/version of Ceph CSI driver :
- Helm chart version :
- Kernel version :
- Mounter used for mounting PVC (for cephfs its `fuse` or `kernel`. for rbd its
- Mounter used for mounting PVC (for cephFS its `fuse` or `kernel`. for rbd its
  `krbd` or `rbd-nbd`) :
- Kubernetes cluster version :
- Ceph cluster version :
@@ -61,7 +61,7 @@ If the issue is in PVC mounting please attach complete logs of below containers.
- if required attach dmesg logs.

**Note:-** If its a rbd issue please provide only rbd related logs, if its a
cephfs issue please provide cephfs logs.
cephFS issue please provide cephFS logs.

# Additional context #
7 .github/workflows/go-test.yaml vendored
@@ -13,3 +13,10 @@ jobs:
      - uses: actions/checkout@v2
      - name: go-test
        run: CONTAINER_CMD=docker make containerized-test TARGET=go-test
  go-test-api:
    name: go-test-api
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: go-test-api
        run: CONTAINER_CMD=docker make containerized-test TARGET=go-test-api
@@ -49,6 +49,8 @@ pull_request_rules:
      - "status-success=gosec"
      - "status-success=mod-check"
      - "status-success=lint-extras"
      - "status-success=ci/centos/k8s-e2e-external-storage/1.21"
      - "status-success=ci/centos/k8s-e2e-external-storage/1.22"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
@@ -78,6 +80,8 @@ pull_request_rules:
      - "status-success=commitlint"
      - "status-success=mod-check"
      - "status-success=lint-extras"
      - "status-success=ci/centos/k8s-e2e-external-storage/1.21"
      - "status-success=ci/centos/k8s-e2e-external-storage/1.22"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
@@ -106,6 +110,8 @@ pull_request_rules:
      - "status-success=mod-check"
      - "status-success=lint-extras"
      - "#changes-requested-reviews-by=0"
      - "status-success=ci/centos/k8s-e2e-external-storage/1.21"
      - "status-success=ci/centos/k8s-e2e-external-storage/1.22"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.20"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
      - "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
11 Makefile
@@ -102,6 +102,9 @@ go-test: GO_COVER_DIR ?= $(shell . $(CURDIR)/build.env ; echo $${GO_COVER_DIR})
go-test: check-env
	TEST_COVERAGE="$(TEST_COVERAGE)" GO_COVER_DIR="$(GO_COVER_DIR)" GO_TAGS="$(GO_TAGS)" ./scripts/test-go.sh

go-test-api: check-env
	@pushd api && ../scripts/test-go.sh && popd

mod-check: check-env
	@echo 'running: go mod verify'
	@go mod verify && [ "$(shell sha512sum go.mod)" = "`sha512sum go.mod`" ] || ( echo "ERROR: go.mod was modified by 'go mod verify'" && false )
@@ -160,6 +163,14 @@ cephcsi: check-env
e2e.test: check-env
	go test $(GO_TAGS) -mod=vendor -c ./e2e

#
# Update the generated deploy/ files when the template changed. This requires
# running 'go mod vendor' to update the API files under the vendor/ directory.
.PHONY: generate-deploy
generate-deploy:
	go mod vendor
	$(MAKE) -C deploy

#
# e2e testing by compiling e2e.test in case it does not exist and running the
# executable. The e2e.test executable is not checked as a dependency in the
@@ -35,7 +35,7 @@ Independent CSI plugins are provided to support RBD and CephFS backed volumes,
- For details about configuration and deployment of RBD plugin, please refer
  [rbd doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md) and
  for CephFS plugin configuration and deployment please
  refer [cephfs doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-cephfs.md).
  refer [cephFS doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-cephfs.md).
- For example usage of RBD and CephFS CSI plugins, see examples in `examples/`.
- Stale resource cleanup, please refer [cleanup doc](docs/resource-cleanup.md).

@@ -158,8 +158,8 @@ More details are available [here](https://github.com/ceph/ceph-csi/issues/463)

## Dev standup

A regular dev standup takes place every other [Monday,Tuesday,Thursday at
12:00 PM UTC](https://redhat.bluejeans.com/702977652). Convert to your local
A regular dev standup takes place every [Monday,Tuesday and Thursday at
12:00 PM UTC](https://meet.google.com/nnn-txfp-cge). Convert to your local
timezone by executing command `date -d "12:00 UTC"` on terminal

Any changes to the meeting schedule will be added to the [agenda
@@ -169,7 +169,7 @@ Anyone who wants to discuss the direction of the project, design and
implementation reviews, or general questions with the broader community is
welcome and encouraged to join.

- Meeting link: <https://redhat.bluejeans.com/702977652>
- Meeting link: <https://meet.google.com/nnn-txfp-cge>
- [Current agenda](https://docs.google.com/document/d/1K1aerdMpraIh56-skdoEoVF9RZrO4NUcbHtjN-f3u1s)

## Contact
20 api/deploy/doc.go Normal file
@@ -0,0 +1,20 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package deploy contains functions to obtain standard and recommended
// deployment artifacts for different container platforms. These artifacts can
// be used by automation tools that want to deploy Ceph-CSI.
package deploy
20 api/deploy/ocp/doc.go Normal file
@@ -0,0 +1,20 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package ocp contains functions to obtain standard and recommended
// deployment artifacts for OpenShift. These artifacts can be used by
// automation tools that want to deploy Ceph-CSI.
package ocp
107 api/deploy/ocp/scc.go Normal file
@@ -0,0 +1,107 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ocp

import (
    "bytes"
    _ "embed"
    "fmt"
    "text/template"

    "github.com/ghodss/yaml"
    secv1 "github.com/openshift/api/security/v1"
)

//go:embed scc.yaml
var securityContextConstraints string

// SecurityContextConstraintsValues contains values that need replacing in the
// template.
type SecurityContextConstraintsValues struct {
    // Namespace contains the OpenShift Namespace where the SCC will be
    // used.
    Namespace string
    // Deployer refers to the Operator that creates the SCC and
    // ServiceAccounts. This is optional.
    Deployer string
}

// SecurityContextConstraintsDefaults can be used for generating deployment
// artifacts with default values.
var SecurityContextConstraintsDefaults = SecurityContextConstraintsValues{
    Namespace: "ceph-csi",
    Deployer:  "",
}

// NewSecurityContextConstraints creates a new SecurityContextConstraints
// object by replacing variables in the template by the values set in the
// SecurityContextConstraintsValues.
//
// The deployer parameter (when not an empty string) is used as a prefix for
// the name of the SCC and the linked ServiceAccounts.
func NewSecurityContextConstraints(values SecurityContextConstraintsValues) (*secv1.SecurityContextConstraints, error) {
    data, err := NewSecurityContextConstraintsYAML(values)
    if err != nil {
        return nil, err
    }

    scc := &secv1.SecurityContextConstraints{}
    err = yaml.Unmarshal([]byte(data), scc)
    if err != nil {
        return nil, fmt.Errorf("failed to convert YAML to %T: %w", scc, err)
    }

    return scc, nil
}

// internalSecurityContextConstraintsValues extends
// SecurityContextConstraintsValues with some private attributes that may get
// set based on other values.
type internalSecurityContextConstraintsValues struct {
    SecurityContextConstraintsValues

    // Prefix is based on SecurityContextConstraintsValues.Deployer.
    Prefix string
}

// NewSecurityContextConstraintsYAML returns a YAML string where the variables
// in the template have been replaced by the values set in the
// SecurityContextConstraintsValues.
func NewSecurityContextConstraintsYAML(values SecurityContextConstraintsValues) (string, error) {
    var buf bytes.Buffer

    // internalValues is a copy of values, but will get extended with
    // API-internal values
    internalValues := internalSecurityContextConstraintsValues{
        SecurityContextConstraintsValues: values,
    }

    if internalValues.Deployer != "" {
        internalValues.Prefix = internalValues.Deployer + "-"
    }

    tmpl, err := template.New("SCC").Parse(securityContextConstraints)
    if err != nil {
        return "", fmt.Errorf("failed to parse template: %w", err)
    }
    err = tmpl.Execute(&buf, internalValues)
    if err != nil {
        return "", fmt.Errorf("failed to replace values in template: %w", err)
    }

    return buf.String(), nil
}
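The two exported constructors above are the whole public surface of the ocp package: NewSecurityContextConstraintsYAML returns the rendered YAML document, and NewSecurityContextConstraints parses it into a typed object. A minimal sketch of a consumer, using only names defined in this file (the `main` wrapper and error handling are illustrative, not part of the Ceph-CSI sources):

```go
package main

import (
    "fmt"
    "log"

    "github.com/ceph/ceph-csi/api/deploy/ocp"
)

func main() {
    // Deployer "rook" becomes the "rook-" prefix on the SCC name and on the
    // ServiceAccount users listed in the scc.yaml template below.
    scc, err := ocp.NewSecurityContextConstraints(ocp.SecurityContextConstraintsValues{
        Namespace: "rook-ceph",
        Deployer:  "rook",
    })
    if err != nil {
        log.Fatalf("failed to create SCC: %v", err)
    }

    fmt.Println(scc.Name) // "rook-ceph-csi", as the unit tests below assert
    for _, user := range scc.Users {
        // e.g. "system:serviceaccount:rook-ceph:rook-csi-rbd-plugin-sa"
        fmt.Println(user)
    }
}
```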
43 api/deploy/ocp/scc.yaml Normal file
@@ -0,0 +1,43 @@
---
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
  name: "{{ .Prefix }}ceph-csi"
# To allow running privilegedContainers
allowPrivilegedContainer: true
# CSI daemonset pod needs hostnetworking
allowHostNetwork: true
# This needs to be set to true as we use HostPath
allowHostDirVolumePlugin: true
priority:
# SYS_ADMIN is needed for rbd to execute the rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run liveness container on daemonset pods
allowHostPorts: true
# Needed as we are setting this in RBD plugin pod
allowHostPID: true
# Required for encryption
allowHostIPC: true
# Set to false as we write to RootFilesystem inside csi containers
readOnlyRootFilesystem: false
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: RunAsAny
fsGroup:
  type: RunAsAny
supplementalGroups:
  type: RunAsAny
# The type of volumes which are mounted to csi pods
volumes:
  - configMap
  - projected
  - emptyDir
  - hostPath
users:
  # A user needs to be added for each service account.
  - "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-rbd-plugin-sa"
  - "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-rbd-provisioner-sa"
  - "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-cephfs-plugin-sa"
  # yamllint disable-line rule:line-length
  - "system:serviceaccount:{{ .Namespace }}:{{ .Prefix }}csi-cephfs-provisioner-sa"
91 api/deploy/ocp/scc_test.go Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ocp

import (
    "strings"
    "testing"

    secv1 "github.com/openshift/api/security/v1"
    "github.com/stretchr/testify/require"
)

func TestNewSecurityContextConstraints(t *testing.T) {
    t.Parallel()

    t.Run("SecurityContextConstraintsDefaults", func(t *testing.T) {
        var (
            err error
            scc *secv1.SecurityContextConstraints
        )

        getSCC := func() {
            scc, err = NewSecurityContextConstraints(SecurityContextConstraintsDefaults)
        }

        require.NotPanics(t, getSCC)
        require.Nil(t, err)
        require.NotNil(t, scc)

        require.Equal(t, scc.Name, "ceph-csi")
        for _, user := range scc.Users {
            require.True(t, strings.HasPrefix(user, "system:serviceaccount:ceph-csi:csi"))
        }
    })

    t.Run("DeployerRook", func(t *testing.T) {
        var (
            err error
            scc *secv1.SecurityContextConstraints
        )

        rookValues := SecurityContextConstraintsValues{
            Namespace: "rook-ceph",
            Deployer:  "rook",
        }

        getSCC := func() {
            scc, err = NewSecurityContextConstraints(rookValues)
        }

        require.NotPanics(t, getSCC)
        require.Nil(t, err)
        require.NotNil(t, scc)

        require.Equal(t, scc.Name, "rook-ceph-csi")
        for _, user := range scc.Users {
            require.True(t, strings.HasPrefix(user, "system:serviceaccount:rook-ceph:rook-csi"))
        }
    })
}

func TestNewSecurityContextConstraintsYAML(t *testing.T) {
    t.Parallel()

    var (
        err  error
        yaml string
    )

    getYAML := func() {
        yaml, err = NewSecurityContextConstraintsYAML(SecurityContextConstraintsDefaults)
    }

    require.NotPanics(t, getYAML)
    require.Nil(t, err)
    require.NotEqual(t, "", yaml)
}
20 api/doc.go Normal file
@@ -0,0 +1,20 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package api contains the public consumable pieces from the Ceph-CSI project.
// The contents are mostly aimed at providing deployment artifacts for different
// container platforms.
package api
9 api/go.mod Normal file
@@ -0,0 +1,9 @@
module github.com/ceph/ceph-csi/api

go 1.16

require (
    github.com/ghodss/yaml v1.0.0
    github.com/openshift/api v0.0.0-20210927171657-636513e97fda
    github.com/stretchr/testify v1.7.0
)
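Because api/ is declared as its own Go module, automation tools can depend on these deployment artifacts without inheriting the main cephcsi binary's dependency tree; only the three requirements above get pulled in. A sketch of such a standalone consumer, assuming the consumer's own (hypothetical) module simply runs `go get github.com/ceph/ceph-csi/api`:

```go
// A separate, hypothetical module (e.g. example.com/scc-dump) that depends
// only on github.com/ceph/ceph-csi/api.
package main

import (
    "fmt"
    "log"

    "github.com/ceph/ceph-csi/api/deploy/ocp"
)

func main() {
    // Render the SCC template with the package defaults: namespace
    // "ceph-csi" and no deployer prefix.
    yamlData, err := ocp.NewSecurityContextConstraintsYAML(ocp.SecurityContextConstraintsDefaults)
    if err != nil {
        log.Fatalf("failed to render SCC YAML: %v", err)
    }
    fmt.Print(yamlData)
}
```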
257 api/go.sum Normal file
@@ -0,0 +1,257 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/openshift/api v0.0.0-20210927171657-636513e97fda h1:VoJmrqbFDuqzjlByItbjx/HxmReK4LC+X3Jt2Wv2Ogs=
github.com/openshift/api v0.0.0-20210927171657-636513e97fda/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8=
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
@@ -39,7 +39,7 @@ SNAPSHOT_VERSION=v4.0.0
HELM_VERSION=v3.1.2

# minikube settings
MINIKUBE_VERSION=v1.23.0
MINIKUBE_VERSION=v1.23.2
VM_DRIVER=none
CHANGE_MINIKUBE_NONE_USER=true
@@ -1,6 +1,6 @@
# ceph-csi-cephfs

The ceph-csi-cephfs chart adds cephfs volume support to your cluster.
The ceph-csi-cephfs chart adds cephFS volume support to your cluster.

## Install from release repo

@@ -134,12 +134,12 @@ charts and their default values.
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
| `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
| `storageClass.name` | Specifies the cephfs StorageClass name | `csi-cephfs-sc` |
| `storageClass.name` | Specifies the cephFS StorageClass name | `csi-cephfs-sc` |
| `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` |
| `storageClass.fsName` | CephFS filesystem name into which the volume shall be created | `myfs` |
| `storageClass.pool` | Ceph pool into which volume data shall be stored | `""` |
| `storageClass.fuseMountOptions` | Comma separated string of Ceph-fuse mount options | `""` |
| `storageclass.kernelMountOptions` | Comma separated string of Cephfs kernel mount options | `""` |
| `storageclass.kernelMountOptions` | Comma separated string of CephFS kernel mount options | `""` |
| `storageClass.mounter` | The driver can use either ceph-fuse (fuse) or ceph kernelclient (kernel) | `""` |
| `storageClass.volumeNamePrefix` | Prefix to use for naming subvolumes | `""` |
| `storageClass.provisionerSecret` | The secrets have to contain user and/or Ceph admin credentials. | `csi-cephfs-secret` |
@@ -152,8 +152,8 @@ charts and their default values.
| `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` |
| `storageClass.mountOptions` | Specifies the mount options | `[]` |
| `secret.create` | Specifies whether the secret should be created | `false` |
| `secret.name` | Specifies the cephfs secret name | `csi-cephfs-secret` |
| `secret.adminID` | Specifies the admin ID of the cephfs secret | `<plaintext ID>` |
| `secret.name` | Specifies the cephFS secret name | `csi-cephfs-secret` |
| `secret.adminID` | Specifies the admin ID of the cephFS secret | `<plaintext ID>` |
| `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` |

### Command Line
@@ -29,7 +29,6 @@ spec:
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'hostPath'
  allowedHostPaths:
    - pathPrefix: '/dev'
@@ -84,8 +84,6 @@ spec:
          env:
            - name: ADDRESS
              value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -158,11 +156,6 @@ spec:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -10,12 +10,8 @@ metadata:
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  allowPrivilegeEscalation: true
  allowedCapabilities:
    - 'SYS_ADMIN'
  fsGroup:
    rule: RunAsAny
  privileged: true
  runAsUser:
    rule: RunAsAny
  seLinux:
@@ -27,7 +23,6 @@ spec:
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'hostPath'
  allowedHostPaths:
    - pathPrefix: '/dev'
@@ -138,7 +138,7 @@ charts and their default values.
| `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` |
| `pluginSocketFile` | The filename of the plugin socket | `csi.sock` |
| `kubeletDir` | kubelet working directory | `/var/lib/kubelet` |
| `cephLogDir` | Host path location for ceph client processes logging, ex: rbd-nbd | `/var/log/ceph` |
| `cephLogDirHostPath` | Host path location for ceph client processes logging, ex: rbd-nbd | `/var/log/ceph` |
| `driverName` | Name of the csi-driver | `rbd.csi.ceph.com` |
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
@@ -151,6 +151,8 @@ charts and their default values.
| `storageClass.thickProvision` | Specifies whether thick provision should be enabled | `false` |
| `storageclass.imageFeatures` | Specifies RBD image features | `layering` |
| `storageClass.mounter` | Specifies RBD mounter | `""` |
| `storageClass.cephLogDir` | ceph client log location, it is the target bindmount path used inside container | `"/var/log/ceph"` |
| `storageClass.cephLogStrategy` | ceph client log strategy, available options `remove` or `compress` or `preserve` | `"remove"` |
| `storageClass.volumeNamePrefix` | Prefix to use for naming RBD images | `""` |
| `storageClass.encrypted` | Specifies whether volume should be encrypted. Set it to true if you want to enable encryption | `""` |
| `storageClass.encryptionKMSID` | Specifies the encryption kms id | `""` |
@@ -175,7 +175,7 @@ spec:
            type: DirectoryOrCreate
        - name: ceph-logdir
          hostPath:
            path: {{ .Values.cephLogDir }}
            path: {{ .Values.cephLogDirHostPath }}
            type: DirectoryOrCreate
        - name: host-dev
          hostPath:
@@ -29,7 +29,6 @@ spec:
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'hostPath'
  allowedHostPaths:
    - pathPrefix: '/dev'
@@ -42,7 +41,7 @@ spec:
      readOnly: true
    - pathPrefix: '/lib/modules'
      readOnly: true
    - pathPrefix: '{{ .Values.cephLogDir }}'
    - pathPrefix: '{{ .Values.cephLogDirHostPath }}'
      readOnly: false
    - pathPrefix: '{{ .Values.kubeletDir }}'
      readOnly: false
@@ -105,8 +105,6 @@ spec:
          env:
            - name: ADDRESS
              value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -164,11 +162,6 @@ spec:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:///csi/{{ .Values.provisionerSocketFile }}"
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
@@ -205,11 +198,6 @@ spec:
                  fieldPath: metadata.namespace
            - name: DRIVER_NAME
              value: {{ .Values.driverName }}
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          volumeMounts:
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
@@ -10,12 +10,8 @@ metadata:
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  allowPrivilegeEscalation: true
  allowedCapabilities:
    - 'SYS_ADMIN'
  fsGroup:
    rule: RunAsAny
  privileged: true
  runAsUser:
    rule: RunAsAny
  seLinux:
@@ -27,7 +23,6 @@ spec:
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'hostPath'
  allowedHostPaths:
    - pathPrefix: '/dev'
@@ -18,6 +18,12 @@ parameters:
{{- if .Values.storageClass.mounter }}
  mounter: {{ .Values.storageClass.mounter }}
{{- end }}
{{- if .Values.storageClass.cephLogDir }}
  cephLogDir: {{ .Values.storageClass.cephLogDir }}
{{- end }}
{{- if .Values.storageClass.cephLogStrategy }}
  cephLogStrategy: {{ .Values.storageClass.cephLogStrategy }}
{{- end }}
{{- if .Values.storageClass.dataPool }}
  dataPool: {{ .Values.storageClass.dataPool }}
{{- end }}
@@ -287,6 +287,22 @@ storageClass:
  # mounter: rbd-nbd
  mounter: ""

  # (optional) ceph client log location, eg: rbd-nbd
  # By default host-path /var/log/ceph of node is bind-mounted into
  # csi-rbdplugin pod at /var/log/ceph mount path. This is to configure
  # target bindmount path used inside container for ceph clients logging.
  # See docs/rbd-nbd.md for available configuration options.
  # cephLogDir: /var/log/ceph
  cephLogDir: ""

  # (optional) ceph client log strategy
  # By default, log file belonging to a particular volume will be deleted
  # on unmap, but you can choose to just compress instead of deleting it
  # or even preserve the log file in text format as it is.
  # Available options `remove` or `compress` or `preserve`
  # cephLogStrategy: remove
  cephLogStrategy: ""

  # (optional) Prefix to use for naming RBD images.
  # If omitted, defaults to "csi-vol-".
  # volumeNamePrefix: "foo-bar-"
@@ -403,7 +419,7 @@ pluginSocketFile: csi.sock
# kubelet working directory, can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet
# Host path location for ceph client processes logging, ex: rbd-nbd
cephLogDir: /var/log/ceph
cephLogDirHostPath: /var/log/ceph
# Name of the csi-driver
driverName: rbd.csi.ceph.com
# Name of the configmap used for state
@@ -36,12 +36,12 @@ import (

const (
    rbdType        = "rbd"
    cephfsType     = "cephfs"
    cephFSType     = "cephfs"
    livenessType   = "liveness"
    controllerType = "controller"

    rbdDefaultName      = "rbd.csi.ceph.com"
    cephfsDefaultName   = "cephfs.csi.ceph.com"
    cephFSDefaultName   = "cephfs.csi.ceph.com"
    livenessDefaultName = "liveness.csi.ceph.com"

    pollTime = 60 // seconds
@@ -144,8 +144,8 @@ func getDriverName() string {
    switch conf.Vtype {
    case rbdType:
        return rbdDefaultName
    case cephfsType:
        return cephfsDefaultName
    case cephFSType:
        return cephFSDefaultName
    case livenessType:
        return livenessDefaultName
    default:
@@ -210,6 +210,10 @@ func main() {
        }
    }

    if err = util.WriteCephConfig(); err != nil {
        log.FatalLogMsg("failed to write ceph configuration file (%v)", err)
    }

    log.DefaultLog("Starting driver type: %v with name: %v", conf.Vtype, dname)
    switch conf.Vtype {
    case rbdType:
@@ -218,7 +222,7 @@ func main() {
        driver := rbd.NewDriver()
        driver.Run(&conf)

    case cephfsType:
    case cephFSType:
        driver := cephfs.NewDriver()
        driver.Run(&conf)
19 deploy/Makefile Normal file
@ -0,0 +1,19 @@
|
||||
# Copyright 2021 The Ceph-CSI Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
.PHONY: all
|
||||
all: scc.yaml
|
||||
|
||||
scc.yaml: ../api/deploy/ocp/scc.yaml ../api/deploy/ocp/scc.go
|
||||
$(MAKE) -C ../tools generate-deploy
|
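A hedged usage note: with the target above, the checked-in manifest would presumably be regenerated from the repository root with something like the following (the `generate-deploy` tool itself lives under `tools/`, per the recipe):

```bash
# Illustrative: rebuild deploy/scc.yaml from the api/deploy sources.
make -C deploy
```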
@ -86,8 +86,6 @@ spec:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
@ -106,10 +104,6 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-cephfsplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:

@ -23,7 +23,6 @@ spec:
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'

@ -4,12 +4,8 @@ kind: PodSecurityPolicy
metadata:
name: cephfs-csi-provisioner-psp
spec:
allowPrivilegeEscalation: true
allowedCapabilities:
- 'SYS_ADMIN'
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
@ -21,7 +17,6 @@ spec:
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'

@ -22,9 +22,8 @@ spec:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'hostPath'
- 'secret'
allowedHostPaths:
- pathPrefix: '/dev'
readOnly: false

@ -4,12 +4,8 @@ kind: PodSecurityPolicy
metadata:
name: rbd-csi-provisioner-psp
spec:
allowPrivilegeEscalation: true
allowedCapabilities:
- 'SYS_ADMIN'
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
@ -21,7 +17,6 @@ spec:
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'hostPath'
allowedHostPaths:
- pathPrefix: '/dev'

@ -77,8 +77,6 @@ spec:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
@ -113,10 +111,6 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:
@ -167,10 +161,6 @@ spec:
- name: ceph-config
mountPath: /etc/ceph/
- name: csi-rbdplugin-controller
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
# for stable functionality replace canary with latest release version
image: quay.io/cephcsi/cephcsi:canary
args:

51
deploy/scc.yaml
Normal file
@ -0,0 +1,51 @@
---
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
name: "ceph-csi"
# To allow running privileged containers
allowPrivilegedContainer: true
# CSI daemonset pods need host networking
allowHostNetwork: true
# This needs to be set to true as we use HostPath
allowHostDirVolumePlugin: true
priority:
# SYS_ADMIN is needed for rbd to execute the rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run the liveness container on daemonset pods
allowHostPorts: true
# Needed as we are setting this in the RBD plugin pod
allowHostPID: true
# Required for encryption
allowHostIPC: true
# Set to false as we write to the root filesystem inside csi containers
readOnlyRootFilesystem: false
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
fsGroup:
type: RunAsAny
supplementalGroups:
type: RunAsAny
# The types of volumes which are mounted to csi pods
volumes:
- configMap
- projected
- emptyDir
- hostPath
users:
# A user needs to be added for each service account.
- "system:serviceaccount:ceph-csi:csi-rbd-plugin-sa"
- "system:serviceaccount:ceph-csi:csi-rbd-provisioner-sa"
- "system:serviceaccount:ceph-csi:csi-cephfs-plugin-sa"
# yamllint disable-line rule:line-length
- "system:serviceaccount:ceph-csi:csi-cephfs-provisioner-sa"
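A hedged usage note: on OpenShift this generated SCC is applied once per cluster before the driver pods are created, along these lines (file path per the Makefile above; the target namespace must match the service accounts listed in the SCC):

```bash
# Illustrative: install the SCC so the ceph-csi service accounts may use it.
oc apply -f deploy/scc.yaml
```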
@ -31,38 +31,48 @@ in the StorageClass.

### Configuring logging path

If you are using the default rbd nodeplugin daemonset and StorageClass
If you are using the default rbd nodePlugin DaemonSet and StorageClass
templates, then `cephLogDir` will be `/var/log/ceph`; this directory will be
a host-path and the default log file path will be
`/var/log/ceph/rbd-nbd-<volID>.log`. rbd-nbd creates a log file per volume
under the `cephLogDir` path on NodeStage(map) and removes the same on
the respective NodeUnstage(unmap).

In case you need a customized log path, you should do the below:
- There are different strategies to maintain the logs
  - `remove`: delete the log file on unmap/detach (default behaviour)
  - `compress`: compress the log file to gzip on unmap/detach; if a `.gz`
    file exists from a previous map/unmap of the same volume, the previous
    log is overwritten with the new log.
  - `preserve`: preserve the log file in text format

- Edit the daemonset templates to change the `cephLogDir`
- If you are using helm charts, then you can use key `cephLogDir`
  You can tweak the log strategy through the `cephLogStrategy` option in the
  StorageClass yaml

  ```
  helm install --set cephLogDir=/var/log/ceph-csi/my-dir
  ```
- In case you need a customized log path, you should do the below:

- For standard templates edit [csi-rbdplugin.yaml](../deploy/rbd/kubernetes/csi-rbdplugin.yaml)
  to update `hostPath` for `ceph-logdir`, also edit psp [csi-nodeplugin-psp.yaml](../deploy/rbd/kubernetes/csi-nodeplugin-psp.yaml)
  to update `pathPrefix` spec entries.
- Update the StorageClass with the customized log directory path
- Now update rbd StorageClass for `cephLogDir`, for example
  - Edit the DaemonSet templates to change the ceph log directory host-path
  - If you are using helm charts, then you can use key `cephLogDirHostPath`

  ```
  cephLogDir: "/var/log/prod-A-logs"
  ```
    ```
    helm install --set cephLogDirHostPath=/var/log/ceph-csi/my-dir
    ```

  - For standard templates edit [csi-rbdplugin.yaml](../deploy/rbd/kubernetes/csi-rbdplugin.yaml)
    to update `hostPath` for `ceph-logdir`, also edit psp [csi-nodeplugin-psp.yaml](../deploy/rbd/kubernetes/csi-nodeplugin-psp.yaml)
    to update `pathPrefix` spec entries.
  - Update the StorageClass with the customized log directory path
  - Now update rbd StorageClass for `cephLogDir`, for example

    ```
    cephLogDir: "/var/log/prod-A-logs"
    ```

`NOTE`:

- On uninstall, make sure to delete `cephLogDir` on the host manually to free
  up space in case any uncleaned log files were left behind.
- In case you do not need the rbd-nbd logging to persist, then just
  update the StorageClass for `cephLogDir` to use a non-persistent path.
- In case you do not need the rbd-nbd logging to persist at all, then
  simply update the StorageClass for `cephLogDir` to use a non-persistent path.

## Status

@ -3,7 +3,7 @@
If the PVC is created with a storage class that has `reclaimPolicy`
set to `Retain`, deleting the PVC will not delete the PV object, the backend omap metadata, or the backend image.
Manual deletion of the PV will result in stale omap keys, values,
cephfs subvolume and rbd image.
cephFS subvolume and rbd image.
The metadata and image must be cleaned up separately.

## Steps
@ -67,7 +67,7 @@ a. remove rbd image(csi-vol-omapval, the prefix csi-vol is value of [volumeNameP
Removing image: 100% complete...done.
```

b. remove cephfs subvolume(csi-vol-omapval)
b. remove cephFS subvolume(csi-vol-omapval)

```
ceph fs subvolume rm volume_name subvolume_name group_name
```
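For completeness, the stale omap cleanup itself is usually done with `rados` from the toolbox pod. A hedged sketch (the pool, object, and key names below are illustrative placeholders, not values taken from this document):

```bash
# Illustrative only: inspect and remove stale csi omap bookkeeping.
rados -p <pool> listomapkeys csi.volumes.default
rados -p <pool> rmomapkey csi.volumes.default csi.volume.<pvc-uid>
rados -p <pool> rm csi.volume.<omapval>
```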

@ -3,6 +3,7 @@
- [End-to-End Testing](#end-to-end-testing)
- [Introduction](#introduction)
- [Install Kubernetes](#install-kubernetes)
- [Deploy Rook](#deploy-rook)
- [Test parameters](#test-parameters)
- [E2E for snapshot](#e2e-for-snapshot)
- [Running E2E](#running-e2e)
@ -89,9 +90,9 @@ are available while running tests:
| flag | description |
| ----------------- | ----------------------------------------------------------------------------- |
| deploy-timeout | Timeout to wait for created kubernetes resources (default: 10 minutes) |
| deploy-cephfs | Deploy cephfs csi driver as part of E2E (default: true) |
| deploy-cephfs | Deploy cephFS csi driver as part of E2E (default: true) |
| deploy-rbd | Deploy rbd csi driver as part of E2E (default: true) |
| test-cephfs | Test cephfs csi driver as part of E2E (default: true) |
| test-cephfs | Test cephFS csi driver as part of E2E (default: true) |
| upgrade-testing | Perform upgrade testing (default: false) |
| upgrade-version | Target version for upgrade testing (default: "v3.3.1") |
| test-rbd | Test rbd csi driver as part of E2E (default: true) |
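As a hedged usage sketch, these flags are consumed by the test binary itself, so they are passed after `-args`; the module path and timeout below are assumptions, not taken from this document:

```bash
# Illustrative: run only the RBD portion of the E2E suite.
go test ./e2e -timeout=90m -v -args \
  -deploy-cephfs=false -test-cephfs=false \
  -deploy-rbd=true -test-rbd=true -deploy-timeout=10
```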

133
e2e/cephfs.go
@ -18,17 +18,17 @@ import (
)

var (
cephfsProvisioner = "csi-cephfsplugin-provisioner.yaml"
cephfsProvisionerRBAC = "csi-provisioner-rbac.yaml"
cephfsProvisionerPSP = "csi-provisioner-psp.yaml"
cephfsNodePlugin = "csi-cephfsplugin.yaml"
cephfsNodePluginRBAC = "csi-nodeplugin-rbac.yaml"
cephfsNodePluginPSP = "csi-nodeplugin-psp.yaml"
cephfsDeploymentName = "csi-cephfsplugin-provisioner"
cephfsDeamonSetName = "csi-cephfsplugin"
cephfsContainerName = "csi-cephfsplugin"
cephfsDirPath = "../deploy/cephfs/kubernetes/"
cephfsExamplePath = examplePath + "cephfs/"
cephFSProvisioner = "csi-cephfsplugin-provisioner.yaml"
cephFSProvisionerRBAC = "csi-provisioner-rbac.yaml"
cephFSProvisionerPSP = "csi-provisioner-psp.yaml"
cephFSNodePlugin = "csi-cephfsplugin.yaml"
cephFSNodePluginRBAC = "csi-nodeplugin-rbac.yaml"
cephFSNodePluginPSP = "csi-nodeplugin-psp.yaml"
cephFSDeploymentName = "csi-cephfsplugin-provisioner"
cephFSDeamonSetName = "csi-cephfsplugin"
cephFSContainerName = "csi-cephfsplugin"
cephFSDirPath = "../deploy/cephfs/kubernetes/"
cephFSExamplePath = examplePath + "cephfs/"
subvolumegroup = "e2e"
fileSystemName = "myfs"
)
@ -36,23 +36,23 @@ var (
func deployCephfsPlugin() {
// delete objects deployed by rook

data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
}
_, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
if err != nil {
e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
}

data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
}
_, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")

if err != nil {
e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
}

createORDeleteCephfsResources(kubectlCreate)
@ -63,12 +63,12 @@ func deleteCephfsPlugin() {
}

func createORDeleteCephfsResources(action kubectlAction) {
csiDriver, err := ioutil.ReadFile(cephfsDirPath + csiDriverObject)
csiDriver, err := ioutil.ReadFile(cephFSDirPath + csiDriverObject)
if err != nil {
// createORDeleteRbdResources is used for upgrade testing as csidriverObject is
// newly added, discarding file not found error.
if !os.IsNotExist(err) {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+csiDriverObject, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+csiDriverObject, err)
}
} else {
err = retryKubectlInput(cephCSINamespace, action, string(csiDriver), deployTimeout)
@ -89,55 +89,55 @@ func createORDeleteCephfsResources(action kubectlAction) {
e2elog.Failf("failed to %s ceph-conf configmap object with error %v", action, err)
}
}
data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisioner)
data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisioner)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisioner, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisioner, err)
}
data = oneReplicaDeployYaml(data)
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)

if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err)
}

data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerPSP)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerPSP)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerPSP, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerPSP, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err)
}

data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePlugin)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePlugin)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePlugin, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePlugin, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err)
}

data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err)
}

data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginPSP)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginPSP)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginPSP, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginPSP, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
@ -200,7 +200,7 @@ var _ = Describe("cephfs", func() {
}
deployCephfsPlugin()
}
err := createConfigMap(cephfsDirPath, f.ClientSet, f)
err := createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@ -239,7 +239,7 @@ var _ = Describe("cephfs", func() {
// log all details from the namespace where Ceph-CSI is deployed
framework.DumpAllNamespaceInfo(c, cephCSINamespace)
}
err := deleteConfigMap(cephfsDirPath)
err := deleteConfigMap(cephFSDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
@ -255,7 +255,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to delete node secret with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@ -272,25 +272,25 @@ var _ = Describe("cephfs", func() {

Context("Test CephFS CSI", func() {
It("Test CephFS CSI", func() {
pvcPath := cephfsExamplePath + "pvc.yaml"
appPath := cephfsExamplePath + "pod.yaml"
pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
appClonePath := cephfsExamplePath + "pod-restore.yaml"
appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
snapshotPath := cephfsExamplePath + "snapshot.yaml"
pvcPath := cephFSExamplePath + "pvc.yaml"
appPath := cephFSExamplePath + "pod.yaml"
pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
appClonePath := cephFSExamplePath + "pod-restore.yaml"
appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"

By("checking provisioner deployment is running", func() {
err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
err := waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
}
})

By("checking nodeplugin deamonset pods are running", func() {
err := waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err)
e2elog.Failf("timeout waiting for daemonset %s with error %v", cephFSDeamonSetName, err)
}
})

@ -302,11 +302,11 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
// Deleting the storageclass and secret created by helm
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
err = deleteResource(cephfsExamplePath + "secret.yaml")
err = deleteResource(cephFSExamplePath + "secret.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@ -314,7 +314,7 @@ var _ = Describe("cephfs", func() {
}

By("check static PVC", func() {
scPath := cephfsExamplePath + "secret.yaml"
scPath := cephFSExamplePath + "secret.yaml"
err := validateCephFsStaticPV(f, appPath, scPath)
if err != nil {
e2elog.Failf("failed to validate CephFS static pv with error %v", err)
@ -330,7 +330,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@ -378,7 +378,7 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete PVC with error %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@ -399,7 +399,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@ -418,7 +418,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@ -529,16 +529,21 @@ var _ = Describe("cephfs", func() {
})

By("validate multiple subvolumegroup creation", func() {
err := deleteResource(cephfsExamplePath + "storageclass.yaml")
err := deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}

// re-define configmap with information of multiple clusters.
subvolgrpInfo := map[string]string{
"clusterID-1": "subvolgrp1",
"clusterID-2": "subvolgrp2",
}
err = createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
clusterInfo := map[string]map[string]string{}
clusterID1 := "clusterID-1"
clusterID2 := "clusterID-2"
clusterInfo[clusterID1] = map[string]string{}
clusterInfo[clusterID1]["subvolumeGroup"] = "subvolgrp1"
clusterInfo[clusterID2] = map[string]string{}
clusterInfo[clusterID2]["subvolumeGroup"] = "subvolgrp2"

err = createCustomConfigMap(f.ClientSet, cephFSDirPath, clusterInfo)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@ -553,7 +558,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate pvc and application with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@ -574,7 +579,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate pvc and application with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@ -582,11 +587,11 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate subvolume group with error %v", err)
}
err = deleteConfigMap(cephfsDirPath)
err = deleteConfigMap(cephFSDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
err = createConfigMap(cephfsDirPath, f.ClientSet, f)
err = createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@ -703,7 +708,7 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete PVC with error %v", err)
}

err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
}
@ -782,7 +787,7 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err)
}

err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
}

@ -43,7 +43,7 @@ func createCephfsStorageClass(
f *framework.Framework,
enablePool bool,
params map[string]string) error {
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "storageclass.yaml")
sc, err := getStorageClass(scPath)
if err != nil {
return err
@ -90,7 +90,7 @@ func createCephfsStorageClass(
}

func createCephfsSecret(f *framework.Framework, secretName, userName, userKey string) error {
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "secret.yaml")
sc, err := getSecret(scPath)
if err != nil {
return err
@ -131,9 +131,9 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
_, stdErr, err := execCommandInDaemonsetPod(
f,
cmd,
cephfsDeamonSetName,
cephFSDeamonSetName,
pod.Spec.NodeName,
cephfsContainerName,
cephFSContainerName,
cephCSINamespace)
if stdErr != "" {
e2elog.Logf("StdErr occurred: %s", stdErr)

@ -76,7 +76,10 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
}

// createCustomConfigMap provides multiple clusters information.
func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpInfo map[string]string) error {
func createCustomConfigMap(
c kubernetes.Interface,
pluginPath string,
clusterInfo map[string]map[string]string) error {
path := pluginPath + configMap
cm := v1.ConfigMap{}
err := unmarshal(path, &cm)
@ -90,22 +93,36 @@ func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpI
}
// get clusterIDs
var clusterID []string
for key := range subvolgrpInfo {
for key := range clusterInfo {
clusterID = append(clusterID, key)
}
conmap := []util.ClusterInfo{
{
ClusterID: clusterID[0],
Monitors: mons,
},
{
ClusterID: clusterID[1],
Monitors: mons,
},
conmap := make([]util.ClusterInfo, len(clusterID))

for i, j := range clusterID {
conmap[i].ClusterID = j
conmap[i].Monitors = mons
}
for i := 0; i < len(subvolgrpInfo); i++ {
conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]]

// fill radosNamespace and subvolgroups
for cluster, confItems := range clusterInfo {
for i, j := range confItems {
switch i {
case "subvolumeGroup":
for c := range conmap {
if conmap[c].ClusterID == cluster {
conmap[c].CephFS.SubvolumeGroup = j
}
}
case "radosNamespace":
for c := range conmap {
if conmap[c].ClusterID == cluster {
conmap[c].RadosNamespace = j
}
}
}
}
}

data, err := json.Marshal(conmap)
if err != nil {
return err
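To make the new signature concrete, a short usage sketch (cluster IDs and group names are illustrative; this mirrors how the multiple-subvolumegroup test above builds its map, and the function understands the `subvolumeGroup` and `radosNamespace` keys):

```go
// Illustrative only: each clusterID maps to per-cluster config items.
clusterInfo := map[string]map[string]string{
    "clusterID-1": {"subvolumeGroup": "subvolgrp1"},
    "clusterID-2": {"subvolumeGroup": "subvolgrp2"},
}
if err := createCustomConfigMap(f.ClientSet, cephFSDirPath, clusterInfo); err != nil {
    e2elog.Failf("failed to create configmap with error %v", err)
}
```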
@ -18,9 +18,9 @@ func init() {
log.SetOutput(GinkgoWriter)

flag.IntVar(&deployTimeout, "deploy-timeout", 10, "timeout to wait for created kubernetes resources")
flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephfs csi driver")
flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephFS csi driver")
flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephfs csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")

116
e2e/migration.go
Normal file
@ -0,0 +1,116 @@
package e2e

import (
"context"
"encoding/hex"
"fmt"
"strconv"
"strings"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
)

func validateRBDStaticMigrationPVDeletion(f *framework.Framework, appPath, scName string, isBlock bool) error {
opt := make(map[string]string)
var (
rbdImageName = "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412"
pvName = "pv-name"
pvcName = "pvc-name"
namespace = f.UniqueName
sc = scName
provisionerAnnKey = "pv.kubernetes.io/provisioned-by"
provisionerAnnValue = "rbd.csi.ceph.com"
)

c := f.ClientSet
PVAnnMap := make(map[string]string)
PVAnnMap[provisionerAnnKey] = provisionerAnnValue
mons, err := getMons(rookNamespace, c)
if err != nil {
return fmt.Errorf("failed to get mons: %w", err)
}
mon := strings.Join(mons, ",")
size := staticPVSize
// create rbd image
cmd := fmt.Sprintf(
"rbd create %s --size=%s --image-feature=layering %s",
rbdImageName,
staticPVSize,
rbdOptions(defaultRBDPool))

_, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
if err != nil {
return err
}
if stdErr != "" {
return fmt.Errorf("failed to create rbd image %s", stdErr)
}

opt["migration"] = "true"
opt["monitors"] = mon
opt["imageFeatures"] = staticPVImageFeature
opt["pool"] = defaultRBDPool
opt["staticVolume"] = strconv.FormatBool(true)
opt["imageName"] = rbdImageName

// Make volumeID similar to the migration volumeID
volID := composeIntreeMigVolID(mon, rbdImageName)
pv := getStaticPV(
pvName,
volID,
size,
rbdNodePluginSecretName,
cephCSINamespace,
sc,
provisionerAnnValue,
isBlock,
opt,
PVAnnMap,
deletePolicy)

_, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("PV Create API error: %w", err)
}

pvc := getStaticPVC(pvcName, pvName, size, namespace, sc, isBlock)

_, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("PVC Create API error: %w", err)
}
// bind pvc to app
app, err := loadApp(appPath)
if err != nil {
return err
}

app.Namespace = namespace
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcName
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
return err
}

err = deletePVCAndApp("", f, pvc, app)
if err != nil {
return fmt.Errorf("failed to delete PVC and application with error %w", err)
}

return err
}

// composeIntreeMigVolID creates a volID similar to the intree migration volID
// the migration volID format looks like below
// mig_mons-<hash>_image-<UUID>_<poolhash>
// nolint:lll // ex: "mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c
func composeIntreeMigVolID(mons, rbdImageName string) string {
poolField := hex.EncodeToString([]byte(defaultRBDPool))
monsField := monsPrefix + getMonsHash(mons)
imageUID := strings.Split(rbdImageName, intreeVolPrefix)[1:]
imageField := imagePrefix + imageUID[0]
vhSlice := []string{migIdentifier, monsField, imageField, poolField}

return strings.Join(vhSlice, "_")
}
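A short usage sketch of the helper above (monitor addresses are illustrative; the image name must carry the `kubernetes-dynamic-pvc-` in-tree prefix, since the helper splits on it):

```go
// Illustrative only: build an in-tree-migration style volume handle.
mon := "10.0.0.1:6789,10.0.0.2:6789"
image := "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412"
volID := composeIntreeMigVolID(mon, image)
// volID looks like: mig_mons-<hash-of-mons>_image-e0b45b52-..._<hex-of-pool-name>
```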
@ -374,6 +374,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error {
})
}

// nolint:unparam // currently skipNotFound is always false, this can change in the future
func deletePodWithLabel(label, ns string, skipNotFound bool) error {
err := retryKubectlArgs(
ns,

197
e2e/rbd.go
@ -366,6 +366,47 @@ var _ = Describe("RBD", func() {
}
})
}
By("validate RBD migration+static Block PVC Deletion", func() {
// create monitors hash by fetching monitors from the cluster.
mons, err := getMons(rookNamespace, c)
if err != nil {
e2elog.Failf("failed to get monitors %v", err)
}
mon := strings.Join(mons, ",")
inClusterID := getMonsHash(mon)

clusterInfo := map[string]map[string]string{}
clusterInfo[inClusterID] = map[string]string{}

// create custom configmap
err = createCustomConfigMap(f.ClientSet, rbdDirPath, clusterInfo)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
err = createRBDStorageClass(f.ClientSet, f, "migrationsc", nil, nil, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass with error %v", err)
}
// restart csi pods for the configmap to take effect.
err = recreateCSIRBDPods(f)
if err != nil {
e2elog.Failf("failed to recreate rbd csi pods with error %v", err)
}
err = validateRBDStaticMigrationPVDeletion(f, rawAppPath, "migrationsc", true)
if err != nil {
e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)
err = deleteConfigMap(rbdDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
err = createConfigMap(rbdDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
})

By("create a PVC and validate owner", func() {
err := validateImageOwner(pvcPath, f)
@ -1061,7 +1102,8 @@ var _ = Describe("RBD", func() {
snapshotPath,
pvcClonePath,
appClonePath,
noKMS,
noKMS, noKMS,
defaultSCName,
f)
}
})
@ -1128,7 +1170,11 @@ var _ = Describe("RBD", func() {
e2elog.Failf("failed to create storageclass with error %v", err)
}

validatePVCSnapshot(1, pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath, vaultKMS, f)
validatePVCSnapshot(1,
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath,
vaultKMS, vaultKMS,
defaultSCName,
f)

err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
@ -1140,6 +1186,135 @@ var _ = Describe("RBD", func() {
}
})

By("Validate PVC restore from vaultKMS to vaultTenantSAKMS", func() {
if !k8sVersionGreaterEquals(f.ClientSet, 1, 16) {
Skip("pvc clone is only supported from v1.16+")
}
restoreSCName := "restore-sc"
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}
scOpts := map[string]string{
"encrypted": "true",
"encryptionKMSID": "vault-test",
}
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}

scOpts = map[string]string{
"encrypted": "true",
"encryptionKMSID": "vault-tenant-sa-test",
}
err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}

err = createTenantServiceAccount(f.ClientSet, f.UniqueName)
if err != nil {
e2elog.Failf("failed to create ServiceAccount: %v", err)
}
defer deleteTenantServiceAccount(f.UniqueName)

validatePVCSnapshot(1,
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath,
vaultKMS, vaultTenantSAKMS,
restoreSCName, f)

err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName)
if err != nil {
e2elog.Failf("failed to delete storageclass %q: %v", restoreSCName, err)
}

err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}

// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)

err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
})

By("Validate thick PVC restore from vaultKMS to userSecretsMetadataKMS", func() {
if !k8sVersionGreaterEquals(f.ClientSet, 1, 16) {
Skip("pvc clone is only supported from v1.16+")
}
restoreSCName := "restore-sc"
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}
scOpts := map[string]string{
"encrypted": "true",
"encryptionKMSID": "vault-test",
"thickProvision": "true",
}
err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, scOpts, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}

scOpts = map[string]string{
"encrypted": "true",
"encryptionKMSID": "user-secrets-metadata-test",
"thickProvision": "true",
}
err = createRBDStorageClass(f.ClientSet, f, restoreSCName, nil, scOpts, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}

// PVC creation namespace where secret will be created
namespace := f.UniqueName

// create user Secret
err = retryKubectlFile(namespace, kubectlCreate, vaultExamplePath+vaultUserSecret, deployTimeout)
if err != nil {
e2elog.Failf("failed to create user Secret: %v", err)
}

validatePVCSnapshot(1,
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath,
vaultKMS, secretsMetadataKMS,
restoreSCName, f)

// delete user secret
err = retryKubectlFile(namespace,
kubectlDelete,
vaultExamplePath+vaultUserSecret,
deployTimeout,
"--ignore-not-found=true")
if err != nil {
e2elog.Failf("failed to delete user Secret: %v", err)
}

err = retryKubectlArgs(cephCSINamespace, kubectlDelete, deployTimeout, "storageclass", restoreSCName)
if err != nil {
e2elog.Failf("failed to delete storageclass %q: %v", restoreSCName, err)
}

err = deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
}

// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)

err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, nil, deletePolicy)
if err != nil {
e2elog.Failf("failed to create storageclass: %v", err)
}
})

By("create an encrypted PVC-PVC clone and bind it to an app", func() {
if !k8sVersionGreaterEquals(f.ClientSet, 1, 16) {
Skip("pvc clone is only supported from v1.16+")
@ -1435,6 +1610,24 @@ var _ = Describe("RBD", func() {
validateRBDImageCount(f, 0, defaultRBDPool)
})

By("validate RBD migration+static FileSystem PVC", func() {
err := validateRBDStaticMigrationPV(f, appPath, false)
if err != nil {
e2elog.Failf("failed to validate rbd migrated static pv with error %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)
})

By("validate RBD migration+static Block PVC", func() {
err := validateRBDStaticMigrationPV(f, rawAppPath, true)
if err != nil {
e2elog.Failf("failed to validate rbd migrated static block pv with error %v", err)
}
// validate created backend rbd images
validateRBDImageCount(f, 0, defaultRBDPool)
})

By("validate failure of RBD static PVC without imageFeatures parameter", func() {
err := validateRBDStaticPV(f, rawAppPath, true, true)
if err != nil {
@ -649,9 +649,9 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla
return err
}

func deletePool(name string, cephfs bool, f *framework.Framework) error {
func deletePool(name string, cephFS bool, f *framework.Framework) error {
cmds := []string{}
if cephfs {
if cephFS {
// ceph fs fail
// ceph fs rm myfs --yes-i-really-mean-it
// ceph osd pool delete myfs-metadata myfs-metadata
@ -974,3 +974,22 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)

return err
}

func recreateCSIRBDPods(f *framework.Framework) error {
err := deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)",
cephCSINamespace, false)
if err != nil {
return fmt.Errorf("failed to delete pods with labels with error %w", err)
}
// wait for csi pods to come up
err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
return fmt.Errorf("timeout waiting for daemonset pods with error %w", err)
}
err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
return fmt.Errorf("timeout waiting for deployment to be in running state with error %w", err)
}

return nil
}
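A usage note on `recreateCSIRBDPods`: the driver only reads the configmap at startup, so a config change must be followed by a pod bounce. This mirrors what the migration test above does:

```go
// Illustrative: apply a new configmap, then bounce the RBD CSI pods so they
// pick it up before running migration validation.
if err := createCustomConfigMap(f.ClientSet, rbdDirPath, clusterInfo); err != nil {
    e2elog.Failf("failed to create configmap with error %v", err)
}
if err := recreateCSIRBDPods(f); err != nil {
    e2elog.Failf("failed to recreate rbd csi pods with error %v", err)
}
```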

@ -171,7 +171,7 @@ func deleteRBDSnapshotClass() error {
}

func createCephFSSnapshotClass(f *framework.Framework) error {
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml")
scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "snapshotclass.yaml")
sc := getSnapshotClass(scPath)
sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace
sc.Parameters["csi.storage.k8s.io/snapshotter-secret-name"] = cephFSProvisionerSecretName

141
e2e/staticpvc.go
@ -12,16 +12,26 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)

const (
staticPVSize = "4Gi"
staticPVImageFeature = "layering"
monsPrefix = "mons-"
imagePrefix = "image-"
migIdentifier = "mig"
intreeVolPrefix = "kubernetes-dynamic-pvc-"
)

// nolint:unparam // currently name receives pvName, this can change in the future
func getStaticPV(
name, volName, size, secretName, secretNS, sc, driverName string,
blockPV bool,
options map[string]string) *v1.PersistentVolume {
options, annotations map[string]string, policy v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
PersistentVolumeReclaimPolicy: policy,
Capacity: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(size),
},
@ -49,10 +59,17 @@ func getStaticPV(
volumeMode := v1.PersistentVolumeFilesystem
pv.Spec.VolumeMode = &volumeMode
}
if len(annotations) > 0 {
pv.Annotations = make(map[string]string)
for k, v := range annotations {
pv.Annotations[k] = v
}
}

return pv
}

// nolint:unparam // currently name receives the same name, this can change in the future
func getStaticPVC(name, pvName, size, ns, sc string, blockPVC bool) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
@ -104,12 +121,12 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
}
// remove new line present in fsID
fsID = strings.Trim(fsID, "\n")
size := "4Gi"
size := staticPVSize
// create rbd image
cmd := fmt.Sprintf(
"rbd create %s --size=%d --image-feature=layering %s",
"rbd create %s --size=%s --image-feature=layering %s",
rbdImageName,
4096,
staticPVSize,
rbdOptions(defaultRBDPool))

_, e, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
@ -121,7 +138,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
}
opt["clusterID"] = fsID
if !checkImgFeat {
opt["imageFeatures"] = "layering"
opt["imageFeatures"] = staticPVImageFeature
}
opt["pool"] = defaultRBDPool
opt["staticVolume"] = strconv.FormatBool(true)
@ -138,7 +155,8 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
sc,
"rbd.csi.ceph.com",
isBlock,
opt)
opt,
nil, retainPolicy)

_, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if err != nil {
@ -189,6 +207,102 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock, checkI
return err
}

func validateRBDStaticMigrationPV(f *framework.Framework, appPath string, isBlock bool) error {
opt := make(map[string]string)
var (
rbdImageName = "test-static-pv"
pvName = "pv-name"
pvcName = "pvc-name"
namespace = f.UniqueName
// minikube creates default class in cluster, we need to set dummy
// storageclass on PV and PVC to avoid storageclass name mismatch
sc = "storage-class"
)

c := f.ClientSet
mons, err := getMons(rookNamespace, c)
if err != nil {
return fmt.Errorf("failed to get mons: %w", err)
}
mon := strings.Join(mons, ",")
size := staticPVSize
// create rbd image
cmd := fmt.Sprintf(
"rbd create %s --size=%d --image-feature=layering %s",
rbdImageName,
4096,
rbdOptions(defaultRBDPool))

_, e, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
if err != nil {
return err
}
if e != "" {
return fmt.Errorf("failed to create rbd image %s", e)
}

opt["migration"] = "true"
opt["monitors"] = mon
opt["imageFeatures"] = staticPVImageFeature
opt["pool"] = defaultRBDPool
opt["staticVolume"] = strconv.FormatBool(true)
opt["imageName"] = rbdImageName
pv := getStaticPV(
pvName,
rbdImageName,
size,
rbdNodePluginSecretName,
cephCSINamespace,
sc,
"rbd.csi.ceph.com",
isBlock,
opt, nil, retainPolicy)

_, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("PV Create API error: %w", err)
}

pvc := getStaticPVC(pvcName, pvName, size, namespace, sc, isBlock)

_, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("PVC Create API error: %w", err)
}
// bind pvc to app
app, err := loadApp(appPath)
if err != nil {
return err
}

app.Namespace = namespace
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcName
err = createApp(f.ClientSet, app, deployTimeout)
if err != nil {
return err
}

err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
if err != nil {
return err
}

err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to delete pvc: %w", err)
}

err = c.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to delete pv: %w", err)
}

cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool))
_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)

return err
}

// nolint:gocyclo,cyclop // reduce complexity
func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
opt := make(map[string]string)
@ -282,7 +396,18 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro
opt["fsName"] = fsName
opt["staticVolume"] = strconv.FormatBool(true)
opt["rootPath"] = rootPath
pv := getStaticPV(pvName, pvName, "4Gi", secretName, cephCSINamespace, sc, "cephfs.csi.ceph.com", false, opt)
pv := getStaticPV(
pvName,
pvName,
staticPVSize,
secretName,
cephCSINamespace,
sc,
"cephfs.csi.ceph.com",
false,
opt,
nil,
retainPolicy)
_, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create PV: %w", err)

@ -37,7 +37,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
appKey = "app"
appLabel = "cephfs-upgrade-testing"
)
// deploy cephfs CSI
// deploy cephFS CSI
BeforeEach(func() {
if !upgradeTesting || !testCephFS {
Skip("Skipping CephFS Upgrade Test")
@ -60,7 +60,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
if err != nil {
e2elog.Failf("failed to upgrade csi with error %v", err)
}
err = createConfigMap(cephfsDirPath, f.ClientSet, f)
err = createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@ -108,7 +108,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// log all details from the namespace where Ceph-CSI is deployed
framework.DumpAllNamespaceInfo(c, cephCSINamespace)
}
err = deleteConfigMap(cephfsDirPath)
err = deleteConfigMap(cephFSDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
@ -124,11 +124,11 @@ var _ = Describe("CephFS Upgrade Testing", func() {
if err != nil {
e2elog.Failf("failed to delete node secret with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@ -148,22 +148,22 @@ var _ = Describe("CephFS Upgrade Testing", func() {
Context("Cephfs Upgrade Test", func() {
It("Cephfs Upgrade Test", func() {
By("checking provisioner deployment is running", func() {
err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
}
})
By("checking nodeplugin deamonset pods are running", func() {
err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for daemonset %s with error%v", cephfsDeamonSetName, err)
e2elog.Failf("timeout waiting for daemonset %s with error%v", cephFSDeamonSetName, err)
}
})

By("upgrade to latest changes and verify app re-mount", func() {
// TODO: fetch pvc size from spec.
pvcPath := cephfsExamplePath + "pvc.yaml"
appPath := cephfsExamplePath + "pod.yaml"
pvcPath := cephFSExamplePath + "pvc.yaml"
appPath := cephFSExamplePath + "pod.yaml"
data := "check data persists"
label := make(map[string]string)

@ -218,7 +218,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// pvc clone is only supported from v1.16+
if k8sVersionGreaterEquals(f.ClientSet, 1, 17) {
// Create snapshot of the pvc
snapshotPath := cephfsExamplePath + "snapshot.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"
snap := getSnapshot(snapshotPath)
snap.Name = "cephfs-pvc-snapshot"
snap.Namespace = f.UniqueName
@ -241,14 +241,14 @@ var _ = Describe("CephFS Upgrade Testing", func() {
}
deployCephfsPlugin()

err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephfsDeploymentName, err)
e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephFSDeploymentName, err)
}

err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephfsDeamonSetName, err)
e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephFSDeamonSetName, err)
}

app.Labels = label
@ -261,8 +261,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
})

By("Create clone from a snapshot", func() {
pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
appClonePath := cephfsExamplePath + "pod-restore.yaml"
pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
appClonePath := cephFSExamplePath + "pod-restore.yaml"
label := make(map[string]string)

// pvc clone is only supported from v1.16+
@ -310,7 +310,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
}

// Delete the snapshot of the parent pvc.
snapshotPath := cephfsExamplePath + "snapshot.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"
snap := getSnapshot(snapshotPath)
snap.Name = "cephfs-pvc-snapshot"
snap.Namespace = f.UniqueName
@ -324,8 +324,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
})

By("Create clone from existing PVC", func() {
pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
label := make(map[string]string)

// pvc clone is only supported from v1.16+
45 e2e/utils.go
@ -2,6 +2,7 @@ package e2e

import (
"context"
"crypto/md5" //nolint:gosec // hash generation
"encoding/base64"
"encoding/json"
"errors"
@ -102,6 +103,10 @@ func getMons(ns string, c kubernetes.Interface) ([]string, error) {
return services, nil
}

func getMonsHash(mons string) string {
return fmt.Sprintf("%x", md5.Sum([]byte(mons))) //nolint:gosec // hash generation
}

func getStorageClass(path string) (scv1.StorageClass, error) {
sc := scv1.StorageClass{}
err := unmarshal(path, &sc)
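The new getMonsHash helper gives the suite a short, stable fingerprint of the monitor endpoints; md5 is acceptable here because the hash only names things, it protects nothing. A minimal, self-contained sketch of the same idea follows (the monitor addresses are made up for illustration):

package main

import (
    "crypto/md5" //nolint:gosec // naming only, not security
    "fmt"
    "strings"
)

// monsHash mirrors the getMonsHash helper added above: a short, stable
// identifier derived from the comma-separated monitor list.
func monsHash(mons string) string {
    return fmt.Sprintf("%x", md5.Sum([]byte(mons)))
}

func main() {
    mons := strings.Join([]string{"10.0.0.1:6789", "10.0.0.2:6789"}, ",")
    // The value changes only when the monitor list changes, so it can be
    // used to detect a modified cluster configuration between test steps.
    fmt.Println(monsHash(mons))
}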
@ -422,7 +427,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
return err
}

func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framework) error {
func pvcDeleteWhenPoolNotFound(pvcPath string, cephFS bool, f *framework.Framework) error {
pvc, err := loadPVC(pvcPath)
if err != nil {
return err
@ -433,13 +438,13 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
if err != nil {
return err
}
if cephfs {
if cephFS {
err = deleteBackingCephFSVolume(f, pvc)
if err != nil {
return err
}
// delete cephfs filesystem
err = deletePool("myfs", cephfs, f)
// delete cephFS filesystem
err = deletePool("myfs", cephFS, f)
if err != nil {
return err
}
@ -449,7 +454,7 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
return err
}
// delete rbd pool
err = deletePool(defaultRBDPool, cephfs, f)
err = deletePool(defaultRBDPool, cephFS, f)
if err != nil {
return err
}
@ -753,7 +758,8 @@ func validatePVCClone(
func validatePVCSnapshot(
totalCount int,
pvcPath, appPath, snapshotPath, pvcClonePath, appClonePath string,
kms kmsConfig,
kms, restoreKMS kmsConfig,
restoreSCName string,
f *framework.Framework) {
var wg sync.WaitGroup
wgErrs := make([]error, totalCount)
@ -854,6 +860,9 @@ func validatePVCSnapshot(
pvcClone.Namespace = f.UniqueName
appClone.Namespace = f.UniqueName
pvcClone.Spec.DataSource.Name = fmt.Sprintf("%s%d", f.UniqueName, 0)
if restoreSCName != "" {
pvcClone.Spec.StorageClassName = &restoreSCName
}

// create multiple PVC from same snapshot
wg.Add(totalCount)
@ -867,6 +876,26 @@ func validatePVCSnapshot(
LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]),
}
wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
if wgErrs[n] == nil && restoreKMS != noKMS {
if restoreKMS.canGetPassphrase() {
imageData, sErr := getImageInfoFromPVC(p.Namespace, name, f)
if sErr != nil {
wgErrs[n] = fmt.Errorf(
"failed to get image info for %s namespace=%s volumehandle=%s error=%w",
name,
p.Namespace,
imageData.csiVolumeHandle,
sErr)
} else {
// check new passphrase created
_, stdErr := restoreKMS.getPassphrase(f, imageData.csiVolumeHandle)
if stdErr != "" {
wgErrs[n] = fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
}
}
}
wgErrs[n] = isEncryptedPVC(f, &p, &a)
}
if wgErrs[n] == nil {
filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
var checkSumClone string
@ -883,9 +912,6 @@ func validatePVCSnapshot(
checkSumClone)
}
}
if wgErrs[n] == nil && kms != noKMS {
wgErrs[n] = isEncryptedPVC(f, &p, &a)
}
wg.Done()
}(i, *pvcClone, *appClone)
}
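The new restoreKMS branch verifies that restoring a snapshot of an encrypted PVC results in a fresh passphrase for the new volume handle. A self-contained sketch of that verification follows; fakeKMS and the handle value are stand-ins invented for this note, not the e2e suite's real kmsConfig API:

package main

import (
    "errors"
    "fmt"
)

// fakeKMS stands in for a passphrase store such as Vault; it maps a CSI
// volume handle to the passphrase created for that volume.
type fakeKMS map[string]string

func (k fakeKMS) getPassphrase(volumeHandle string) (string, error) {
    p, ok := k[volumeHandle]
    if !ok {
        return "", errors.New("no passphrase stored for " + volumeHandle)
    }
    return p, nil
}

func main() {
    kms := fakeKMS{"csi-vol-restored-0": "generated-at-restore"}
    // The e2e check above has the same shape: look up the restored
    // volume's handle and fail the test when no passphrase was created.
    if _, err := kms.getPassphrase("csi-vol-restored-0"); err != nil {
        fmt.Println("restore verification failed:", err)
        return
    }
    fmt.Println("restored volume has its own passphrase")
}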
@ -1332,6 +1358,7 @@ func retryKubectlFile(namespace string, action kubectlAction, filename string, t
// retryKubectlArgs takes a namespace and action telling kubectl what to do
// with the passed arguments. This function retries until no error occurred, or
// the timeout passed.
// nolint:unparam // retryKubectlArgs will be used with kubectlDelete arg later on.
func retryKubectlArgs(namespace string, action kubectlAction, t int, args ...string) error {
timeout := time.Duration(t) * time.Minute
args = append([]string{string(action)}, args...)
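retryKubectlArgs turns the minutes argument into a deadline and re-runs the kubectl action until it succeeds or the deadline passes. The generic shape of that loop, with names invented for this sketch, looks like:

package main

import (
    "fmt"
    "time"
)

// retryUntil re-runs op until it succeeds or timeout elapses, sleeping
// interval between attempts; retryKubectlArgs follows the same pattern
// with the kubectl invocation as op.
func retryUntil(timeout, interval time.Duration, op func() error) error {
    deadline := time.Now().Add(timeout)
    for {
        err := op()
        if err == nil {
            return nil
        }
        if time.Now().After(deadline) {
            return fmt.Errorf("timed out, last error: %w", err)
        }
        time.Sleep(interval)
    }
}

func main() {
    attempts := 0
    err := retryUntil(2*time.Second, 100*time.Millisecond, func() error {
        attempts++
        if attempts < 3 {
            return fmt.Errorf("not ready yet")
        }
        return nil
    })
    fmt.Printf("err=%v after %d attempts\n", err, attempts)
}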
@ -44,7 +44,7 @@ data:
"<MONValueN>"
],
"cephFS": {
"subvolumeGroup": "<subvolumegroup for cephfs volumes>"
"subvolumeGroup": "<subvolumegroup for cephFS volumes>"
}
}
]
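This hunk only fixes the placeholder's casing, but it shows the shape of the per-cluster JSON in the config map: each entry carries a clusterID, a monitor list, and an optional cephFS.subvolumeGroup. A hedged sketch of decoding such a block follows; the struct is illustrative, not ceph-csi's internal configuration type:

package main

import (
    "encoding/json"
    "fmt"
)

// clusterInfo models one entry of the config-map JSON shown above; field
// names follow the document, the type itself is invented for this example.
type clusterInfo struct {
    ClusterID string   `json:"clusterID"`
    Monitors  []string `json:"monitors"`
    CephFS    struct {
        SubvolumeGroup string `json:"subvolumeGroup"`
    } `json:"cephFS"`
}

func main() {
    raw := `[{"clusterID":"cluster-1","monitors":["10.0.0.1:6789"],
        "cephFS":{"subvolumeGroup":"csi"}}]`
    var clusters []clusterInfo
    if err := json.Unmarshal([]byte(raw), &clusters); err != nil {
        panic(err)
    }
    fmt.Println(clusters[0].CephFS.SubvolumeGroup) // csi
}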
@ -71,10 +71,18 @@ parameters:

# (optional) ceph client log location, eg: rbd-nbd
# By default host-path /var/log/ceph of node is bind-mounted into
# csi-rbdplugin pod at /var/log/ceph mount path. See docs/rbd-nbd.md
# for available configuration options.
# csi-rbdplugin pod at /var/log/ceph mount path. This is to configure
# target bindmount path used inside container for ceph clients logging.
# See docs/rbd-nbd.md for available configuration options.
# cephLogDir: /var/log/ceph

# (optional) ceph client log strategy
# By default, log file belonging to a particular volume will be deleted
# on unmap, but you can choose to just compress instead of deleting it
# or even preserve the log file in text format as it is.
# Available options `remove` or `compress` or `preserve`
# cephLogStrategy: remove

# (optional) Prefix to use for naming RBD images.
# If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-"
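The comments above document exactly three accepted cephLogStrategy values. A tiny validator makes that contract explicit; the helper below is written for this note and is not part of the driver:

package main

import "fmt"

// validLogStrategy reports whether s is one of the cephLogStrategy values
// documented in the storageclass comments: remove, compress or preserve.
func validLogStrategy(s string) bool {
    switch s {
    case "remove", "compress", "preserve":
        return true
    default:
        return false
    }
}

func main() {
    for _, s := range []string{"remove", "gzip"} {
        fmt.Printf("%q valid: %v\n", s, validLogStrategy(s))
    }
}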
77 go.mod
@ -3,19 +3,20 @@ module github.com/ceph/ceph-csi
go 1.16

require (
github.com/aws/aws-sdk-go v1.40.34
github.com/aws/aws-sdk-go v1.40.50
github.com/ceph/ceph-csi/api v0.0.0-00010101000000-000000000000
github.com/ceph/go-ceph v0.11.0
github.com/container-storage-interface/spec v1.5.0
github.com/csi-addons/replication-lib-utils v0.2.0
github.com/csi-addons/spec v0.1.0
github.com/csi-addons/spec v0.1.1
github.com/golang/protobuf v1.5.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
github.com/hashicorp/vault/api v1.1.1
github.com/kubernetes-csi/csi-lib-utils v0.10.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.16.0
github.com/pborman/uuid v1.2.1
@ -23,53 +24,53 @@ require (
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2
google.golang.org/grpc v1.40.0
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.1
google.golang.org/grpc v1.41.0
k8s.io/api v0.22.2
k8s.io/apimachinery v0.22.2
k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v1.22.1
k8s.io/cloud-provider v0.22.2
k8s.io/klog/v2 v2.10.0
k8s.io/kubernetes v1.22.1
k8s.io/mount-utils v0.22.1
k8s.io/utils v0.0.0-20210802155522-efc7438f0176
sigs.k8s.io/controller-runtime v0.10.0
k8s.io/kubernetes v1.22.2
k8s.io/mount-utils v0.22.2
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
sigs.k8s.io/controller-runtime v0.10.1
)

replace (
code.cloudfoundry.org/gofileutils => github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f
github.com/ceph/ceph-csi/api => ./api
github.com/golang/protobuf => github.com/golang/protobuf v1.4.3
github.com/hashicorp/vault/api => github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
github.com/hashicorp/vault/sdk => github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b
github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
//
// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
//
k8s.io/api => k8s.io/api v0.22.1
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.1
k8s.io/apimachinery => k8s.io/apimachinery v0.22.1
k8s.io/apiserver => k8s.io/apiserver v0.22.1
k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.1
k8s.io/client-go => k8s.io/client-go v0.22.1
k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.1
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.1
k8s.io/code-generator => k8s.io/code-generator v0.22.1
k8s.io/component-base => k8s.io/component-base v0.22.1
k8s.io/component-helpers => k8s.io/component-helpers v0.22.1
k8s.io/controller-manager => k8s.io/controller-manager v0.22.1
k8s.io/cri-api => k8s.io/cri-api v0.22.1
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.1
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.1
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.1
k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.1
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.1
k8s.io/kubectl => k8s.io/kubectl v0.22.1
k8s.io/kubelet => k8s.io/kubelet v0.22.1
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.1
k8s.io/metrics => k8s.io/metrics v0.22.1
k8s.io/mount-utils => k8s.io/mount-utils v0.22.1
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.1
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.1
k8s.io/api => k8s.io/api v0.22.2
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.2
k8s.io/apimachinery => k8s.io/apimachinery v0.22.2
k8s.io/apiserver => k8s.io/apiserver v0.22.2
k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.2
k8s.io/client-go => k8s.io/client-go v0.22.2
k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.2
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.2
k8s.io/code-generator => k8s.io/code-generator v0.22.2
k8s.io/component-base => k8s.io/component-base v0.22.2
k8s.io/component-helpers => k8s.io/component-helpers v0.22.2
k8s.io/controller-manager => k8s.io/controller-manager v0.22.2
k8s.io/cri-api => k8s.io/cri-api v0.22.2
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.2
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.2
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.2
k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.2
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.2
k8s.io/kubectl => k8s.io/kubectl v0.22.2
k8s.io/kubelet => k8s.io/kubelet v0.22.2
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.2
k8s.io/metrics => k8s.io/metrics v0.22.2
k8s.io/mount-utils => k8s.io/mount-utils v0.22.2
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.2
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.2
)

// This tag doesn't exist, but is imported by github.com/portworx/sched-ops.
139 go.sum
@ -133,8 +133,8 @@ github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.34 h1:SBYmodndE2d4AYucuuJnOXk4MD1SFbucoIdpwKVKeSA=
github.com/aws/aws-sdk-go v1.40.34/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.40.50 h1:QP4NC9EZWBszbNo2UbG6bbObMtN35kCFb4h0r08q884=
github.com/aws/aws-sdk-go v1.40.50/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -155,7 +155,10 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
github.com/ceph/go-ceph v0.11.0 h1:A1pphV40LL8GQKDPpU4XqCa7gkmozsst7rhCC730/nk=
@ -183,7 +186,7 @@ github.com/cloudfoundry/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1
github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@ -233,10 +236,16 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/csi-addons/replication-lib-utils v0.2.0 h1:tGs42wfjkObbBo/98a3uxTFWEJ1dq5PIMqPWtdLd040=
github.com/csi-addons/replication-lib-utils v0.2.0/go.mod h1:ROQlEsc2EerVtc/K/C+6Hx8pqaQ9MVy9xFFpyKfI9lc=
github.com/csi-addons/spec v0.1.0 h1:y3TOd7qtnwBQPikGa1VvaL7ObyddAZehYW8DNGBlOyc=
github.com/csi-addons/spec v0.1.0/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/csi-addons/spec v0.1.1 h1:Bm9ZVCQ+nYMs7Y5PK+izkzCeer262W4rjCyGpuqu9C4=
github.com/csi-addons/spec v0.1.1/go.mod h1:Mwq4iLiUV4s+K1bszcWU6aMsR5KPsbIYzzszJ6+56vI=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -278,7 +287,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
@ -311,6 +320,7 @@ github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE
github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
@ -417,6 +427,7 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@ -485,8 +496,9 @@ github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o=
github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@ -507,6 +519,7 @@ github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4=
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
@ -569,8 +582,15 @@ github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.5/go.mod h1:b6RwFD1bny1zbf
github.com/hashicorp/vault-plugin-secrets-kv v0.5.5/go.mod h1:oNyUoMMQq6uNTwyYPnkldiedaknYbPfQIdKoyKQdy2g=
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2/go.mod h1:YRW9zn9NZNitRlPYNAWRp/YEdKCF/X8aOg8IYSxFT5Y=
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.3-0.20200518214608-746aba5fead6/go.mod h1:9Cy4Jp779BjuIOhYLjEfH3M3QCUxZgPnvJ3tAOOmof4=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a h1:1DIoo5Mqq4RKFpL2iOmrX7DJIdMLiAt1Tv5f8nMJqRI=
github.com/hashicorp/vault/api v1.0.1/go.mod h1:AV/+M5VPDpB90arloVX0rVDUIHkONiwz5Uza9HRtpUE=
github.com/hashicorp/vault/api v1.0.5-0.20190730042357-746c0b111519/go.mod h1:i9PKqwFko/s/aihU1uuHGh/FaQS+Xcgvd9dvnfAvQb0=
github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo=
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o=
github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE=
github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk=
github.com/hashicorp/vault/api v1.1.1 h1:907ld+Z9cALyvbZK2qUX9cLwvSaEQsMVQB3x2KE8+AI=
github.com/hashicorp/vault/api v1.1.1/go.mod h1:29UXcn/1cLOPHQNMWA7bCz2By4PSd0VKPAydKXS5yN0=
github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b h1:vQeIf4LdAqtYoD3N6KSiYilntYZq0F0vxcBTlx/69wg=
github.com/hashicorp/vault/sdk v0.1.14-0.20201116234512-b4d4137dfe8b/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
@ -661,8 +681,8 @@ github.com/libopenstorage/autopilot-api v0.6.1-0.20210128210103-5fbb67948648/go.
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
github.com/libopenstorage/openstorage v8.0.0+incompatible/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
github.com/libopenstorage/operator v0.0.0-20200725001727-48d03e197117/go.mod h1:Qh+VXOB6hj60VmlgsmY+R1w+dFuHK246UueM4SAqZG0=
github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec h1:ezv9ybzCRb86E8aMgG7/GcNSRU/72D0BVEhkNjnCEz8=
github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec/go.mod h1:gE8rSd6lwLNXNbiW3DrRZjFMs+y4fDHy/6uiOO9cdzY=
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a h1:A4GqCY7+LrLmO0F1EK27iLqa5ZuvWeaVwGvhmzFb12s=
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a/go.mod h1:gE8rSd6lwLNXNbiW3DrRZjFMs+y4fDHy/6uiOO9cdzY=
github.com/libopenstorage/stork v1.3.0-beta1.0.20200630005842-9255e7a98775/go.mod h1:qBSzYTJVHlOMg5RINNiHD1kBzlasnrc2uKLPZLgu1Qs=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
@ -786,15 +806,18 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20210927171657-636513e97fda h1:VoJmrqbFDuqzjlByItbjx/HxmReK4LC+X3Jt2Wv2Ogs=
github.com/openshift/api v0.0.0-20210927171657-636513e97fda/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8=
github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
@ -899,6 +922,7 @@ github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v2.19.9+incompatible h1:IrPVlK4nfwW10DF7pW+7YJKws9NkgNzWozwwWv9FsgY=
github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
@ -1044,6 +1068,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1184,6 +1209,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1332,6 +1358,7 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -1453,8 +1480,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
@ -1493,6 +1520,7 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76
gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
@ -1522,28 +1550,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/apiextensions-apiserver v0.22.1 h1:YSJYzlFNFSfUle+yeEXX0lSQyLEoxoPJySRupepb0gE=
k8s.io/apiextensions-apiserver v0.22.1/go.mod h1:HeGmorjtRmRLE+Q8dJu6AYRoZccvCMsghwS8XTUYb2c=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apiserver v0.22.1 h1:Ul9Iv8OMB2s45h2tl5XWPpAZo1VPIJ/6N+MESeed7L8=
k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400=
k8s.io/cli-runtime v0.22.1/go.mod h1:YqwGrlXeEk15Yn3em2xzr435UGwbrCw5x+COQoTYfoo=
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
k8s.io/cloud-provider v0.22.1 h1:bxNgHd0chiPpXQ8jzibRrbwuCRPrTgQiFSLbgVebzHs=
k8s.io/cloud-provider v0.22.1/go.mod h1:Dm3xJ4j3l88rZ0LBCRLrt7V9Pz0avRAzZSU6ENwYnrw=
k8s.io/cluster-bootstrap v0.22.1/go.mod h1:dSWw6aox00AA9YCdRDY+ca7TVtoXRzuLpDxhV6HPevk=
k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo=
k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo=
k8s.io/component-helpers v0.22.1 h1:f8pdhKNQbsCMQa6E9ipVlO8G6WFXnKbEDVcWB8n/HkA=
k8s.io/component-helpers v0.22.1/go.mod h1:QvBcDbX+qU5I2tMZABBF5fRwAlQwiv771IGBHK9WYh4=
k8s.io/controller-manager v0.22.1/go.mod h1:HN5qzvZs8A4fd/xuqDZwqe+Nsz249a2Kbq/YqZ903n8=
k8s.io/cri-api v0.22.1/go.mod h1:mj5DGUtElRyErU5AZ8EM0ahxbElYsaLAMTPhLPQ40Eg=
k8s.io/csi-translation-lib v0.22.1/go.mod h1:3MuSQekn6WWgWJk5vgufqoTjB4jqBEe04TtimXjubcE=
k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4=
k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA=
k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4=
k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI=
k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI=
k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
k8s.io/cloud-provider v0.22.2 h1:CiSDHMJiOd6qgYIP8ln9ueFHFU5Ld8TDZiYNIiMNbNk=
k8s.io/cloud-provider v0.22.2/go.mod h1:HUvZkUkV6dIKgWJQgGvnFhOeEHT87ZP39ij4K0fgkAs=
k8s.io/cluster-bootstrap v0.22.2/go.mod h1:ZkmQKprEqvrUccMnbRHISsMscA1dsQ8SffM9nHq6CgE=
k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M=
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug=
k8s.io/component-helpers v0.22.2 h1:guQ9oYclE5LMydWFfAFA+u7SQgQzz2g+YgpJ5QooSyY=
k8s.io/component-helpers v0.22.2/go.mod h1:+N61JAR9aKYSWbnLA88YcFr9K/6ISYvRNybX7QW7Rs8=
k8s.io/controller-manager v0.22.2/go.mod h1:zeDUbCc66IcMZ81U8qC5Z5pm9A8QkqD7839H8t7//yY=
k8s.io/cri-api v0.22.2/go.mod h1:mj5DGUtElRyErU5AZ8EM0ahxbElYsaLAMTPhLPQ40Eg=
k8s.io/csi-translation-lib v0.22.2/go.mod h1:HYNFNKFADblw8nVm3eshFVWdmiccxPHN+SUmTKG3Ctk=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE=
@ -1554,31 +1582,30 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE=
k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-aggregator v0.22.1/go.mod h1:VbmI+8fUeCPkzSvarWTrlIGEgUGEGI/66SFajDQ0Pdc=
k8s.io/kube-controller-manager v0.22.1/go.mod h1:TUXvgmBcDmpYyzDBW+naL0Ljo7IADv6HkYbxg0MLdJY=
k8s.io/kube-aggregator v0.22.2/go.mod h1:hsd0LEmVQSvMc0UzAwmcm/Gk3HzLp50mq/o6cu1ky2A=
k8s.io/kube-controller-manager v0.22.2/go.mod h1:n8Wh6HHmB+EBy3INhucPEeyZE05qtq8ZWcBgFREYwBk=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-proxy v0.22.1/go.mod h1:Rj3/mSZuSKfDY7XVbDgb12UwiQHy265LOwpK/yR2rtc=
k8s.io/kube-scheduler v0.22.1/go.mod h1:32YH9ef2m03E5LfD/H8TMTSppWq3Hav8LON9e+NGC3k=
k8s.io/kubectl v0.22.1 h1:kpXO+ajPNTzAVLDM9pAzCsWH9MtCMr92zpcvXMt7P6E=
k8s.io/kubectl v0.22.1/go.mod h1:mjAOgEbMNMtZWxnfM6jd+nPjPsaoLqO5xanc78WcSbw=
k8s.io/kubelet v0.22.1 h1:ssJ3DxLXtuC3lG4Gif8h1krw5ahSi1fNnzfyZW+Cn/Y=
k8s.io/kubelet v0.22.1/go.mod h1:rZuP1msr5NH7IGApW60DYFR3Cs3On4ftWLMJRfg+iU4=
k8s.io/kubernetes v1.22.1 h1:xE8OqErmoV/e67JV6/zExQA5sLTb44iW9fprXS2lL7I=
k8s.io/kubernetes v1.22.1/go.mod h1:IGQZrV02n2IBp52+/YwLVMurCEQPKXJ/k8hU3mqEOuA=
k8s.io/legacy-cloud-providers v0.22.1/go.mod h1:5ejdiQhOxTigKFrFcMvulMCyxxffmkZpk/WMgnknkwI=
k8s.io/metrics v0.22.1/go.mod h1:i/ZNap89UkV1gLa26dn7fhKAdheJaKy+moOqJbiif7E=
k8s.io/mount-utils v0.22.1 h1:3xMxnOKobldBJMLAEpsC9Y3Drw1aaXz/fhTtYes4VTE=
k8s.io/mount-utils v0.22.1/go.mod h1:gUi5ht+05KHYc/vJ9q9wbvG3MCYBeOsB5FdTyM60Pzo=
k8s.io/pod-security-admission v0.22.1/go.mod h1:JHgUW6u0VaFaRv2dWh/VYmKL5Hd8ZQCSVuegofSx+rY=
k8s.io/sample-apiserver v0.22.1/go.mod h1:1HfRH0fcTF33VZnwAN7fdq/vA+aF1iAhKCWdzKAX7iI=
k8s.io/kube-proxy v0.22.2/go.mod h1:pk0QwfYdTsg7aC9ycMF5MFbasIxhBAPFCvfwdmNikZs=
k8s.io/kube-scheduler v0.22.2/go.mod h1:aaElZivB8w1u8Ki7QcwuRSL7AcVWC7xa0LzeiT8zQ7I=
k8s.io/kubectl v0.22.2 h1:KMyYNZoBshaL3XKx04X07DtpoD4vMrdkfiN/G2Qx/PU=
k8s.io/kubectl v0.22.2/go.mod h1:BApg2j0edxLArCOfO0ievI27EeTQqBDMNU9VQH734iQ=
k8s.io/kubelet v0.22.2 h1:7ol5AXXxcW97dUE8W/QiPjkXu1ZuGshG5VmgDmviZsc=
k8s.io/kubelet v0.22.2/go.mod h1:ORIRua2/wTcx5UnEvxWosu650/8fatmzbMRC7m6WjAM=
k8s.io/kubernetes v1.22.2 h1:EkPl3JQjkm9UA7dteLJJQOEwTsJbVINEJtaHAzm/OvE=
k8s.io/kubernetes v1.22.2/go.mod h1:Snea7fgIObGgHmLbUJ3OgjGEr5bjj16iEdp5oHS6eS8=
k8s.io/legacy-cloud-providers v0.22.2/go.mod h1:oC6zhm9nhJ5M4VTDHzsO/4MpddZR5JqEt55zZ52JRMc=
k8s.io/metrics v0.22.2/go.mod h1:GUcsBtpsqQD1tKFS/2wCKu4ZBowwRncLOJH1rgWs3uw=
k8s.io/mount-utils v0.22.2 h1:w/CJq+Cofkr81Rp89UkokgEbuu8Js0LwMI/RWWEE+gs=
k8s.io/mount-utils v0.22.2/go.mod h1:dHl6c2P60T5LHUnZxVslyly9EDCMzvhtISO5aY+Z4sk=
k8s.io/pod-security-admission v0.22.2/go.mod h1:5FK/TIw6rySU522cZVueMcS/LPPovNHbsm1I1gLfVfU=
k8s.io/sample-apiserver v0.22.2/go.mod h1:h+/DIV5EmuNq4vfPr5TSXy9mIBVXXlPAKQMPbjPrlFM=
k8s.io/system-validators v1.5.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176 h1:Mx0aa+SUAcNRQbs5jUzV8lkDlGFU8laZsY9jrcVX5SY=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
@ -1592,8 +1619,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.10.0 h1:HgyZmMpjUOrtkaFtCnfxsR1bGRuFoAczSNbn2MoKj5U=
sigs.k8s.io/controller-runtime v0.10.0/go.mod h1:GCdh6kqV6IY4LK0JLwX0Zm6g233RtVGdb/f0+KSfprg=
sigs.k8s.io/controller-runtime v0.10.1 h1:+eLHgY/VrJWnfg6iXUqhCUqNXgPH1NZeP9drNAAgWlg=
sigs.k8s.io/controller-runtime v0.10.1/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs=
sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go=
@ -21,7 +21,9 @@ import (
"errors"
"fmt"

"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@ -53,11 +55,11 @@ type ControllerServer struct {
func (cs *ControllerServer) createBackingVolume(
ctx context.Context,
volOptions,
parentVolOpt *volumeOptions,
parentVolOpt *core.VolumeOptions,

vID,
pvID *volumeIdentifier,
sID *snapshotIdentifier) error {
pvID *core.VolumeIdentifier,
sID *core.SnapshotIdentifier) error {
var err error
if sID != nil {
if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
@ -67,7 +69,7 @@ func (cs *ControllerServer) createBackingVolume(
}
defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)

err = createCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
err = core.CreateCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
if err != nil {
log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)

@ -83,14 +85,14 @@ func (cs *ControllerServer) createBackingVolume(
return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)
err = createCloneFromSubvolume(
err = core.CreateCloneFromSubvolume(
ctx,
volumeID(pvID.FsSubvolName),
volumeID(vID.FsSubvolName),
fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vID.FsSubvolName),
volOptions,
parentVolOpt)
if err != nil {
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err)

return err
}
@ -98,7 +100,7 @@ func (cs *ControllerServer) createBackingVolume(
return nil
}

if err = createVolume(ctx, volOptions, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
if err = core.CreateVolume(ctx, volOptions, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size); err != nil {
log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)

return status.Error(codes.Internal, err.Error())
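Taken together, these hunks show createBackingVolume delegating to the core package while keeping its three-way dispatch: restore from a snapshot, clone from an existing subvolume, or create a fresh subvolume. A compressed, self-contained sketch of that control flow follows (stub types and functions invented for illustration; locking, logging and gRPC status codes elided):

package main

import "fmt"

// Stubs standing in for the core package helpers named in the diff.
type volumeOptions struct{ size int64 }

func createCloneFromSnapshot(parent, vol *volumeOptions) error  { return nil }
func createCloneFromSubvolume(parent, vol *volumeOptions) error { return nil }
func createVolume(vol *volumeOptions) error                     { return nil }

// createBackingVolume mirrors the controller's three-way dispatch.
func createBackingVolume(vol, parent *volumeOptions, fromSnapshot bool) error {
    switch {
    case fromSnapshot:
        return createCloneFromSnapshot(parent, vol)
    case parent != nil:
        return createCloneFromSubvolume(parent, vol)
    default:
        return createVolume(vol)
    }
}

func main() {
    err := createBackingVolume(&volumeOptions{size: 1 << 30}, nil, false)
    fmt.Println("fresh subvolume:", err)
}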
@ -110,7 +112,7 @@ func (cs *ControllerServer) createBackingVolume(
func checkContentSource(
ctx context.Context,
req *csi.CreateVolumeRequest,
cr *util.Credentials) (*volumeOptions, *volumeIdentifier, *snapshotIdentifier, error) {
cr *util.Credentials) (*core.VolumeOptions, *core.VolumeIdentifier, *core.SnapshotIdentifier, error) {
if req.VolumeContentSource == nil {
return nil, nil, nil, nil
}
@ -118,7 +120,7 @@ func checkContentSource(
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
snapshotID := req.VolumeContentSource.GetSnapshot().GetSnapshotId()
volOpt, _, sid, err := newSnapshotOptionsFromID(ctx, snapshotID, cr)
volOpt, _, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) {
return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@ -131,7 +133,7 @@ func checkContentSource(
case *csi.VolumeContentSource_Volume:
// Find the volume using the provided VolumeID
volID := req.VolumeContentSource.GetVolume().GetVolumeId()
parentVol, pvID, err := newVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
parentVol, pvID, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
if err != nil {
if !errors.Is(err, cerrors.ErrVolumeNotFound) {
return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@ -177,7 +179,7 @@ func (cs *ControllerServer) CreateVolume(
}
defer cs.VolumeLocks.Release(requestName)

volOptions, err := newVolumeOptions(ctx, requestName, req, cr)
volOptions, err := core.NewVolumeOptions(ctx, requestName, req, cr)
if err != nil {
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)

@ -197,9 +199,9 @@ func (cs *ControllerServer) CreateVolume(
defer parentVol.Destroy()
}

vID, err := checkVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
vID, err := core.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
if err != nil {
if isCloneRetryError(err) {
if cerrors.IsCloneRetryError(err) {
return nil, status.Error(codes.Aborted, err.Error())
}

@ -230,15 +232,15 @@ func (cs *ControllerServer) CreateVolume(
}

// Reservation
vID, err = reserveVol(ctx, volOptions, secret)
vID, err = core.ReserveVol(ctx, volOptions, secret)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

defer func() {
if err != nil {
if !isCloneRetryError(err) {
errDefer := undoVolReservation(ctx, volOptions, *vID, secret)
if !cerrors.IsCloneRetryError(err) {
errDefer := core.UndoVolReservation(ctx, volOptions, *vID, secret)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
requestName, errDefer)
@ -250,16 +252,16 @@ func (cs *ControllerServer) CreateVolume(
// Create a volume
err = cs.createBackingVolume(ctx, volOptions, parentVol, vID, pvID, sID)
if err != nil {
if isCloneRetryError(err) {
if cerrors.IsCloneRetryError(err) {
return nil, status.Error(codes.Aborted, err.Error())
}

return nil, err
}

volOptions.RootPath, err = volOptions.getVolumeRootPathCeph(ctx, volumeID(vID.FsSubvolName))
volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vID.FsSubvolName))
if err != nil {
purgeErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true)
purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true)
if purgeErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
// All errors other than ErrVolumeNotFound should return an error back to the caller
@ -311,7 +313,7 @@ func (cs *ControllerServer) DeleteVolume(
return nil, err
}

volID := volumeID(req.GetVolumeId())
volID := fsutil.VolumeID(req.GetVolumeId())
secrets := req.GetSecrets()

// lock out parallel delete operations
@ -331,7 +333,7 @@ func (cs *ControllerServer) DeleteVolume(
defer cs.OperationLocks.ReleaseDeleteLock(req.GetVolumeId())

// Find the volume using the provided VolumeID
volOptions, vID, err := newVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
volOptions, vID, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
if err != nil {
// if error is ErrPoolNotFound, the pool is already deleted we dont
// need to worry about deleting subvolume or omap data, return success
@ -362,7 +364,7 @@ func (cs *ControllerServer) DeleteVolume(
}
defer cs.VolumeLocks.Release(volOptions.RequestName)

if err = undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
if err = core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

@ -386,7 +388,7 @@ func (cs *ControllerServer) DeleteVolume(
}
defer cr.DeleteCredentials()

if err = volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), false); err != nil {
if err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
return nil, status.Error(codes.FailedPrecondition, err.Error())
@ -397,7 +399,7 @@ func (cs *ControllerServer) DeleteVolume(
}
}

if err := undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
if err := core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

@ -460,7 +462,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
}
defer cr.DeleteCredentials()

volOptions, volIdentifier, err := newVolumeOptionsFromVolID(ctx, volID, nil, secret)
volOptions, volIdentifier, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, secret)
if err != nil {
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)

@ -470,8 +472,8 @@ func (cs *ControllerServer) ControllerExpandVolume(

RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())

if err = volOptions.resizeVolume(ctx, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
if err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", fsutil.VolumeID(volIdentifier.FsSubvolName), err)

return nil, status.Error(codes.Internal, err.Error())
}
@ -497,7 +499,7 @@ func (cs *ControllerServer) CreateSnapshot(
}
defer cr.DeleteCredentials()

clusterData, err := getClusterInformation(req.GetParameters())
clusterData, err := core.GetClusterInformation(req.GetParameters())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@ -521,7 +523,7 @@ func (cs *ControllerServer) CreateSnapshot(
defer cs.OperationLocks.ReleaseSnapshotCreateLock(sourceVolID)

// Find the volume using the provided VolumeID
parentVolOptions, vid, err := newVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
parentVolOptions, vid, err := core.NewVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
@ -545,7 +547,7 @@ func (cs *ControllerServer) CreateSnapshot(
parentVolOptions.ClusterID)
}

cephfsSnap, genSnapErr := genSnapFromOptions(ctx, req)
cephfsSnap, genSnapErr := core.GenSnapFromOptions(ctx, req)
if genSnapErr != nil {
return nil, status.Error(codes.Internal, genSnapErr.Error())
}
@ -558,7 +560,7 @@ func (cs *ControllerServer) CreateSnapshot(
}
defer cs.VolumeLocks.Release(sourceVolID)
snapName := req.GetName()
sid, snapInfo, err := checkSnapExists(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
sid, snapInfo, err := core.CheckSnapExists(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@ -568,18 +570,18 @@ func (cs *ControllerServer) CreateSnapshot(
// as we are not able to retrieve the parent size we are rejecting the
// request to create snapshot.
// TODO: For this purpose we could make use of cached clusterAdditionalInfo too.
info, err := parentVolOptions.getSubVolumeInfo(ctx, volumeID(vid.FsSubvolName))
info, err := parentVolOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
if err != nil {
// Check error code value against ErrInvalidCommand to understand the cluster
// support it or not, It's safe to evaluate as the filtering
// is already done from getSubVolumeInfo() and send out the error here.
// is already done from GetSubVolumeInfo() and send out the error here.
if errors.Is(err, cerrors.ErrInvalidCommand) {
return nil, status.Error(
codes.FailedPrecondition,
"subvolume info command not supported in current ceph cluster")
}
if sid != nil {
errDefer := undoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
requestName, errDefer)
@ -592,8 +594,8 @@ func (cs *ControllerServer) CreateSnapshot(
if sid != nil {
// check snapshot is protected
protected := true
if !(snapInfo.Protected == snapshotIsProtected) {
err = parentVolOptions.protectSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(vid.FsSubvolName))
if !(snapInfo.Protected == core.SnapshotIsProtected) {
err = parentVolOptions.ProtectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(vid.FsSubvolName))
if err != nil {
protected = false
log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
@ -613,13 +615,13 @@ func (cs *ControllerServer) CreateSnapshot(
}

// Reservation
sID, err := reserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
|
||||
sID, err := core.ReserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
errDefer := undoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
|
||||
errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
|
||||
if errDefer != nil {
|
||||
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
|
||||
requestName, errDefer)
|
||||
@ -642,11 +644,15 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
}, nil
|
||||
}
|
||||
|
||||
func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snapshotName string) (snapshotInfo, error) {
|
||||
volID := volumeID(subvolumeName)
|
||||
snapID := volumeID(snapshotName)
|
||||
snap := snapshotInfo{}
|
||||
err := volOpt.createSnapshot(ctx, snapID, volID)
|
||||
func doSnapshot(
|
||||
ctx context.Context,
|
||||
volOpt *core.VolumeOptions,
|
||||
subvolumeName,
|
||||
snapshotName string) (core.SnapshotInfo, error) {
|
||||
volID := fsutil.VolumeID(subvolumeName)
|
||||
snapID := fsutil.VolumeID(snapshotName)
|
||||
snap := core.SnapshotInfo{}
|
||||
err := volOpt.CreateSnapshot(ctx, snapID, volID)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
|
||||
|
||||
@ -654,25 +660,25 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
dErr := volOpt.deleteSnapshot(ctx, snapID, volID)
|
||||
dErr := volOpt.DeleteSnapshot(ctx, snapID, volID)
|
||||
if dErr != nil {
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
snap, err = volOpt.getSnapshotInfo(ctx, snapID, volID)
|
||||
snap, err = volOpt.GetSnapshotInfo(ctx, snapID, volID)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
|
||||
|
||||
return snap, fmt.Errorf("failed to get snapshot info for snapshot:%s", snapID)
|
||||
}
|
||||
var t *timestamp.Timestamp
|
||||
t, err = parseTime(ctx, snap.CreatedAt)
|
||||
t, err = fsutil.ParseTime(ctx, snap.CreatedAt)
|
||||
if err != nil {
|
||||
return snap, err
|
||||
}
|
||||
snap.CreationTime = t
|
||||
err = volOpt.protectSnapshot(ctx, snapID, volID)
|
||||
err = volOpt.ProtectSnapshot(ctx, snapID, volID)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
|
||||
}
|
||||
@ -736,7 +742,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
}
|
||||
defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)
|
||||
|
||||
volOpt, snapInfo, sid, err := newSnapshotOptionsFromID(ctx, snapshotID, cr)
|
||||
volOpt, snapInfo, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, util.ErrPoolNotFound):
|
||||
@ -751,7 +757,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
// success as deletion is complete
|
||||
return &csi.DeleteSnapshotResponse{}, nil
|
||||
case errors.Is(err, cerrors.ErrSnapNotFound):
|
||||
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.FsSubvolName, sid.FsSnapshotName, err)
|
||||
@ -764,7 +770,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
// if the error is ErrVolumeNotFound, the subvolume is already deleted
|
||||
// from backend, Hence undo the omap entries and return success
|
||||
log.ErrorLog(ctx, "Volume not present")
|
||||
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.FsSubvolName, sid.FsSnapshotName, err)
|
||||
@ -791,17 +797,17 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
if snapInfo.HasPendingClones == "yes" {
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "snapshot %s has pending clones", snapshotID)
|
||||
}
|
||||
if snapInfo.Protected == snapshotIsProtected {
|
||||
err = volOpt.unprotectSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(sid.FsSubvolName))
|
||||
if snapInfo.Protected == core.SnapshotIsProtected {
|
||||
err = volOpt.UnprotectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
err = volOpt.deleteSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(sid.FsSubvolName))
|
||||
err = volOpt.DeleteSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.RequestName, sid.FsSnapshotName, err)
|
||||
|
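The controller hunks above all follow the same reserve/act/undo discipline: a name is reserved in the journal first, and a deferred call such as core.UndoSnapReservation rolls the reservation back if any later step fails. A minimal, self-contained sketch of that pattern, with the journal reduced to a plain map and all names hypothetical (this is not the ceph-csi API):

```go
package main

import (
	"errors"
	"fmt"
)

var journal = map[string]string{} // request name -> reserved backend name

func reserve(req string) (string, error) {
	name := "backend-" + req
	journal[req] = name
	return name, nil
}

func undoReservation(req string) { delete(journal, req) }

func createSnapshot(req string, doWork func() error) (err error) {
	if _, err = reserve(req); err != nil {
		return err
	}
	// Mirror the deferred core.UndoSnapReservation call in the diff:
	// only roll back the journal entry when a later step failed.
	defer func() {
		if err != nil {
			undoReservation(req)
		}
	}()
	return doWork()
}

func main() {
	err := createSnapshot("snap-1", func() error { return errors.New("backend failed") })
	fmt.Println(err, journal) // reservation was undone, journal is empty
}
```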
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"context"
 	"errors"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
+	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )

@@ -39,8 +40,8 @@ const (
 	// cephFSCloneComplete indicates that clone is in complete state.
 	cephFSCloneComplete = cephFSCloneState("complete")

-	// snapshotIsProtected string indicates that the snapshot is currently protected.
-	snapshotIsProtected = "yes"
+	// SnapshotIsProtected string indicates that the snapshot is currently protected.
+	SnapshotIsProtected = "yes"
 )

 // toError checks the state of the clone if it's not cephFSCloneComplete.
@@ -61,9 +62,13 @@ func (cs cephFSCloneState) toError() error {
 	return nil
 }

-func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volOpt, parentvolOpt *volumeOptions) error {
+func CreateCloneFromSubvolume(
+	ctx context.Context,
+	volID, cloneID fsutil.VolumeID,
+	volOpt,
+	parentvolOpt *VolumeOptions) error {
 	snapshotID := cloneID
-	err := parentvolOpt.createSnapshot(ctx, snapshotID, volID)
+	err := parentvolOpt.CreateSnapshot(ctx, snapshotID, volID)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)

@@ -77,17 +82,17 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 	)
 	defer func() {
 		if protectErr != nil {
-			err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID)
+			err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID)
 			if err != nil {
 				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
 			}
 		}

 		if cloneErr != nil {
-			if err = volOpt.purgeVolume(ctx, cloneID, true); err != nil {
+			if err = volOpt.PurgeVolume(ctx, cloneID, true); err != nil {
 				log.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
 			}
-			if err = parentvolOpt.unprotectSnapshot(ctx, snapshotID, volID); err != nil {
+			if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
 				// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 				// in that case we are safe and we could discard this error and we are good to go
 				// ahead with deletion
@@ -95,12 +100,12 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 				log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
 			}
 		}
-			if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
+			if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil {
 				log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
 			}
 		}
 	}()
-	protectErr = parentvolOpt.protectSnapshot(ctx, snapshotID, volID)
+	protectErr = parentvolOpt.ProtectSnapshot(ctx, snapshotID, volID)
 	if protectErr != nil {
 		log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)

@@ -127,14 +132,14 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 		return cloneState.toError()
 	}
 	// This is a work around to fix sizing issue for cloned images
-	err = volOpt.resizeVolume(ctx, cloneID, volOpt.Size)
+	err = volOpt.ResizeVolume(ctx, cloneID, volOpt.Size)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)

 		return err
 	}
 	// As we completed clone, remove the intermediate snap
-	if err = parentvolOpt.unprotectSnapshot(ctx, snapshotID, volID); err != nil {
+	if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
 		// In case the snap is already unprotected we get ErrSnapProtectionExist error code
 		// in that case we are safe and we could discard this error and we are good to go
 		// ahead with deletion
@@ -144,7 +149,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 			return err
 		}
 	}
-	if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
+	if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil {
 		log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)

 		return err
@@ -155,12 +160,12 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO

 func cleanupCloneFromSubvolumeSnapshot(
 	ctx context.Context,
-	volID, cloneID volumeID,
-	parentVolOpt *volumeOptions) error {
+	volID, cloneID fsutil.VolumeID,
+	parentVolOpt *VolumeOptions) error {
 	// snapshot name is same as clone name as we need a name which can be
 	// identified during PVC-PVC cloning.
 	snapShotID := cloneID
-	snapInfo, err := parentVolOpt.getSnapshotInfo(ctx, snapShotID, volID)
+	snapInfo, err := parentVolOpt.GetSnapshotInfo(ctx, snapShotID, volID)
 	if err != nil {
 		if errors.Is(err, cerrors.ErrSnapNotFound) {
 			return nil
@@ -169,15 +174,15 @@ func cleanupCloneFromSubvolumeSnapshot(
 		return err
 	}

-	if snapInfo.Protected == snapshotIsProtected {
-		err = parentVolOpt.unprotectSnapshot(ctx, snapShotID, volID)
+	if snapInfo.Protected == SnapshotIsProtected {
+		err = parentVolOpt.UnprotectSnapshot(ctx, snapShotID, volID)
 		if err != nil {
 			log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)

 			return err
 		}
 	}
-	err = parentVolOpt.deleteSnapshot(ctx, snapShotID, volID)
+	err = parentVolOpt.DeleteSnapshot(ctx, snapShotID, volID)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)

@@ -187,33 +192,32 @@ func cleanupCloneFromSubvolumeSnapshot(
 	return nil
 }

-// isCloneRetryError returns true if the clone error is pending,in-progress
-// error.
-func isCloneRetryError(err error) bool {
-	return errors.Is(err, cerrors.ErrCloneInProgress) || errors.Is(err, cerrors.ErrClonePending)
-}
-
-func createCloneFromSnapshot(
+func CreateCloneFromSnapshot(
 	ctx context.Context,
-	parentVolOpt, volOptions *volumeOptions,
-	vID *volumeIdentifier,
-	sID *snapshotIdentifier) error {
-	snapID := volumeID(sID.FsSnapshotName)
-	err := parentVolOpt.cloneSnapshot(ctx, volumeID(sID.FsSubvolName), snapID, volumeID(vID.FsSubvolName), volOptions)
+	parentVolOpt, volOptions *VolumeOptions,
+	vID *VolumeIdentifier,
+	sID *SnapshotIdentifier) error {
+	snapID := fsutil.VolumeID(sID.FsSnapshotName)
+	err := parentVolOpt.cloneSnapshot(
+		ctx,
+		fsutil.VolumeID(sID.FsSubvolName),
+		snapID,
+		fsutil.VolumeID(vID.FsSubvolName),
+		volOptions)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
-			if !isCloneRetryError(err) {
-				if dErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true); dErr != nil {
+			if !cerrors.IsCloneRetryError(err) {
+				if dErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true); dErr != nil {
 					log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
 				}
 			}
 		}
 	}()

-	cloneState, err := volOptions.getCloneState(ctx, volumeID(vID.FsSubvolName))
+	cloneState, err := volOptions.getCloneState(ctx, fsutil.VolumeID(vID.FsSubvolName))
 	if err != nil {
 		log.ErrorLog(ctx, "failed to get clone state: %v", err)

@@ -226,7 +230,7 @@ func createCloneFromSnapshot(
 	// The clonedvolume currently does not reflect the proper size due to an issue in cephfs
 	// however this is getting addressed in cephfs and the parentvolume size will be reflected
 	// in the new cloned volume too. Till then we are explicitly making the size set
-	err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
+	err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
 	if err != nil {
 		log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)

@@ -236,7 +240,7 @@ func createCloneFromSnapshot(
 	return nil
 }

-func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cephFSCloneState, error) {
+func (vo *VolumeOptions) getCloneState(ctx context.Context, volID fsutil.VolumeID) (cephFSCloneState, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(
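CreateCloneFromSubvolume above tracks two separate error variables (protectErr, cloneErr) so a single deferred function can unwind exactly the stage that failed. A hedged, self-contained sketch of that design (function names here are illustrative, not the ceph-csi API):

```go
package main

import "fmt"

// createClone runs protect then clone; the deferred block inspects which
// stage failed and cleans up only what was actually created, mirroring
// the two-error defer in CreateCloneFromSubvolume.
func createClone(protect, clone func() error, cleanup func(stage string)) (err error) {
	var protectErr, cloneErr error
	defer func() {
		if protectErr != nil {
			cleanup("snapshot") // snapshot exists but was never protected
		}
		if cloneErr != nil {
			cleanup("clone") // purge the half-created clone, then the snapshot
		}
	}()
	if protectErr = protect(); protectErr != nil {
		return protectErr
	}
	if cloneErr = clone(); cloneErr != nil {
		return cloneErr
	}
	return nil
}

func main() {
	err := createClone(
		func() error { return nil },
		func() error { return fmt.Errorf("clone failed") },
		func(stage string) { fmt.Println("cleaning up:", stage) },
	)
	fmt.Println(err)
}
```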
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"testing"
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"context"
@@ -25,7 +25,7 @@ import (
 	"github.com/ceph/ceph-csi/internal/util/log"
 )

-func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
+func (vo *VolumeOptions) getFscID(ctx context.Context) (int64, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
@@ -51,7 +51,7 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
 	return 0, cerrors.ErrVolumeNotFound
 }

-func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
+func (vo *VolumeOptions) getMetadataPool(ctx context.Context) (string, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
@@ -75,7 +75,7 @@ func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
 	return "", fmt.Errorf("%w: could not find metadata pool for %s", util.ErrPoolNotFound, vo.FsName)
 }

-func (vo *volumeOptions) getFsName(ctx context.Context) (string, error) {
+func (vo *VolumeOptions) getFsName(ctx context.Context) (string, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)
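Throughout these hunks, raw strings are replaced by the defined type fsutil.VolumeID. A minimal illustration of why a defined string type helps in code that juggles subvolume and snapshot names (names here are illustrative, not the ceph-csi API):

```go
package main

import "fmt"

// VolumeID is a defined string type: passing a plain string where a
// VolumeID is expected is a compile error, so subvolume and snapshot
// names cannot be mixed up silently.
type VolumeID string

func purgeVolume(id VolumeID) { fmt.Println("purging", id) }

func main() {
	name := "csi-vol-1234"
	purgeVolume(VolumeID(name)) // explicit conversion required
	// purgeVolume(name)        // would not compile
}
```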
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"context"
@@ -22,20 +22,32 @@ import (
 	"fmt"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
+	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
+	"github.com/ceph/ceph-csi/internal/journal"
 	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"

 	"github.com/golang/protobuf/ptypes/timestamp"
 )

-// volumeIdentifier structure contains an association between the CSI VolumeID to its subvolume
+var (
+	// VolJournal is used to maintain RADOS based journals for CO generated.
+	// VolumeName to backing CephFS subvolumes.
+	VolJournal *journal.Config
+
+	// SnapJournal is used to maintain RADOS based journals for CO generated.
+	// SnapshotName to backing CephFS subvolumes.
+	SnapJournal *journal.Config
+)
+
+// VolumeIdentifier structure contains an association between the CSI VolumeID to its subvolume
 // name on the backing CephFS instance.
-type volumeIdentifier struct {
+type VolumeIdentifier struct {
 	FsSubvolName string
 	VolumeID     string
 }

-type snapshotIdentifier struct {
+type SnapshotIdentifier struct {
 	FsSnapshotName string
 	SnapshotID     string
 	RequestName    string
@@ -44,7 +56,7 @@ type snapshotIdentifier struct {
 }

 /*
-checkVolExists checks to determine if passed in RequestName in volOptions exists on the backend.
+CheckVolExists checks to determine if passed in RequestName in volOptions exists on the backend.

 **NOTE:** These functions manipulate the rados omaps that hold information regarding
 volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
@@ -58,16 +70,16 @@ request name lock, and hence any stale omaps are leftovers from incomplete trans
 hence safe to garbage collect.
 */
 // nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
-func checkVolExists(ctx context.Context,
+func CheckVolExists(ctx context.Context,
 	volOptions,
-	parentVolOpt *volumeOptions,
+	parentVolOpt *VolumeOptions,

-	pvID *volumeIdentifier,
-	sID *snapshotIdentifier,
-	cr *util.Credentials) (*volumeIdentifier, error) {
-	var vid volumeIdentifier
+	pvID *VolumeIdentifier,
+	sID *SnapshotIdentifier,
+	cr *util.Credentials) (*VolumeIdentifier, error) {
+	var vid VolumeIdentifier
 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return nil, err
 	}
@@ -85,13 +97,13 @@ func checkVolExists(ctx context.Context,
 	vid.FsSubvolName = imageData.ImageAttributes.ImageName

 	if sID != nil || pvID != nil {
-		cloneState, cloneStateErr := volOptions.getCloneState(ctx, volumeID(vid.FsSubvolName))
+		cloneState, cloneStateErr := volOptions.getCloneState(ctx, fsutil.VolumeID(vid.FsSubvolName))
 		if cloneStateErr != nil {
 			if errors.Is(cloneStateErr, cerrors.ErrVolumeNotFound) {
 				if pvID != nil {
 					err = cleanupCloneFromSubvolumeSnapshot(
-						ctx, volumeID(pvID.FsSubvolName),
-						volumeID(vid.FsSubvolName),
+						ctx, fsutil.VolumeID(pvID.FsSubvolName),
+						fsutil.VolumeID(vid.FsSubvolName),
 						parentVolOpt)
 					if err != nil {
 						return nil, err
@@ -112,7 +124,7 @@ func checkVolExists(ctx context.Context,
 			return nil, cerrors.ErrClonePending
 		}
 		if cloneState == cephFSCloneFailed {
-			err = volOptions.purgeVolume(ctx, volumeID(vid.FsSubvolName), true)
+			err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vid.FsSubvolName), true)
 			if err != nil {
 				log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)

@@ -120,8 +132,8 @@ func checkVolExists(ctx context.Context,
 			}
 			if pvID != nil {
 				err = cleanupCloneFromSubvolumeSnapshot(
-					ctx, volumeID(pvID.FsSubvolName),
-					volumeID(vid.FsSubvolName),
+					ctx, fsutil.VolumeID(pvID.FsSubvolName),
+					fsutil.VolumeID(vid.FsSubvolName),
 					parentVolOpt)
 				if err != nil {
 					return nil, err
@@ -136,7 +148,7 @@ func checkVolExists(ctx context.Context,
 			return nil, fmt.Errorf("clone is not in complete state for %s", vid.FsSubvolName)
 		}
 	}
-	volOptions.RootPath, err = volOptions.getVolumeRootPathCeph(ctx, volumeID(vid.FsSubvolName))
+	volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
 	if err != nil {
 		if errors.Is(err, cerrors.ErrVolumeNotFound) {
 			// If the subvolume is not present, cleanup the stale snapshot
@@ -144,8 +156,8 @@ func checkVolExists(ctx context.Context,
 			if parentVolOpt != nil && pvID != nil {
 				err = cleanupCloneFromSubvolumeSnapshot(
 					ctx,
-					volumeID(pvID.FsSubvolName),
-					volumeID(vid.FsSubvolName),
+					fsutil.VolumeID(pvID.FsSubvolName),
+					fsutil.VolumeID(vid.FsSubvolName),
 					parentVolOpt)
 				if err != nil {
 					return nil, err
@@ -168,7 +180,7 @@ func checkVolExists(ctx context.Context,

 	// found a volume already available, process and return it!
 	vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
-		"", volOptions.ClusterID, imageUUID, volIDVersion)
+		"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
 	if err != nil {
 		return nil, err
 	}
@@ -179,8 +191,8 @@ func checkVolExists(ctx context.Context,
 	if parentVolOpt != nil && pvID != nil {
 		err = cleanupCloneFromSubvolumeSnapshot(
 			ctx,
-			volumeID(pvID.FsSubvolName),
-			volumeID(vid.FsSubvolName),
+			fsutil.VolumeID(pvID.FsSubvolName),
+			fsutil.VolumeID(vid.FsSubvolName),
 			parentVolOpt)
 		if err != nil {
 			return nil, err
@@ -190,11 +202,11 @@ func checkVolExists(ctx context.Context,
 	return &vid, nil
 }

-// undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName.
-func undoVolReservation(
+// UndoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName.
+func UndoVolReservation(
 	ctx context.Context,
-	volOptions *volumeOptions,
-	vid volumeIdentifier,
+	volOptions *VolumeOptions,
+	vid VolumeIdentifier,
 	secret map[string]string) error {
 	cr, err := util.NewAdminCredentials(secret)
 	if err != nil {
@@ -203,7 +215,7 @@ func undoVolReservation(
 	defer cr.DeleteCredentials()

 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return err
 	}
@@ -215,7 +227,7 @@ func undoVolReservation(
 	return err
 }

-func updateTopologyConstraints(volOpts *volumeOptions) error {
+func updateTopologyConstraints(volOpts *VolumeOptions) error {
 	// update request based on topology constrained parameters (if present)
 	poolName, _, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
 	if err != nil {
@@ -229,11 +241,11 @@ func updateTopologyConstraints(volOpts *volumeOptions) error {
 	return nil
 }

-// reserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and,
+// ReserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and,
 // to generate the volume identifier for the reserved UUID.
-func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[string]string) (*volumeIdentifier, error) {
+func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[string]string) (*VolumeIdentifier, error) {
 	var (
-		vid volumeIdentifier
+		vid VolumeIdentifier
 		imageUUID string
 		err error
 	)
@@ -250,7 +262,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
 	}

 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return nil, err
 	}
@@ -266,7 +278,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin

 	// generate the volume ID to return to the CO system
 	vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
-		"", volOptions.ClusterID, imageUUID, volIDVersion)
+		"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
 	if err != nil {
 		return nil, err
 	}
@@ -277,22 +289,22 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
 	return &vid, nil
 }

-// reserveSnap is a helper routine to request a UUID reservation for the CSI SnapName and,
+// ReserveSnap is a helper routine to request a UUID reservation for the CSI SnapName and,
 // to generate the snapshot identifier for the reserved UUID.
-func reserveSnap(
+func ReserveSnap(
 	ctx context.Context,
-	volOptions *volumeOptions,
+	volOptions *VolumeOptions,
 	parentSubVolName string,
-	snap *cephfsSnapshot,
-	cr *util.Credentials) (*snapshotIdentifier, error) {
+	snap *CephfsSnapshot,
+	cr *util.Credentials) (*SnapshotIdentifier, error) {
 	var (
-		vid snapshotIdentifier
+		vid SnapshotIdentifier
 		imageUUID string
 		err error
 	)

 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return nil, err
 	}
@@ -308,7 +320,7 @@ func reserveSnap(

 	// generate the snapshot ID to return to the CO system
 	vid.SnapshotID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
-		"", volOptions.ClusterID, imageUUID, volIDVersion)
+		"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
 	if err != nil {
 		return nil, err
 	}
@@ -319,15 +331,15 @@ func reserveSnap(
 	return &vid, nil
 }

-// undoSnapReservation is a helper routine to undo a name reservation for a CSI SnapshotName.
-func undoSnapReservation(
+// UndoSnapReservation is a helper routine to undo a name reservation for a CSI SnapshotName.
+func UndoSnapReservation(
 	ctx context.Context,
-	volOptions *volumeOptions,
-	vid snapshotIdentifier,
+	volOptions *VolumeOptions,
+	vid SnapshotIdentifier,
 	snapName string,
 	cr *util.Credentials) error {
 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return err
 	}
@@ -340,7 +352,7 @@ func undoSnapReservation(
 }

 /*
-checkSnapExists checks to determine if passed in RequestName in volOptions exists on the backend.
+CheckSnapExists checks to determine if passed in RequestName in volOptions exists on the backend.

 **NOTE:** These functions manipulate the rados omaps that hold information regarding
 volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
@@ -353,14 +365,14 @@ because, the order of omap creation and deletion are inverse of each other, and
 request name lock, and hence any stale omaps are leftovers from incomplete transactions and are
 hence safe to garbage collect.
 */
-func checkSnapExists(
+func CheckSnapExists(
 	ctx context.Context,
-	volOptions *volumeOptions,
+	volOptions *VolumeOptions,
 	parentSubVolName string,
-	snap *cephfsSnapshot,
-	cr *util.Credentials) (*snapshotIdentifier, *snapshotInfo, error) {
+	snap *CephfsSnapshot,
+	cr *util.Credentials) (*SnapshotIdentifier, *SnapshotInfo, error) {
 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -374,11 +386,11 @@ func checkSnapExists(
 	if snapData == nil {
 		return nil, nil, nil
 	}
-	sid := &snapshotIdentifier{}
+	sid := &SnapshotIdentifier{}
 	snapUUID := snapData.ImageUUID
 	snapID := snapData.ImageAttributes.ImageName
 	sid.FsSnapshotName = snapData.ImageAttributes.ImageName
-	snapInfo, err := volOptions.getSnapshotInfo(ctx, volumeID(snapID), volumeID(parentSubVolName))
+	snapInfo, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
 	if err != nil {
 		if errors.Is(err, cerrors.ErrSnapNotFound) {
 			err = j.UndoReservation(ctx, volOptions.MetadataPool,
@@ -392,7 +404,7 @@ func checkSnapExists(

 	defer func() {
 		if err != nil {
-			err = volOptions.deleteSnapshot(ctx, volumeID(snapID), volumeID(parentSubVolName))
+			err = volOptions.DeleteSnapshot(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
 			if err != nil {
 				log.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)

@@ -405,7 +417,7 @@ func checkSnapExists(
 			}
 		}
 	}()
-	tm, err := parseTime(ctx, snapInfo.CreatedAt)
+	tm, err := fsutil.ParseTime(ctx, snapInfo.CreatedAt)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -413,7 +425,7 @@ func checkSnapExists(
 	sid.CreationTime = tm
 	// found a snapshot already available, process and return it!
 	sid.SnapshotID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
-		"", volOptions.ClusterID, snapUUID, volIDVersion)
+		"", volOptions.ClusterID, snapUUID, fsutil.VolIDVersion)
 	if err != nil {
 		return nil, nil, err
 	}
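The doc comments on CheckVolExists/CheckSnapExists describe a journal-first lookup: consult the rados omap, verify the backend object still exists, and garbage-collect the omap entry if it does not. A rough, self-contained sketch of that flow with the journal and backend reduced to maps (all names hypothetical, not the ceph-csi API):

```go
package main

import "fmt"

var (
	journal = map[string]string{"pvc-1": "csi-vol-a"} // request name -> subvolume
	backend = map[string]bool{}                       // subvolume -> exists
)

// checkVolExists returns the already-reserved subvolume for a request,
// or false if the caller should provision from scratch. Stale journal
// entries are safe to collect because callers hold the request lock.
func checkVolExists(request string) (string, bool) {
	subvol, ok := journal[request]
	if !ok {
		return "", false // nothing reserved yet
	}
	if !backend[subvol] {
		delete(journal, request) // leftover of an incomplete transaction
		return "", false
	}
	return subvol, true
}

func main() {
	fmt.Println(checkVolExists("pvc-1")) // stale entry is collected: "" false
}
```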
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"context"
@@ -22,6 +22,7 @@ import (
 	"time"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
+	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	"github.com/ceph/ceph-csi/internal/util/log"

 	"github.com/ceph/go-ceph/cephfs/admin"
@@ -35,8 +36,8 @@ const (
 	autoProtect = "snapshot-autoprotect"
 )

-// cephfsSnapshot represents a CSI snapshot and its cluster information.
-type cephfsSnapshot struct {
+// CephfsSnapshot represents a CSI snapshot and its cluster information.
+type CephfsSnapshot struct {
 	NamePrefix string
 	Monitors   string
 	// MetadataPool & Pool fields are not used atm. But its definitely good to have it in this struct
@@ -49,7 +50,7 @@ type cephfsSnapshot struct {
 	ReservedID string
 }

-func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volumeID) error {
+func (vo *VolumeOptions) CreateSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
@@ -68,7 +69,7 @@ func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volum
 	return nil
 }

-func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volumeID) error {
+func (vo *VolumeOptions) DeleteSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
@@ -87,15 +88,15 @@ func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volum
 	return nil
 }

-type snapshotInfo struct {
+type SnapshotInfo struct {
 	CreatedAt        time.Time
 	CreationTime     *timestamp.Timestamp
 	HasPendingClones string
 	Protected        string
 }

-func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volumeID) (snapshotInfo, error) {
-	snap := snapshotInfo{}
+func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsutil.VolumeID) (SnapshotInfo, error) {
+	snap := SnapshotInfo{}
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
@@ -125,7 +126,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
 	return snap, nil
 }

-func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volumeID) error {
+func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
 	// If "snapshot-autoprotect" feature is present, The ProtectSnapshot
 	// call should be treated as a no-op.
 	if checkSubvolumeHasFeature(autoProtect, vo.Features) {
@@ -158,7 +159,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
 	return nil
 }

-func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID volumeID) error {
+func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
 	// If "snapshot-autoprotect" feature is present, The UnprotectSnapshot
 	// call should be treated as a no-op.
 	if checkSubvolumeHasFeature(autoProtect, vo.Features) {
@@ -193,10 +194,10 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
 	return nil
 }

-func (vo *volumeOptions) cloneSnapshot(
+func (vo *VolumeOptions) cloneSnapshot(
 	ctx context.Context,
-	volID, snapID, cloneID volumeID,
-	cloneVolOptions *volumeOptions,
+	volID, snapID, cloneID fsutil.VolumeID,
+	cloneVolOptions *VolumeOptions,
 ) error {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
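ProtectSnapshot and UnprotectSnapshot above become no-ops when the subvolume advertises the "snapshot-autoprotect" feature. A sketch of that gating; checkSubvolumeHasFeature is reimplemented here for illustration only, under the assumption that it is a simple membership test:

```go
package main

import "fmt"

// checkSubvolumeHasFeature reports whether a feature string is present
// in the subvolume's advertised feature list (assumed implementation).
func checkSubvolumeHasFeature(feature string, features []string) bool {
	for _, f := range features {
		if f == feature {
			return true
		}
	}
	return false
}

func protectSnapshot(features []string) {
	if checkSubvolumeHasFeature("snapshot-autoprotect", features) {
		fmt.Println("autoprotect present: nothing to do")
		return
	}
	fmt.Println("issuing explicit protect call")
}

func main() {
	protectSnapshot([]string{"snapshot-clone", "snapshot-autoprotect"})
}
```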
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"context"
@@ -24,6 +24,7 @@ import (
 	"strings"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
+	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"

@@ -39,8 +40,6 @@ import (
 var clusterAdditionalInfo = make(map[string]*localClusterState)

 const (
-	cephEntityClientPrefix = "client."
-
 	// modeAllRWX can be used for setting permissions to Read-Write-eXecute
 	// for User, Group and Other.
 	modeAllRWX = 0o777
@@ -54,11 +53,11 @@ type Subvolume struct {
 	Features []string
 }

-func getVolumeRootPathCephDeprecated(volID volumeID) string {
+func GetVolumeRootPathCephDeprecated(volID fsutil.VolumeID) string {
 	return path.Join("/", "csi-volumes", string(volID))
 }

-func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volumeID) (string, error) {
+func (vo *VolumeOptions) GetVolumeRootPathCeph(ctx context.Context, volID fsutil.VolumeID) (string, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin err %s", err)
@@ -78,7 +77,7 @@ func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volume
 	return svPath, nil
 }

-func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (*Subvolume, error) {
+func (vo *VolumeOptions) GetSubVolumeInfo(ctx context.Context, volID fsutil.VolumeID) (*Subvolume, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
@@ -141,7 +140,7 @@ type localClusterState struct {
 	subVolumeGroupCreated bool
 }

-func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID, bytesQuota int64) error {
+func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.VolumeID, bytesQuota int64) error {
 	// verify if corresponding ClusterID key is present in the map,
 	// and if not, initialize with default values(false).
 	if _, keyPresent := clusterAdditionalInfo[volOptions.ClusterID]; !keyPresent {
@@ -192,10 +191,10 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
 	return nil
 }

-// resizeVolume will try to use ceph fs subvolume resize command to resize the
+// ResizeVolume will try to use ceph fs subvolume resize command to resize the
 // subvolume. If the command is not available as a fallback it will use
 // CreateVolume to resize the subvolume.
-func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytesQuota int64) error {
+func (vo *VolumeOptions) ResizeVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
 	// keyPresent checks whether corresponding clusterID key is present in clusterAdditionalInfo
 	var keyPresent bool
 	// verify if corresponding ClusterID key is present in the map,
@@ -229,10 +228,10 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
 	}
 	clusterAdditionalInfo[vo.ClusterID].resizeState = unsupported

-	return createVolume(ctx, vo, volID, bytesQuota)
+	return CreateVolume(ctx, vo, volID, bytesQuota)
 }

-func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force bool) error {
+func (vo *VolumeOptions) PurgeVolume(ctx context.Context, volID fsutil.VolumeID, force bool) error {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
 		log.ErrorLog(ctx, "could not get FSAdmin %s:", err)
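ResizeVolume's doc comment describes a fallback: try the native resize command, and if the cluster does not support it, fall back to CreateVolume (which re-applies the quota), caching the per-cluster capability in clusterAdditionalInfo so the unsupported command is not retried. A simplified, self-contained sketch of that idea; note the real code only falls back on an "invalid command" error, which this sketch glosses over, and all names here are hypothetical:

```go
package main

import "fmt"

var resizeSupported = map[string]bool{} // clusterID -> cached capability

func resizeVolume(clusterID string, nativeResize, createWithQuota func() error) error {
	if supported, seen := resizeSupported[clusterID]; !seen || supported {
		if err := nativeResize(); err == nil {
			resizeSupported[clusterID] = true
			return nil
		}
		// Remember that this cluster lacks the resize command so we
		// skip straight to the fallback next time.
		resizeSupported[clusterID] = false
	}
	return createWithQuota()
}

func main() {
	err := resizeVolume("ceph-a",
		func() error { return fmt.Errorf("invalid command") },
		func() error { fmt.Println("fallback: create with new quota"); return nil },
	)
	fmt.Println(err)
}
```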
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package cephfs
+package core

 import (
 	"context"
@@ -26,13 +26,12 @@ import (
 	"github.com/container-storage-interface/spec/lib/go/csi"

 	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
+	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	"github.com/ceph/ceph-csi/internal/util"
 	"github.com/ceph/ceph-csi/internal/util/log"
 )

-type volumeID string
-
-type volumeOptions struct {
+type VolumeOptions struct {
 	TopologyPools       *[]util.TopologyConstrainedPool
 	TopologyRequirement *csi.TopologyRequirement
 	Topology            map[string]string
@@ -60,7 +59,7 @@ type volumeOptions struct {
 }

 // Connect a CephFS volume to the Ceph cluster.
-func (vo *volumeOptions) Connect(cr *util.Credentials) error {
+func (vo *VolumeOptions) Connect(cr *util.Credentials) error {
 	if vo.conn != nil {
 		return nil
 	}
@@ -77,7 +76,7 @@ func (vo *volumeOptions) Connect(cr *util.Credentials) error {

 // Destroy cleans up the CephFS volume object and closes the connection to the
 // Ceph cluster in case one was setup.
-func (vo *volumeOptions) Destroy() {
+func (vo *VolumeOptions) Destroy() {
 	if vo.conn != nil {
 		vo.conn.Destroy()
 	}
@@ -124,8 +123,8 @@ func extractOption(dest *string, optionLabel string, options map[string]string)

 func validateMounter(m string) error {
 	switch m {
-	case volumeMounterFuse:
-	case volumeMounterKernel:
+	case "fuse":
+	case "kernel":
 	default:
 		return fmt.Errorf("unknown mounter '%s'. Valid options are 'fuse' and 'kernel'", m)
 	}
@@ -147,7 +146,7 @@ func extractMounter(dest *string, options map[string]string) error {
 	return nil
 }

-func getClusterInformation(options map[string]string) (*util.ClusterInfo, error) {
+func GetClusterInformation(options map[string]string) (*util.ClusterInfo, error) {
 	clusterID, ok := options["clusterID"]
 	if !ok {
 		err := fmt.Errorf("clusterID must be set")
@@ -181,17 +180,17 @@ func getClusterInformation(options map[string]string) (*util.ClusterInfo, error)
 	return clusterData, nil
 }

-// newVolumeOptions generates a new instance of volumeOptions from the provided
+// NewVolumeOptions generates a new instance of volumeOptions from the provided
 // CSI request parameters.
-func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
-	cr *util.Credentials) (*volumeOptions, error) {
+func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
+	cr *util.Credentials) (*VolumeOptions, error) {
 	var (
-		opts volumeOptions
+		opts VolumeOptions
 		err error
 	)

 	volOptions := req.GetParameters()
-	clusterData, err := getClusterInformation(volOptions)
+	clusterData, err := GetClusterInformation(volOptions)
 	if err != nil {
 		return nil, err
 	}
@@ -259,16 +258,16 @@ func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
 	return &opts, nil
 }

-// newVolumeOptionsFromVolID generates a new instance of volumeOptions and volumeIdentifier
+// newVolumeOptionsFromVolID generates a new instance of volumeOptions and VolumeIdentifier
 // from the provided CSI VolumeID.
-func newVolumeOptionsFromVolID(
+func NewVolumeOptionsFromVolID(
 	ctx context.Context,
 	volID string,
-	volOpt, secrets map[string]string) (*volumeOptions, *volumeIdentifier, error) {
+	volOpt, secrets map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
 	var (
 		vi util.CSIIdentifier
-		volOptions volumeOptions
-		vid volumeIdentifier
+		volOptions VolumeOptions
+		vid VolumeIdentifier
 	)

 	// Decode the VolID first, to detect older volumes or pre-provisioned volumes
@@ -320,7 +319,7 @@ func newVolumeOptionsFromVolID(
 	}

 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -358,27 +357,27 @@ func newVolumeOptionsFromVolID(

 	volOptions.ProvisionVolume = true

-	info, err := volOptions.getSubVolumeInfo(ctx, volumeID(vid.FsSubvolName))
+	info, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
 	if err == nil {
 		volOptions.RootPath = info.Path
 		volOptions.Features = info.Features
 	}

 	if errors.Is(err, cerrors.ErrInvalidCommand) {
-		volOptions.RootPath, err = volOptions.getVolumeRootPathCeph(ctx, volumeID(vid.FsSubvolName))
+		volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
 	}

 	return &volOptions, &vid, err
 }

-// newVolumeOptionsFromMonitorList generates a new instance of volumeOptions and
-// volumeIdentifier from the provided CSI volume context.
-func newVolumeOptionsFromMonitorList(
+// NewVolumeOptionsFromMonitorList generates a new instance of VolumeOptions and
+// VolumeIdentifier from the provided CSI volume context.
+func NewVolumeOptionsFromMonitorList(
 	volID string,
-	options, secrets map[string]string) (*volumeOptions, *volumeIdentifier, error) {
+	options, secrets map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
 	var (
-		opts volumeOptions
-		vid volumeIdentifier
+		opts VolumeOptions
+		vid VolumeIdentifier
 		provisionVolumeBool string
 		err error
 	)
@@ -408,7 +407,7 @@ func newVolumeOptionsFromMonitorList(
 			return nil, nil, err
 		}

-		opts.RootPath = getVolumeRootPathCephDeprecated(volumeID(volID))
+		opts.RootPath = GetVolumeRootPathCephDeprecated(fsutil.VolumeID(volID))
 	} else {
 		if err = extractOption(&opts.RootPath, "rootPath", options); err != nil {
 			return nil, nil, err
@@ -433,15 +432,15 @@ func newVolumeOptionsFromMonitorList(
 	return &opts, &vid, nil
 }

-// newVolumeOptionsFromStaticVolume generates a new instance of volumeOptions and
-// volumeIdentifier from the provided CSI volume context, if the provided context is
+// NewVolumeOptionsFromStaticVolume generates a new instance of volumeOptions and
+// VolumeIdentifier from the provided CSI volume context, if the provided context is
 // detected to be a statically provisioned volume.
-func newVolumeOptionsFromStaticVolume(
+func NewVolumeOptionsFromStaticVolume(
 	volID string,
-	options map[string]string) (*volumeOptions, *volumeIdentifier, error) {
+	options map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
 	var (
-		opts volumeOptions
-		vid volumeIdentifier
+		opts VolumeOptions
+		vid VolumeIdentifier
 		staticVol bool
 		err error
 	)
@@ -463,7 +462,7 @@ func newVolumeOptionsFromStaticVolume(
 	// store NOT of static boolean
 	opts.ProvisionVolume = !staticVol

-	clusterData, err := getClusterInformation(options)
+	clusterData, err := GetClusterInformation(options)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -502,16 +501,16 @@ func newVolumeOptionsFromStaticVolume(
 	return &opts, &vid, nil
 }

-// newSnapshotOptionsFromID generates a new instance of volumeOptions and snapshotIdentifier
+// NewSnapshotOptionsFromID generates a new instance of volumeOptions and SnapshotIdentifier
 // from the provided CSI VolumeID.
-func newSnapshotOptionsFromID(
+func NewSnapshotOptionsFromID(
 	ctx context.Context,
 	snapID string,
-	cr *util.Credentials) (*volumeOptions, *snapshotInfo, *snapshotIdentifier, error) {
+	cr *util.Credentials) (*VolumeOptions, *SnapshotInfo, *SnapshotIdentifier, error) {
 	var (
 		vi util.CSIIdentifier
-		volOptions volumeOptions
-		sid snapshotIdentifier
+		volOptions VolumeOptions
+		sid SnapshotIdentifier
 	)
 	// Decode the snapID first, to detect pre-provisioned snapshot before other errors
 	err := vi.DecomposeCSIID(snapID)
@@ -560,7 +559,7 @@ func newSnapshotOptionsFromID(
 	}

 	// Connect to cephfs' default radosNamespace (csi)
-	j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
+	j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
 	if err != nil {
 		return &volOptions, nil, &sid, err
 	}
@@ -576,13 +575,13 @@ func newSnapshotOptionsFromID(
 	sid.FsSnapshotName = imageAttributes.ImageName
 	sid.FsSubvolName = imageAttributes.SourceName

-	subvolInfo, err := volOptions.getSubVolumeInfo(ctx, volumeID(sid.FsSubvolName))
+	subvolInfo, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(sid.FsSubvolName))
 	if err != nil {
 		return &volOptions, nil, &sid, err
 	}
 	volOptions.Features = subvolInfo.Features

-	info, err := volOptions.getSnapshotInfo(ctx, volumeID(sid.FsSnapshotName), volumeID(sid.FsSubvolName))
+	info, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
 	if err != nil {
 		return &volOptions, nil, &sid, err
 	}
@@ -590,12 +589,16 @@ func newSnapshotOptionsFromID(
 	return &volOptions, &info, &sid, nil
 }

-func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (snap *cephfsSnapshot, err error) {
-	cephfsSnap := &cephfsSnapshot{}
+func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (snap *CephfsSnapshot, err error) {
+	cephfsSnap := &CephfsSnapshot{}
 	cephfsSnap.RequestName = req.GetName()
 	snapOptions := req.GetParameters()

-	cephfsSnap.Monitors, cephfsSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
+	clusterID, err := util.GetClusterID(snapOptions)
+	if err != nil {
+		return nil, err
+	}
+	cephfsSnap.Monitors, cephfsSnap.ClusterID, err = util.GetMonsAndClusterID(ctx, clusterID, false)
 	if err != nil {
 		log.ErrorLog(ctx, "failed getting mons (%s)", err)
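The now-exported constructors each handle one way a CephFS volume can be described: a composed CSI VolumeID for dynamically provisioned volumes, a legacy monitor list in the volume context, or a statically provisioned volume. An illustrative-only dispatcher showing how a caller might pick between them (this switch is not in the diff; all names are hypothetical):

```go
package main

import "fmt"

type VolumeOptions struct{ Source string }

func newFromVolID(id string) *VolumeOptions        { return &VolumeOptions{"volid:" + id} }
func newFromMonitorList(id string) *VolumeOptions  { return &VolumeOptions{"monitors:" + id} }
func newFromStaticVolume(id string) *VolumeOptions { return &VolumeOptions{"static:" + id} }

// resolve inspects the CSI volume context to decide which constructor
// applies, roughly mirroring the fallback order used by node/controller
// servers in this driver.
func resolve(id string, volCtx map[string]string) *VolumeOptions {
	switch {
	case volCtx["staticVolume"] == "true":
		return newFromStaticVolume(id)
	case volCtx["monitors"] != "":
		return newFromMonitorList(id)
	default:
		return newFromVolID(id)
	}
}

func main() {
	fmt.Println(resolve("pvc-volume-id", map[string]string{"staticVolume": "true"}).Source)
}
```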
@@ -17,6 +17,9 @@ limitations under the License.
 package cephfs

 import (
+	"github.com/ceph/ceph-csi/internal/cephfs/core"
+	"github.com/ceph/ceph-csi/internal/cephfs/mounter"
+	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
 	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
 	"github.com/ceph/ceph-csi/internal/journal"
 	"github.com/ceph/ceph-csi/internal/util"
@@ -25,14 +28,6 @@ import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
 )

-const (
-	// volIDVersion is the version number of volume ID encoding scheme.
-	volIDVersion uint16 = 1
-
-	// RADOS namespace to store CSI specific objects and keys.
-	radosNamespace = "csi"
-)
-
 // Driver contains the default identity,node and controller struct.
 type Driver struct {
 	cd *csicommon.CSIDriver
@@ -42,19 +37,9 @@ type Driver struct {
 	cs *ControllerServer
 }

-var (
-	// CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing
-	// ceph clusters across CSI instances, to differentiate omap names per CSI instance.
-	CSIInstanceID = "default"
-
-	// volJournal is used to maintain RADOS based journals for CO generated
-	// VolumeName to backing CephFS subvolumes.
-	volJournal *journal.Config
-
-	// snapJournal is used to maintain RADOS based journals for CO generated
-	// SnapshotName to backing CephFS subvolumes.
-	snapJournal *journal.Config
-)
+// CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing
+// ceph clusters across CSI instances, to differentiate omap names per CSI instance.
+var CSIInstanceID = "default"

 // NewDriver returns new ceph driver.
 func NewDriver() *Driver {
@@ -93,22 +78,18 @@ func (fs *Driver) Run(conf *util.Config) {
 	var topology map[string]string

 	// Configuration
-	if err = loadAvailableMounters(conf); err != nil {
+	if err = mounter.LoadAvailableMounters(conf); err != nil {
 		log.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err)
 	}

-	if err = util.WriteCephConfig(); err != nil {
-		log.FatalLogMsg("failed to write ceph configuration file: %v", err)
-	}
-
 	// Use passed in instance ID, if provided for omap suffix naming
 	if conf.InstanceID != "" {
 		CSIInstanceID = conf.InstanceID
 	}
 	// Create an instance of the volume journal
-	volJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, radosNamespace)
+	core.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)

-	snapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, radosNamespace)
+	core.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
 	// Initialize default library driver

 	fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
@ -62,3 +62,9 @@ var (
|
||||
// ErrVolumeHasSnapshots is returned when a subvolume has snapshots.
|
||||
ErrVolumeHasSnapshots = coreError.New("volume has snapshots")
|
||||
)
|
||||
|
||||
// IsCloneRetryError returns true if the clone error is a pending or in-progress
|
||||
// error.
|
||||
func IsCloneRetryError(err error) bool {
|
||||
return coreError.Is(err, ErrCloneInProgress) || coreError.Is(err, ErrClonePending)
|
||||
}
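A hedged sketch of the intended caller pattern (hypothetical; status and codes are the standard gRPC packages): retryable clone states are surfaced as Aborted so the CO retries the request.

	// Sketch of a hypothetical caller: a pending/in-progress clone is not
	// fatal, so return codes.Aborted and let the CO retry CreateVolume.
	if cerrors.IsCloneRetryError(err) {
		return nil, status.Error(codes.Aborted, err.Error())
	}
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}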
|
||||
|
142
internal/cephfs/mounter/fuse.go
Normal file
@ -0,0 +1,142 @@
|
||||
/*
|
||||
Copyright 2021 The Ceph-CSI Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mounter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/cephfs/core"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
)
|
||||
|
||||
const (
|
||||
volumeMounterFuse = "fuse"
|
||||
|
||||
cephEntityClientPrefix = "client."
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
// maps a mountpoint to PID of its FUSE daemon.
|
||||
fusePidMap = make(map[string]int)
|
||||
fusePidMapMtx sync.Mutex
|
||||
|
||||
fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`)
|
||||
)
|
||||
|
||||
type FuseMounter struct{}
|
||||
|
||||
func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error {
|
||||
args := []string{
|
||||
mountPoint,
|
||||
"-m", volOptions.Monitors,
|
||||
"-c", util.CephConfigPath,
|
||||
"-n", cephEntityClientPrefix + cr.ID, "--keyfile=" + cr.KeyFile,
|
||||
"-r", volOptions.RootPath,
|
||||
}
|
||||
|
||||
fmo := "nonempty"
|
||||
if volOptions.FuseMountOptions != "" {
|
||||
fmo += "," + strings.TrimSpace(volOptions.FuseMountOptions)
|
||||
}
|
||||
args = append(args, "-o", fmo)
|
||||
|
||||
if volOptions.FsName != "" {
|
||||
args = append(args, "--client_mds_namespace="+volOptions.FsName)
|
||||
}
|
||||
|
||||
_, stderr, err := util.ExecCommand(ctx, "ceph-fuse", args[:]...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w stderr: %s", err, stderr)
|
||||
}
|
||||
|
||||
// Parse the output:
|
||||
// We need "starting fuse" meaning the mount is ok
|
||||
// and PID of the ceph-fuse daemon for unmount
|
||||
|
||||
match := fusePidRx.FindSubmatch([]byte(stderr))
|
||||
// validMatchLength is set to 2 as match is expected
|
||||
// to have 2 items: the full match and the captured PID of the fuse daemon
|
||||
const validMatchLength = 2
|
||||
if len(match) != validMatchLength {
|
||||
return fmt.Errorf("ceph-fuse failed: %s", stderr)
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(string(match[1]))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse FUSE daemon PID: %w", err)
|
||||
}
|
||||
|
||||
fusePidMapMtx.Lock()
|
||||
fusePidMap[mountPoint] = pid
|
||||
fusePidMapMtx.Unlock()
|
||||
|
||||
return nil
|
||||
}
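To illustrate the parsing above, a minimal sketch with a sample stderr line that matches fusePidRx (the log line is illustrative, derived from the regexp, not captured output):

	// Sketch: fusePidRx applied to a sample ceph-fuse stderr line.
	stderr := "ceph-fuse[12345]: starting fuse"
	match := fusePidRx.FindSubmatch([]byte(stderr))
	// match[0] == "ceph-fuse[12345]: starting fuse", match[1] == "12345"
	pid, err := strconv.Atoi(string(match[1])) // pid == 12345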
|
||||
|
||||
func (m *FuseMounter) Mount(
|
||||
ctx context.Context,
|
||||
mountPoint string,
|
||||
cr *util.Credentials,
|
||||
volOptions *core.VolumeOptions) error {
|
||||
if err := util.CreateMountPoint(mountPoint); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return mountFuse(ctx, mountPoint, cr, volOptions)
|
||||
}
|
||||
|
||||
func (m *FuseMounter) Name() string { return "Ceph FUSE driver" }
|
||||
|
||||
func UnmountVolume(ctx context.Context, mountPoint string) error {
|
||||
if _, stderr, err := util.ExecCommand(ctx, "umount", mountPoint); err != nil {
|
||||
err = fmt.Errorf("%w stderr: %s", err, stderr)
|
||||
if strings.Contains(err.Error(), fmt.Sprintf("umount: %s: not mounted", mountPoint)) ||
|
||||
strings.Contains(err.Error(), "No such file or directory") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
fusePidMapMtx.Lock()
|
||||
pid, ok := fusePidMap[mountPoint]
|
||||
if ok {
|
||||
delete(fusePidMap, mountPoint)
|
||||
}
|
||||
fusePidMapMtx.Unlock()
|
||||
|
||||
if ok {
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
log.WarningLog(ctx, "failed to find process %d: %v", pid, err)
|
||||
} else {
|
||||
if _, err = p.Wait(); err != nil {
|
||||
log.WarningLog(ctx, "%d is not a child process: %v", pid, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
74
internal/cephfs/mounter/kernel.go
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2021 The Ceph-CSI Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mounter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/cephfs/core"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
)
|
||||
|
||||
const (
|
||||
volumeMounterKernel = "kernel"
|
||||
netDev = "_netdev"
|
||||
)
|
||||
|
||||
type KernelMounter struct{}
|
||||
|
||||
func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error {
|
||||
if err := execCommandErr(ctx, "modprobe", "ceph"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-t", "ceph",
|
||||
fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
|
||||
mountPoint,
|
||||
}
|
||||
|
||||
optionsStr := fmt.Sprintf("name=%s,secretfile=%s", cr.ID, cr.KeyFile)
|
||||
mdsNamespace := ""
|
||||
if volOptions.FsName != "" {
|
||||
mdsNamespace = fmt.Sprintf("mds_namespace=%s", volOptions.FsName)
|
||||
}
|
||||
optionsStr = util.MountOptionsAdd(optionsStr, mdsNamespace, volOptions.KernelMountOptions, netDev)
|
||||
|
||||
args = append(args, "-o", optionsStr)
|
||||
|
||||
_, stderr, err := util.ExecCommand(ctx, "mount", args[:]...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w stderr: %s", err, stderr)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *KernelMounter) Mount(
|
||||
ctx context.Context,
|
||||
mountPoint string,
|
||||
cr *util.Credentials,
|
||||
volOptions *core.VolumeOptions) error {
|
||||
if err := util.CreateMountPoint(mountPoint); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return mountKernel(ctx, mountPoint, cr, volOptions)
|
||||
}
|
||||
|
||||
func (m *KernelMounter) Name() string { return "Ceph kernel client" }
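For orientation, a sketch of the option string mountKernel assembles (all values hypothetical; MountOptionsAdd is assumed to comma-join the non-empty options, as its use above suggests):

	// Sketch: options for a volume in filesystem "myfs" with extra kernel
	// mount options "recover_session=clean" (values hypothetical).
	optionsStr := util.MountOptionsAdd(
		"name=admin,secretfile=/etc/ceph/keyfile",
		"mds_namespace=myfs",
		"recover_session=clean",
		netDev,
	)
	// expected: "name=admin,secretfile=/etc/ceph/keyfile,mds_namespace=myfs,recover_session=clean,_netdev"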
|
154
internal/cephfs/mounter/volumemounter.go
Normal file
@ -0,0 +1,154 @@
|
||||
/*
|
||||
Copyright 2018 The Ceph-CSI Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mounter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/cephfs/core"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
)
|
||||
|
||||
var (
|
||||
availableMounters []string
|
||||
|
||||
// nolint:gomnd // numbers specify Kernel versions.
|
||||
quotaSupport = []util.KernelVersion{
|
||||
{
|
||||
Version: 4,
|
||||
PatchLevel: 17,
|
||||
SubLevel: 0,
|
||||
ExtraVersion: 0, Distribution: "",
|
||||
Backport: false,
|
||||
}, // standard 4.17+ versions
|
||||
{
|
||||
Version: 3,
|
||||
PatchLevel: 10,
|
||||
SubLevel: 0,
|
||||
ExtraVersion: 1062,
|
||||
Distribution: ".el7",
|
||||
Backport: true,
|
||||
}, // RHEL-7.7
|
||||
}
|
||||
)
|
||||
|
||||
func execCommandErr(ctx context.Context, program string, args ...string) error {
|
||||
_, _, err := util.ExecCommand(ctx, program, args...)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Load available ceph mounters installed on the system into availableMounters
|
||||
// Called from driver.go's Run().
|
||||
func LoadAvailableMounters(conf *util.Config) error {
|
||||
// #nosec
|
||||
fuseMounterProbe := exec.Command("ceph-fuse", "--version")
|
||||
// #nosec
|
||||
kernelMounterProbe := exec.Command("mount.ceph")
|
||||
|
||||
err := kernelMounterProbe.Run()
|
||||
if err != nil {
|
||||
log.ErrorLogMsg("failed to run mount.ceph %v", err)
|
||||
} else {
|
||||
// fetch the current running kernel info
|
||||
release, kvErr := util.GetKernelVersion()
|
||||
if kvErr != nil {
|
||||
return kvErr
|
||||
}
|
||||
|
||||
if conf.ForceKernelCephFS || util.CheckKernelSupport(release, quotaSupport) {
|
||||
log.DefaultLog("loaded mounter: %s", volumeMounterKernel)
|
||||
availableMounters = append(availableMounters, volumeMounterKernel)
|
||||
} else {
|
||||
log.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
|
||||
}
|
||||
}
|
||||
|
||||
err = fuseMounterProbe.Run()
|
||||
if err != nil {
|
||||
log.ErrorLogMsg("failed to run ceph-fuse %v", err)
|
||||
} else {
|
||||
log.DefaultLog("loaded mounter: %s", volumeMounterFuse)
|
||||
availableMounters = append(availableMounters, volumeMounterFuse)
|
||||
}
|
||||
|
||||
if len(availableMounters) == 0 {
|
||||
return errors.New("no ceph mounters found on system")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type VolumeMounter interface {
|
||||
Mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error
|
||||
Name() string
|
||||
}
|
||||
|
||||
func New(volOptions *core.VolumeOptions) (VolumeMounter, error) {
|
||||
// Get the mounter from the configuration
|
||||
|
||||
wantMounter := volOptions.Mounter
|
||||
|
||||
// Verify that it's available
|
||||
|
||||
var chosenMounter string
|
||||
|
||||
for _, availMounter := range availableMounters {
|
||||
if availMounter == wantMounter {
|
||||
chosenMounter = wantMounter
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if chosenMounter == "" {
|
||||
// Otherwise pick whatever is left
|
||||
chosenMounter = availableMounters[0]
|
||||
log.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
|
||||
}
|
||||
|
||||
// Create the mounter
|
||||
switch chosenMounter {
|
||||
case volumeMounterFuse:
|
||||
return &FuseMounter{}, nil
|
||||
case volumeMounterKernel:
|
||||
return &KernelMounter{}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown mounter '%s'", chosenMounter)
|
||||
}
|
||||
|
||||
func BindMount(ctx context.Context, from, to string, readOnly bool, mntOptions []string) error {
|
||||
mntOptionSli := strings.Join(mntOptions, ",")
|
||||
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, from, to); err != nil {
|
||||
return fmt.Errorf("failed to bind-mount %s to %s: %w", from, to, err)
|
||||
}
|
||||
|
||||
if readOnly {
|
||||
mntOptionSli = util.MountOptionsAdd(mntOptionSli, "remount")
|
||||
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, to); err != nil {
|
||||
return fmt.Errorf("failed read-only remount of %s: %w", to, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
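A sketch of how a node server might drive this package end to end (hypothetical caller; volOptions, credentials and paths are assumed to come from the CSI request handling shown later in this change):

	// stageAndPublish is a hypothetical helper: pick a mounter (New falls
	// back to any available one), stage the volume, then bind-mount it
	// into the publish path; readOnly=true triggers the remount above.
	func stageAndPublish(ctx context.Context, volOptions *core.VolumeOptions,
		cr *util.Credentials, stagingPath, targetPath string) error {
		m, err := mounter.New(volOptions)
		if err != nil {
			return err
		}
		if err = m.Mount(ctx, stagingPath, cr, volOptions); err != nil {
			return err
		}

		return mounter.BindMount(ctx, stagingPath, targetPath, true, []string{"bind", "ro"})
	}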
|
@ -23,7 +23,10 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/cephfs/core"
|
||||
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
|
||||
"github.com/ceph/ceph-csi/internal/cephfs/mounter"
|
||||
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
|
||||
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
@ -42,7 +45,9 @@ type NodeServer struct {
|
||||
VolumeLocks *util.VolumeLocks
|
||||
}
|
||||
|
||||
func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
|
||||
func getCredentialsForVolume(
|
||||
volOptions *core.VolumeOptions,
|
||||
req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
|
||||
var (
|
||||
err error
|
||||
cr *util.Credentials
|
||||
@ -72,7 +77,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolume
|
||||
func (ns *NodeServer) NodeStageVolume(
|
||||
ctx context.Context,
|
||||
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
|
||||
var volOptions *volumeOptions
|
||||
var volOptions *core.VolumeOptions
|
||||
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -80,7 +85,7 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
// Configuration
|
||||
|
||||
stagingTargetPath := req.GetStagingTargetPath()
|
||||
volID := volumeID(req.GetVolumeId())
|
||||
volID := fsutil.VolumeID(req.GetVolumeId())
|
||||
|
||||
if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
@ -89,21 +94,21 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
}
|
||||
defer ns.VolumeLocks.Release(req.GetVolumeId())
|
||||
|
||||
volOptions, _, err := newVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
|
||||
volOptions, _, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
|
||||
if err != nil {
|
||||
if !errors.Is(err, cerrors.ErrInvalidVolID) {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
// gets mon IPs from the supplied cluster info
|
||||
volOptions, _, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
|
||||
volOptions, _, err = core.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
|
||||
if err != nil {
|
||||
if !errors.Is(err, cerrors.ErrNonStaticVolume) {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
// get mon IPs from the volume context
|
||||
volOptions, _, err = newVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
|
||||
volOptions, _, err = core.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
|
||||
req.GetSecrets())
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@ -137,9 +142,9 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
|
||||
func (*NodeServer) mount(ctx context.Context, volOptions *core.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
|
||||
stagingTargetPath := req.GetStagingTargetPath()
|
||||
volID := volumeID(req.GetVolumeId())
|
||||
volID := fsutil.VolumeID(req.GetVolumeId())
|
||||
|
||||
cr, err := getCredentialsForVolume(volOptions, req)
|
||||
if err != nil {
|
||||
@ -149,14 +154,14 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
}
|
||||
defer cr.DeleteCredentials()
|
||||
|
||||
m, err := newMounter(volOptions)
|
||||
m, err := mounter.New(volOptions)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
|
||||
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
|
||||
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.Name())
|
||||
|
||||
readOnly := "ro"
|
||||
fuseMountOptions := strings.Split(volOptions.FuseMountOptions, ",")
|
||||
@ -165,12 +170,12 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
|
||||
req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
|
||||
switch m.(type) {
|
||||
case *fuseMounter:
|
||||
case *mounter.FuseMounter:
|
||||
if !csicommon.MountOptionContains(strings.Split(volOptions.FuseMountOptions, ","), readOnly) {
|
||||
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, readOnly)
|
||||
fuseMountOptions = append(fuseMountOptions, readOnly)
|
||||
}
|
||||
case *kernelMounter:
|
||||
case *mounter.KernelMounter:
|
||||
if !csicommon.MountOptionContains(strings.Split(volOptions.KernelMountOptions, ","), readOnly) {
|
||||
volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, readOnly)
|
||||
kernelMountOptions = append(kernelMountOptions, readOnly)
|
||||
@ -178,7 +183,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
}
|
||||
}
|
||||
|
||||
if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
|
||||
if err = m.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
|
||||
log.ErrorLog(ctx,
|
||||
"failed to mount volume %s: %v Check dmesg logs if required.",
|
||||
volID,
|
||||
@ -197,7 +202,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
stagingTargetPath,
|
||||
volID,
|
||||
err)
|
||||
uErr := unmountVolume(ctx, stagingTargetPath)
|
||||
uErr := mounter.UnmountVolume(ctx, stagingTargetPath)
|
||||
if uErr != nil {
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
@ -259,7 +264,12 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
|
||||
// It's not, mount now
|
||||
|
||||
if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
|
||||
if err = mounter.BindMount(
|
||||
ctx,
|
||||
req.GetStagingTargetPath(),
|
||||
req.GetTargetPath(),
|
||||
req.GetReadonly(),
|
||||
mountOptions); err != nil {
|
||||
log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@ -301,7 +311,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
|
||||
}
|
||||
|
||||
// Unmount the bind-mount
|
||||
if err = unmountVolume(ctx, targetPath); err != nil {
|
||||
if err = mounter.UnmountVolume(ctx, targetPath); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
@ -349,7 +359,7 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
return &csi.NodeUnstageVolumeResponse{}, nil
|
||||
}
|
||||
// Unmount the volume
|
||||
if err = unmountVolume(ctx, stagingTargetPath); err != nil {
|
||||
if err = mounter.UnmountVolume(ctx, stagingTargetPath); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cephfs
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
@ -26,7 +26,18 @@ import (
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
func parseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp, error) {
|
||||
// VolumeID string representation.
|
||||
type VolumeID string
|
||||
|
||||
const (
|
||||
// VolIDVersion is the version number of volume ID encoding scheme.
|
||||
VolIDVersion uint16 = 1
|
||||
|
||||
// RadosNamespace to store CSI specific objects and keys.
|
||||
RadosNamespace = "csi"
|
||||
)
|
||||
|
||||
func ParseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp, error) {
|
||||
tm, err := ptypes.TimestampProto(createTime)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)
|
@ -1,310 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Ceph-CSI Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cephfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
)
|
||||
|
||||
const (
|
||||
volumeMounterFuse = "fuse"
|
||||
volumeMounterKernel = "kernel"
|
||||
netDev = "_netdev"
|
||||
)
|
||||
|
||||
var (
|
||||
availableMounters []string
|
||||
|
||||
// maps a mountpoint to PID of its FUSE daemon.
|
||||
fusePidMap = make(map[string]int)
|
||||
fusePidMapMtx sync.Mutex
|
||||
|
||||
fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`)
|
||||
|
||||
// nolint:gomnd // numbers specify Kernel versions.
|
||||
quotaSupport = []util.KernelVersion{
|
||||
{
|
||||
Version: 4,
|
||||
PatchLevel: 17,
|
||||
SubLevel: 0,
|
||||
ExtraVersion: 0, Distribution: "",
|
||||
Backport: false,
|
||||
}, // standard 4.17+ versions
|
||||
{
|
||||
Version: 3,
|
||||
PatchLevel: 10,
|
||||
SubLevel: 0,
|
||||
ExtraVersion: 1062,
|
||||
Distribution: ".el7",
|
||||
Backport: true,
|
||||
}, // RHEL-7.7
|
||||
}
|
||||
)
|
||||
|
||||
func execCommandErr(ctx context.Context, program string, args ...string) error {
|
||||
_, _, err := util.ExecCommand(ctx, program, args...)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Load available ceph mounters installed on system into availableMounters
|
||||
// Called from driver.go's Run().
|
||||
func loadAvailableMounters(conf *util.Config) error {
|
||||
// #nosec
|
||||
fuseMounterProbe := exec.Command("ceph-fuse", "--version")
|
||||
// #nosec
|
||||
kernelMounterProbe := exec.Command("mount.ceph")
|
||||
|
||||
err := kernelMounterProbe.Run()
|
||||
if err != nil {
|
||||
log.ErrorLogMsg("failed to run mount.ceph %v", err)
|
||||
} else {
|
||||
// fetch the current running kernel info
|
||||
release, kvErr := util.GetKernelVersion()
|
||||
if kvErr != nil {
|
||||
return kvErr
|
||||
}
|
||||
|
||||
if conf.ForceKernelCephFS || util.CheckKernelSupport(release, quotaSupport) {
|
||||
log.DefaultLog("loaded mounter: %s", volumeMounterKernel)
|
||||
availableMounters = append(availableMounters, volumeMounterKernel)
|
||||
} else {
|
||||
log.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
|
||||
}
|
||||
}
|
||||
|
||||
err = fuseMounterProbe.Run()
|
||||
if err != nil {
|
||||
log.ErrorLogMsg("failed to run ceph-fuse %v", err)
|
||||
} else {
|
||||
log.DefaultLog("loaded mounter: %s", volumeMounterFuse)
|
||||
availableMounters = append(availableMounters, volumeMounterFuse)
|
||||
}
|
||||
|
||||
if len(availableMounters) == 0 {
|
||||
return errors.New("no ceph mounters found on system")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type volumeMounter interface {
|
||||
mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error
|
||||
name() string
|
||||
}
|
||||
|
||||
func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
|
||||
// Get the mounter from the configuration
|
||||
|
||||
wantMounter := volOptions.Mounter
|
||||
|
||||
// Verify that it's available
|
||||
|
||||
var chosenMounter string
|
||||
|
||||
for _, availMounter := range availableMounters {
|
||||
if availMounter == wantMounter {
|
||||
chosenMounter = wantMounter
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if chosenMounter == "" {
|
||||
// Otherwise pick whatever is left
|
||||
chosenMounter = availableMounters[0]
|
||||
log.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
|
||||
}
|
||||
|
||||
// Create the mounter
|
||||
|
||||
switch chosenMounter {
|
||||
case volumeMounterFuse:
|
||||
return &fuseMounter{}, nil
|
||||
case volumeMounterKernel:
|
||||
return &kernelMounter{}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown mounter '%s'", chosenMounter)
|
||||
}
|
||||
|
||||
type fuseMounter struct{}
|
||||
|
||||
func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
|
||||
args := []string{
|
||||
mountPoint,
|
||||
"-m", volOptions.Monitors,
|
||||
"-c", util.CephConfigPath,
|
||||
"-n", cephEntityClientPrefix + cr.ID, "--keyfile=" + cr.KeyFile,
|
||||
"-r", volOptions.RootPath,
|
||||
}
|
||||
|
||||
fmo := "nonempty"
|
||||
if volOptions.FuseMountOptions != "" {
|
||||
fmo += "," + strings.TrimSpace(volOptions.FuseMountOptions)
|
||||
}
|
||||
args = append(args, "-o", fmo)
|
||||
|
||||
if volOptions.FsName != "" {
|
||||
args = append(args, "--client_mds_namespace="+volOptions.FsName)
|
||||
}
|
||||
|
||||
_, stderr, err := util.ExecCommand(ctx, "ceph-fuse", args[:]...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w stderr: %s", err, stderr)
|
||||
}
|
||||
|
||||
// Parse the output:
|
||||
// We need "starting fuse" meaning the mount is ok
|
||||
// and PID of the ceph-fuse daemon for unmount
|
||||
|
||||
match := fusePidRx.FindSubmatch([]byte(stderr))
|
||||
// validMatchLength is set to 2 as match is expected
|
||||
// to have 2 items, starting fuse and PID of the fuse daemon
|
||||
const validMatchLength = 2
|
||||
if len(match) != validMatchLength {
|
||||
return fmt.Errorf("ceph-fuse failed: %s", stderr)
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(string(match[1]))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse FUSE daemon PID: %w", err)
|
||||
}
|
||||
|
||||
fusePidMapMtx.Lock()
|
||||
fusePidMap[mountPoint] = pid
|
||||
fusePidMapMtx.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *fuseMounter) mount(
|
||||
ctx context.Context,
|
||||
mountPoint string,
|
||||
cr *util.Credentials,
|
||||
volOptions *volumeOptions) error {
|
||||
if err := util.CreateMountPoint(mountPoint); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return mountFuse(ctx, mountPoint, cr, volOptions)
|
||||
}
|
||||
|
||||
func (m *fuseMounter) name() string { return "Ceph FUSE driver" }
|
||||
|
||||
type kernelMounter struct{}
|
||||
|
||||
func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
|
||||
if err := execCommandErr(ctx, "modprobe", "ceph"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-t", "ceph",
|
||||
fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
|
||||
mountPoint,
|
||||
}
|
||||
|
||||
optionsStr := fmt.Sprintf("name=%s,secretfile=%s", cr.ID, cr.KeyFile)
|
||||
mdsNamespace := ""
|
||||
if volOptions.FsName != "" {
|
||||
mdsNamespace = fmt.Sprintf("mds_namespace=%s", volOptions.FsName)
|
||||
}
|
||||
optionsStr = util.MountOptionsAdd(optionsStr, mdsNamespace, volOptions.KernelMountOptions, netDev)
|
||||
|
||||
args = append(args, "-o", optionsStr)
|
||||
|
||||
_, stderr, err := util.ExecCommand(ctx, "mount", args[:]...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w stderr: %s", err, stderr)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *kernelMounter) mount(
|
||||
ctx context.Context,
|
||||
mountPoint string,
|
||||
cr *util.Credentials,
|
||||
volOptions *volumeOptions) error {
|
||||
if err := util.CreateMountPoint(mountPoint); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return mountKernel(ctx, mountPoint, cr, volOptions)
|
||||
}
|
||||
|
||||
func (m *kernelMounter) name() string { return "Ceph kernel client" }
|
||||
|
||||
func bindMount(ctx context.Context, from, to string, readOnly bool, mntOptions []string) error {
|
||||
mntOptionSli := strings.Join(mntOptions, ",")
|
||||
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, from, to); err != nil {
|
||||
return fmt.Errorf("failed to bind-mount %s to %s: %w", from, to, err)
|
||||
}
|
||||
|
||||
if readOnly {
|
||||
mntOptionSli = util.MountOptionsAdd(mntOptionSli, "remount")
|
||||
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, to); err != nil {
|
||||
return fmt.Errorf("failed read-only remount of %s: %w", to, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmountVolume(ctx context.Context, mountPoint string) error {
|
||||
if _, stderr, err := util.ExecCommand(ctx, "umount", mountPoint); err != nil {
|
||||
err = fmt.Errorf("%w stderr: %s", err, stderr)
|
||||
if strings.Contains(err.Error(), fmt.Sprintf("umount: %s: not mounted", mountPoint)) ||
|
||||
strings.Contains(err.Error(), "No such file or directory") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
fusePidMapMtx.Lock()
|
||||
pid, ok := fusePidMap[mountPoint]
|
||||
if ok {
|
||||
delete(fusePidMap, mountPoint)
|
||||
}
|
||||
fusePidMapMtx.Unlock()
|
||||
|
||||
if ok {
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
log.WarningLog(ctx, "failed to find process %d: %v", pid, err)
|
||||
} else {
|
||||
if _, err = p.Wait(); err != nil {
|
||||
log.WarningLog(ctx, "%d is not a child process: %v", pid, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -261,21 +261,21 @@ func (cj *Config) Connect(monitors, namespace string, cr *util.Credentials) (*Co
|
||||
|
||||
/*
|
||||
CheckReservation checks if given request name contains a valid reservation
|
||||
- If there is a valid reservation, then the corresponding UUID for the volume/snapshot is returned
|
||||
- If there is a valid reservation, then the corresponding ImageData for the volume/snapshot is returned
|
||||
- If there is a reservation that is stale (or not fully cleaned up), it is garbage collected using
|
||||
the UndoReservation call, as appropriate
|
||||
- If a snapshot is being checked, then its source is matched to the parentName that is provided
|
||||
- If a snapshot is being checked, then its source is matched to the snapParentName that is provided
|
||||
|
||||
NOTE: As the function manipulates omaps, it should be called with a lock against the request name
|
||||
held, to prevent parallel operations from modifying the state of the omaps for this request name.
|
||||
|
||||
Return values:
|
||||
- string: Contains the UUID that was reserved for the passed in reqName, empty if
|
||||
there was no reservation found
|
||||
- ImageData: which contains the UUID, Pool, PoolID and ImageAttributes that were reserved for the
|
||||
passed in reqName, empty if there was no reservation found
|
||||
- error: non-nil in case of any errors
|
||||
*/
|
||||
func (conn *Connection) CheckReservation(ctx context.Context,
|
||||
journalPool, reqName, namePrefix, parentName, kmsConfig string) (*ImageData, error) {
|
||||
journalPool, reqName, namePrefix, snapParentName, kmsConfig string) (*ImageData, error) {
|
||||
var (
|
||||
snapSource bool
|
||||
objUUID string
|
||||
@ -284,7 +284,7 @@ func (conn *Connection) CheckReservation(ctx context.Context,
|
||||
cj = conn.config
|
||||
)
|
||||
|
||||
if parentName != "" {
|
||||
if snapParentName != "" {
|
||||
if cj.cephSnapSourceKey == "" {
|
||||
err := errors.New("invalid request, cephSnapSourceKey is nil")
|
||||
|
||||
@ -378,12 +378,12 @@ func (conn *Connection) CheckReservation(ctx context.Context,
|
||||
|
||||
if snapSource {
|
||||
// check if source UUID key points back to the parent volume passed in
|
||||
if savedImageAttributes.SourceName != parentName {
|
||||
if savedImageAttributes.SourceName != snapParentName {
|
||||
// NOTE: This can happen if there is a snapname conflict, and we already have a snapshot
|
||||
// with the same name pointing to a different UUID as the source
|
||||
err = fmt.Errorf("%w: snapname points to different volume, request name (%s)"+
|
||||
" source name (%s) saved source name (%s)", util.ErrSnapNameConflict,
|
||||
reqName, parentName, savedImageAttributes.SourceName)
|
||||
" source name (%s) : saved source name (%s)", util.ErrSnapNameConflict,
|
||||
reqName, snapParentName, savedImageAttributes.SourceName)
|
||||
|
||||
return nil, err
|
||||
}
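A hedged sketch of a snapshot-reservation check (all values hypothetical; the ImageData field name is an assumption based on the doc comment above):

	// Sketch: a non-nil ImageData means reqName already has a reservation.
	imageData, err := conn.CheckReservation(ctx,
		"replicapool",      // journalPool
		"snap-req-1",       // reqName (hypothetical)
		"csi-snap-",        // namePrefix
		"csi-vol-parent-1", // snapParentName: non-empty => snapshot lookup
		"")                 // kmsConfig
	if err != nil {
		return err
	}
	if imageData != nil {
		uuid := imageData.ImageUUID // UUID reserved for reqName (assumed field)
		_ = uuid
	}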
|
||||
|
@ -161,7 +161,7 @@ func (rv *rbdVolume) createCloneFromImage(ctx context.Context, parentVol *rbdVol
|
||||
}
|
||||
|
||||
if parentVol.isEncrypted() {
|
||||
err = parentVol.copyEncryptionConfig(&rv.rbdImage)
|
||||
err = parentVol.copyEncryptionConfig(&rv.rbdImage, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy encryption config for %q: %w", rv, err)
|
||||
}
|
||||
|
@ -123,7 +123,10 @@ func (cs *ControllerServer) parseVolCreateRequest(
|
||||
}
|
||||
|
||||
// if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks
|
||||
rbdVol, err := genVolFromVolumeOptions(ctx, req.GetParameters(), req.GetSecrets(), (isMultiNode && isBlock))
|
||||
rbdVol, err := genVolFromVolumeOptions(
|
||||
ctx,
|
||||
req.GetParameters(), req.GetSecrets(),
|
||||
(isMultiNode && isBlock), false)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
@ -600,6 +603,10 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "failed to mark %q thick-provisioned: %s", rbdVol, err)
|
||||
}
|
||||
err = parentVol.copyEncryptionConfig(&rbdVol.rbdImage, true)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
} else {
|
||||
// create clone image and delete snapshot
|
||||
err = rbdVol.cloneRbdImageFromSnapshot(ctx, rbdSnap, parentVol)
|
||||
@ -830,6 +837,16 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
}
|
||||
defer cs.OperationLocks.ReleaseDeleteLock(volumeID)
|
||||
|
||||
if isMigrationVolID(volumeID) {
|
||||
log.DebugLog(ctx, "migration volume ID : %s", volumeID)
|
||||
err = parseAndDeleteMigratedVolume(ctx, volumeID, cr)
|
||||
if err != nil && !errors.Is(err, ErrImageNotFound) {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
rbdVol, err := genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
|
||||
defer rbdVol.Destroy()
|
||||
if err != nil {
|
||||
@ -1092,7 +1109,7 @@ func cloneFromSnapshot(
|
||||
defer vol.Destroy()
|
||||
|
||||
if rbdVol.isEncrypted() {
|
||||
err = rbdVol.copyEncryptionConfig(&vol.rbdImage)
|
||||
err = rbdVol.copyEncryptionConfig(&vol.rbdImage, false)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -1211,7 +1228,7 @@ func (cs *ControllerServer) doSnapshotClone(
|
||||
}()
|
||||
|
||||
if parentVol.isEncrypted() {
|
||||
cryptErr := parentVol.copyEncryptionConfig(&cloneRbd.rbdImage)
|
||||
cryptErr := parentVol.copyEncryptionConfig(&cloneRbd.rbdImage, false)
|
||||
if cryptErr != nil {
|
||||
log.WarningLog(ctx, "failed copy encryption "+
|
||||
"config for %q: %v", cloneRbd, cryptErr)
|
||||
|
@ -106,11 +106,6 @@ func (r *Driver) Run(conf *util.Config) {
|
||||
var err error
|
||||
var topology map[string]string
|
||||
|
||||
// Create ceph.conf for use with CLI commands
|
||||
if err = util.WriteCephConfig(); err != nil {
|
||||
log.FatalLogMsg("failed to write ceph configuration file (%v)", err)
|
||||
}
|
||||
|
||||
// Use passed in instance ID, if provided for omap suffix naming
|
||||
if conf.InstanceID != "" {
|
||||
CSIInstanceID = conf.InstanceID
|
||||
|
@ -123,7 +123,11 @@ func (ri *rbdImage) setupEncryption(ctx context.Context) error {
|
||||
// rbdImage to the passed argument. This function re-encrypts the passphrase
|
||||
// from the original, so that both encrypted passphrases (potentially, depends
|
||||
// on the DEKStore) have different contents.
|
||||
func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage) error {
|
||||
// When copyOnlyPassphrase is set to true, only the passphrase is copied to the
|
||||
// destination rbdImage's VolumeEncryption object which needs to be initialized
|
||||
// beforehand and is possibly different from the source VolumeEncryption
|
||||
// (Usecase: Restoring snapshot into a storageclass with different encryption config).
|
||||
func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage, copyOnlyPassphrase bool) error {
|
||||
if ri.VolID == cp.VolID {
|
||||
return fmt.Errorf("BUG: %q and %q have the same VolID (%s) "+
|
||||
"set!? Call stack: %s", ri, cp, ri.VolID, util.CallStack())
|
||||
@ -136,9 +140,11 @@ func (ri *rbdImage) copyEncryptionConfig(cp *rbdImage) error {
|
||||
ri, err)
|
||||
}
|
||||
|
||||
cp.encryption, err = util.NewVolumeEncryption(ri.encryption.GetID(), ri.encryption.KMS)
|
||||
if errors.Is(err, util.ErrDEKStoreNeeded) {
|
||||
cp.encryption.SetDEKStore(cp)
|
||||
if !copyOnlyPassphrase {
|
||||
cp.encryption, err = util.NewVolumeEncryption(ri.encryption.GetID(), ri.encryption.KMS)
|
||||
if errors.Is(err, util.ErrDEKStoreNeeded) {
|
||||
cp.encryption.SetDEKStore(cp)
|
||||
}
|
||||
}
|
||||
|
||||
// re-encrypt the plain passphrase for the cloned volume
|
||||
@ -178,7 +184,7 @@ func (ri *rbdImage) repairEncryptionConfig(dest *rbdImage) error {
|
||||
dest.conn = ri.conn.Copy()
|
||||
}
|
||||
|
||||
return ri.copyEncryptionConfig(dest)
|
||||
return ri.copyEncryptionConfig(dest, false)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
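A sketch of the two copy modes, following the comment and call sites in this change (variable names hypothetical):

	// Regular clone: copy the KMS config and the re-encrypted passphrase.
	err := parentVol.copyEncryptionConfig(&cloneVol.rbdImage, false)

	// Snapshot restore into a StorageClass with a different encryption
	// config: the destination has already initialized its own
	// VolumeEncryption, so copy only the re-encrypted passphrase.
	err = parentVol.copyEncryptionConfig(&restoredVol.rbdImage, true)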
@ -34,4 +34,12 @@ var (
|
||||
ErrMissingStash = errors.New("missing stash")
|
||||
// ErrFlattenInProgress is returned when flatten is in progress for an image.
|
||||
ErrFlattenInProgress = errors.New("flatten in progress")
|
||||
// ErrMissingMonitorsInVolID is returned when monitor information is missing in migration volID.
|
||||
ErrMissingMonitorsInVolID = errors.New("monitor information can not be empty in volID")
|
||||
// ErrMissingPoolNameInVolID is returned when pool information is missing in migration volID.
|
||||
ErrMissingPoolNameInVolID = errors.New("pool information can not be empty in volID")
|
||||
// ErrMissingImageNameInVolID is returned when image name information is missing in migration volID.
|
||||
ErrMissingImageNameInVolID = errors.New("rbd image name information can not be empty in volID")
|
||||
// ErrDecodeClusterIDFromMonsInVolID is returned when decoding the clusterID from the mons hash in the migration volID fails.
|
||||
ErrDecodeClusterIDFromMonsInVolID = errors.New("failed to get clusterID from monitors hash in volID")
|
||||
)
|
||||
|
116
internal/rbd/migration.go
Normal file
@ -0,0 +1,116 @@
|
||||
/*
|
||||
Copyright 2021 The Ceph-CSI Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rbd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
)
|
||||
|
||||
// isMigrationVolID validates if the passed in volID is a volumeID
|
||||
// of a migrated volume.
|
||||
func isMigrationVolID(volHash string) bool {
|
||||
return strings.Contains(volHash, migIdentifier) &&
|
||||
strings.Contains(volHash, migImageNamePrefix) && strings.Contains(volHash, migMonPrefix)
|
||||
}
|
||||
|
||||
// parseMigrationVolID decodes the volume ID and generates a migrationVolID
|
||||
// struct which consists of mon, image name, pool and clusterID information.
|
||||
func parseMigrationVolID(vh string) (*migrationVolID, error) {
|
||||
mh := &migrationVolID{}
|
||||
handSlice := strings.Split(vh, migVolIDFieldSep)
|
||||
if len(handSlice) < migVolIDTotalLength {
|
||||
// it is short of fields in this case, so return an error
|
||||
return nil, ErrInvalidVolID
|
||||
}
|
||||
// Store pool
|
||||
poolHash := strings.Join(handSlice[migVolIDSplitLength:], migVolIDFieldSep)
|
||||
poolByte, dErr := hex.DecodeString(poolHash)
|
||||
if dErr != nil {
|
||||
return nil, ErrMissingPoolNameInVolID
|
||||
}
|
||||
mh.poolName = string(poolByte)
|
||||
// Parse migration mons (for clusterID) and image
|
||||
for _, field := range handSlice[:migVolIDSplitLength] {
|
||||
switch {
|
||||
case strings.Contains(field, migImageNamePrefix):
|
||||
imageSli := strings.Split(field, migImageNamePrefix)
|
||||
if len(imageSli) > 0 {
|
||||
mh.imageName = migInTreeImagePrefix + imageSli[1]
|
||||
}
|
||||
case strings.Contains(field, migMonPrefix):
|
||||
// ex: mons-7982de6a23b77bce50b1ba9f2e879cce
|
||||
mh.clusterID = strings.Trim(field, migMonPrefix)
|
||||
}
|
||||
}
|
||||
if mh.imageName == "" {
|
||||
return nil, ErrMissingImageNameInVolID
|
||||
}
|
||||
if mh.poolName == "" {
|
||||
return nil, ErrMissingPoolNameInVolID
|
||||
}
|
||||
if mh.clusterID == "" {
|
||||
return nil, ErrDecodeClusterIDFromMonsInVolID
|
||||
}
|
||||
|
||||
return mh, nil
|
||||
}
|
||||
|
||||
// parseAndDeleteMigratedVolume gets rbd volume details from the migration volID
|
||||
// and deletes the volume from the cluster, returning an error if any step fails.
|
||||
func parseAndDeleteMigratedVolume(ctx context.Context, volumeID string, cr *util.Credentials) error {
|
||||
parsedMigHandle, err := parseMigrationVolID(volumeID)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to parse migration volumeID: %s , err: %v", volumeID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
rv := &rbdVolume{}
|
||||
|
||||
// fill details to rv struct from parsed migration handle
|
||||
rv.RbdImageName = parsedMigHandle.imageName
|
||||
rv.Pool = parsedMigHandle.poolName
|
||||
rv.ClusterID = parsedMigHandle.clusterID
|
||||
rv.Monitors, err = util.Mons(util.CsiConfigFile, rv.ClusterID)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to fetch monitors using clusterID: %s, err: %v", rv.ClusterID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// connect to the volume.
|
||||
err = rv.Connect(cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to get connected to the rbd image : %s, err: %v", rv.RbdImageName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
defer rv.Destroy()
|
||||
// if connected, delete it
|
||||
err = deleteImage(ctx, rv, cr)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "failed to delete rbd image : %s, err: %v", rv.RbdImageName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
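A sketch of decoding, using the sample handle from the tests below; the hex trailer is the pool name ("706f6f6c5f7265706c6963615f706f6f6c" decodes to "pool_replica_pool"):

	// Sketch: parsing a migration volume handle (value from the tests).
	mh, err := parseMigrationVolID(
		"mig_mons-7982de6a23b77bce50b1ba9f2e879cce_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c")
	// mh.clusterID == "7982de6a23b77bce50b1ba9f2e879cce"
	// mh.imageName == "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412"
	// mh.poolName  == "pool_replica_pool"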
|
173
internal/rbd/migration_test.go
Normal file
@ -0,0 +1,173 @@
|
||||
/*
|
||||
Copyright 2021 The Ceph-CSI Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package rbd
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsMigrationVolID(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
args string
|
||||
migVolID bool
|
||||
}{
|
||||
{
|
||||
"correct volume ID",
|
||||
"mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
true,
|
||||
},
|
||||
{
|
||||
"Wrong volume ID",
|
||||
"wrong_volume_ID",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"wrong mons prefixed volume ID",
|
||||
"mig_mon-b7f67366bb43f32e07d8a261a7840da9_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
false,
|
||||
},
|
||||
{
|
||||
"wrong image prefixed volume ID",
|
||||
"mig_imae-e0b45b52-7e09-47d3-8f1b-806995fa4412_pool_replica_pool",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"wrong volume ID",
|
||||
"mig_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_pool_replica_pool",
|
||||
false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := isMigrationVolID(tt.args)
|
||||
if got != tt.migVolID {
|
||||
t.Errorf("isMigrationVolID() = %v, want %v", got, tt.migVolID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMigrationVolID(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
args string
|
||||
want *migrationVolID
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
"correct volume ID",
|
||||
"mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
&migrationVolID{
|
||||
// monitors: "10.70.53.126:6789",
|
||||
imageName: "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412",
|
||||
poolName: "pool_replica_pool",
|
||||
clusterID: "b7f67366bb43f32e07d8a261a7840da9",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"volume ID without mons",
|
||||
"mig_kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c",
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"volume ID without image",
|
||||
"mig_pool-706f6f6c5f7265706c6963615f706f6f6c",
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"volume ID without pool",
|
||||
"mig",
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"correct volume ID with single mon",
|
||||
"mig_mons-7982de6a23b77bce50b1ba9f2e879cce_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
&migrationVolID{
|
||||
// monitors: "10.70.53.126:6789,10.70.53.156:6789",
|
||||
imageName: "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412",
|
||||
poolName: "pool_replica_pool",
|
||||
clusterID: "7982de6a23b77bce50b1ba9f2e879cce",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"correct volume ID with more than one mon",
|
||||
"mig_mons-7982de6a23b77bce50b1ba9f2e879cce_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
&migrationVolID{
|
||||
// monitors: "10.70.53.126:6789,10.70.53.156:6789",
|
||||
imageName: "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412",
|
||||
poolName: "pool_replica_pool",
|
||||
clusterID: "7982de6a23b77bce50b1ba9f2e879cce",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"correct volume ID with '_' pool name",
|
||||
"mig_mons-7982de6a23b77bce50b1ba9f2e879cce_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
&migrationVolID{
|
||||
// monitors: "10.70.53.126:6789,10.70.53.156:6789",
|
||||
imageName: "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412",
|
||||
poolName: "pool_replica_pool",
|
||||
clusterID: "7982de6a23b77bce50b1ba9f2e879cce",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"volume ID with unallowed migration version string",
|
||||
"migrate-beta_mons-b7f67366bb43f32e07d8a261a7840da9_kubernetes-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"volume ID with unallowed image name",
|
||||
"mig_mons-b7f67366bb43f32e07d8a261a7840da9_kubernetes-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"volume ID without 'mon-' prefix string",
|
||||
"mig_b7f67366bb43f32e07d8a261a7840da9_kubernetes-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412_706f6f6c5f7265706c6963615f706f6f6c", //nolint:lll // migration volID
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := parseMigrationVolID(tt.args)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, tt.wantErr)
|
||||
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("ParseMigrationVolID() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -149,6 +149,24 @@ func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *r
|
||||
return nil
|
||||
}
|
||||
|
||||
// getClusterIDFromMigrationVolume fetches the clusterID for the passed in monitors.
|
||||
func getClusterIDFromMigrationVolume(monitors string) (string, error) {
|
||||
var err error
|
||||
var rclusterID string
|
||||
for _, m := range strings.Split(monitors, ",") {
|
||||
rclusterID, err = util.GetClusterIDFromMon(m)
|
||||
if err != nil && !errors.Is(err, util.ErrMissingConfigForMonitor) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if rclusterID != "" {
|
||||
return rclusterID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
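A sketch of the lookup (monitor addresses taken from the test comments elsewhere in this change): the first monitor that maps to a clusterID in the CSI config wins, and ErrMissingConfigForMonitor on an individual monitor is tolerated rather than treated as fatal.

	// Sketch: resolve the clusterID from a comma-separated monitor list.
	cID, err := getClusterIDFromMigrationVolume("10.70.53.126:6789,10.70.53.156:6789")
	if err != nil {
		return err
	}
	// cID now holds the clusterID of the first monitor found in the config.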
|
||||
|
||||
// populateRbdVol updates the fields in the rbdVolume struct based on the request it received.
|
||||
func populateRbdVol(
|
||||
ctx context.Context,
|
||||
@ -178,7 +196,7 @@ func populateRbdVol(
|
||||
disableInUseChecks = true
|
||||
}
|
||||
|
||||
rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), req.GetSecrets(), disableInUseChecks)
|
||||
rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), req.GetSecrets(), disableInUseChecks, true)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -188,6 +206,10 @@ func populateRbdVol(
|
||||
// get rbd image name from the volume journal
|
||||
// for static volumes, the image name is actually the volume ID itself
|
||||
if isStaticVol {
|
||||
if req.GetVolumeContext()[intreeMigrationKey] == intreeMigrationLabel {
|
||||
// if migration static volume, use imageName as volID
|
||||
volID = req.GetVolumeContext()["imageName"]
|
||||
}
|
||||
rv.RbdImageName = volID
|
||||
} else {
|
||||
var vi util.CSIIdentifier
|
||||
@ -225,6 +247,10 @@ func populateRbdVol(
|
||||
if rv.LogDir == "" {
|
||||
rv.LogDir = defaultLogDir
|
||||
}
|
||||
rv.LogStrategy = req.GetVolumeContext()["cephLogStrategy"]
|
||||
if rv.LogStrategy == "" {
|
||||
rv.LogStrategy = defaultLogStrategy
|
||||
}
|
||||
|
||||
return rv, err
|
||||
}
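For reference, a sketch of the volume-context keys consulted above (key names as used in this diff; values hypothetical):

	volCtx := map[string]string{
		"migration":       "true",     // intreeMigrationKey / intreeMigrationLabel
		"imageName":       "kubernetes-dynamic-pvc-e0b45b52-7e09-47d3-8f1b-806995fa4412",
		"cephLogStrategy": "compress", // remove (default) | compress | preserve
	}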
|
||||
@ -264,6 +290,16 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
}
|
||||
defer ns.VolumeLocks.Release(volID)
|
||||
|
||||
// Check if this is a migration request because, unlike other node stage requests,
|
||||
// it will be missing the clusterID; fill it by fetching it from the config file using the mon.
|
||||
if req.GetVolumeContext()[intreeMigrationKey] == intreeMigrationLabel && req.VolumeContext[util.ClusterIDKey] == "" {
|
||||
cID, cErr := getClusterIDFromMigrationVolume(req.GetVolumeContext()["monitors"])
|
||||
if cErr != nil {
|
||||
return nil, status.Error(codes.Internal, cErr.Error())
|
||||
}
|
||||
req.VolumeContext[util.ClusterIDKey] = cID
|
||||
}
|
||||
|
||||
stagingParentPath := req.GetStagingTargetPath()
|
||||
stagingTargetPath := stagingParentPath + "/" + volID
|
||||
|
||||
@ -846,8 +882,9 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
volumeID: req.GetVolumeId(),
|
||||
unmapOptions: imgInfo.UnmapOptions,
|
||||
logDir: imgInfo.LogDir,
|
||||
logStrategy: imgInfo.LogStrategy,
|
||||
}
|
||||
if err = detachRBDImageOrDeviceSpec(ctx, dArgs); err != nil {
|
||||
if err = detachRBDImageOrDeviceSpec(ctx, &dArgs); err != nil {
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"error unmapping volume (%s) from staging path (%s): (%v)",
|
||||
|
@ -102,6 +102,7 @@ type detachRBDImageArgs struct {
|
||||
volumeID string
|
||||
unmapOptions string
|
||||
logDir string
|
||||
logStrategy string
|
||||
}
|
||||
|
||||
// rbdGetDeviceList queries rbd about mapped devices and returns a list of rbdDeviceInfo
|
||||
@ -383,8 +384,9 @@ func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.
|
||||
volumeID: volOpt.VolID,
|
||||
unmapOptions: volOpt.UnmapOptions,
|
||||
logDir: volOpt.LogDir,
|
||||
logStrategy: volOpt.LogStrategy,
|
||||
}
|
||||
detErr := detachRBDImageOrDeviceSpec(ctx, dArgs)
|
||||
detErr := detachRBDImageOrDeviceSpec(ctx, &dArgs)
|
||||
if detErr != nil {
|
||||
log.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
|
||||
}
|
||||
@ -436,14 +438,14 @@ func detachRBDDevice(ctx context.Context, devicePath, volumeID, unmapOptions str
|
||||
unmapOptions: unmapOptions,
|
||||
}
|
||||
|
||||
return detachRBDImageOrDeviceSpec(ctx, dArgs)
|
||||
return detachRBDImageOrDeviceSpec(ctx, &dArgs)
|
||||
}
|
||||
|
||||
// detachRBDImageOrDeviceSpec detaches an rbd imageSpec or devicePath, with additional checking
|
||||
// when imageSpec is used to decide if image is already unmapped.
|
||||
func detachRBDImageOrDeviceSpec(
|
||||
ctx context.Context,
|
||||
dArgs detachRBDImageArgs) error {
|
||||
dArgs *detachRBDImageArgs) error {
|
||||
if dArgs.encrypted {
|
||||
mapperFile, mapperPath := util.VolumeMapper(dArgs.volumeID)
|
||||
mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath)
|
||||
@ -490,10 +492,7 @@ func detachRBDImageOrDeviceSpec(
|
||||
}
|
||||
if dArgs.isNbd && dArgs.logDir != "" {
|
||||
logFile := getCephClientLogFileName(dArgs.volumeID, dArgs.logDir, "rbd-nbd")
|
||||
if err = os.Remove(logFile); err != nil {
|
||||
log.WarningLog(ctx, "failed to remove logfile: %s, error: %v",
|
||||
logFile, err)
|
||||
}
|
||||
go strategicActionOnLogFile(ctx, dArgs.logStrategy, logFile)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -334,7 +334,7 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
|
||||
}
|
||||
|
||||
if parentVol != nil && parentVol.isEncrypted() {
|
||||
err = parentVol.copyEncryptionConfig(&rv.rbdImage)
|
||||
err = parentVol.copyEncryptionConfig(&rv.rbdImage, false)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
|
@@ -53,6 +53,7 @@ const (
 	rbdDefaultMounter  = "rbd"
 	rbdNbdMounter      = "rbd-nbd"
 	defaultLogDir      = "/var/log/ceph"
+	defaultLogStrategy = "remove" // supports remove, compress and preserve

 	// Output strings returned during invocation of "ceph rbd task add remove <imagespec>" when
 	// command is not supported by ceph manager. Used to check errors and recover when the command
@@ -72,6 +73,24 @@ const (
 	// thick provisioned or thin provisioned.
 	thickProvisionMetaData = "true"
 	thinProvisionMetaData  = "false"

+	// migration label key and value for parameters in volume context.
+	intreeMigrationKey   = "migration"
+	intreeMigrationLabel = "true"
+	migInTreeImagePrefix = "kubernetes-dynamic-pvc-"
+	// migration volume handle identifiers.
+	// total length of fields in the migration volume handle.
+	migVolIDTotalLength = 4
+	// split boundary length of fields.
+	migVolIDSplitLength = 3
+	// separator for migration handle fields.
+	migVolIDFieldSep = "_"
+	// identifier of a migration vol handle.
+	migIdentifier = "mig"
+	// prefix of image field.
+	migImageNamePrefix = "image-"
+	// prefix in the handle for monitors field.
+	migMonPrefix = "mons-"
 )

 // rbdImage contains common attributes and methods for the rbdVolume and
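Read together, these constants describe a migration volume handle of the form `mig_mons-<hash>_image-<name>_<pool>`. The sketch below shows how they could be applied to split such a handle into the `migrationVolID` struct introduced further down; the helper name and the exact field semantics are assumptions for illustration, not the parser shipped in this commit:

```go
// Hypothetical parser for package rbd; the real implementation may differ.
func parseMigrationHandle(handle string) (*migrationVolID, error) {
	// "mig_mons-<hash>_image-<name>_<pool>" splits into 4 fields.
	fields := strings.SplitN(handle, migVolIDFieldSep, migVolIDTotalLength)
	if len(fields) != migVolIDTotalLength || fields[0] != migIdentifier {
		return nil, errors.New("not a migration volume handle")
	}

	return &migrationVolID{
		// in-tree provisioned images carry the "kubernetes-dynamic-pvc-" prefix.
		imageName: migInTreeImagePrefix + strings.TrimPrefix(fields[2], migImageNamePrefix),
		poolName:  fields[3],
		// the mons hash still has to be resolved to a clusterID elsewhere.
		clusterID: strings.TrimPrefix(fields[1], migMonPrefix),
	}, nil
}
```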
@@ -140,6 +159,7 @@ type rbdVolume struct {
 	MapOptions         string
 	UnmapOptions       string
 	LogDir             string
+	LogStrategy        string
 	VolName            string `json:"volName"`
 	MonValueFromSecret string `json:"monValueFromSecret"`
 	VolSize            int64  `json:"volSize"`
@@ -169,6 +189,14 @@ type imageFeature struct {
 	dependsOn []string
 }

+// migrationVolID holds the required fields of an rbd volume that are parsed
+// out of a migrated volumeID.
+type migrationVolID struct {
+	imageName string
+	poolName  string
+	clusterID string
+}

 var supportedFeatures = map[string]imageFeature{
 	librbd.FeatureNameLayering: {
 		needRbdNbd: false,
@@ -848,11 +876,7 @@ func genSnapFromSnapID(
 	snapshotID string,
 	cr *util.Credentials,
 	secrets map[string]string) error {
-	var (
-		options map[string]string
-		vi      util.CSIIdentifier
-	)
-	options = make(map[string]string)
+	var vi util.CSIIdentifier

 	rbdSnap.VolID = snapshotID
@@ -864,9 +888,8 @@ func genSnapFromSnapID(
 	}

 	rbdSnap.ClusterID = vi.ClusterID
-	options["clusterID"] = rbdSnap.ClusterID

-	rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(options)
+	rbdSnap.Monitors, _, err = util.GetMonsAndClusterID(ctx, rbdSnap.ClusterID, false)
 	if err != nil {
 		log.ErrorLog(ctx, "failed getting mons (%s)", err)
@@ -879,7 +902,7 @@ func genSnapFromSnapID(
 	}
 	rbdSnap.JournalPool = rbdSnap.Pool

-	rbdSnap.RadosNamespace, err = util.RadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID)
+	rbdSnap.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID)
 	if err != nil {
 		return err
 	}
@@ -940,29 +963,25 @@ func generateVolumeFromVolumeID(
 	cr *util.Credentials,
 	secrets map[string]string) (*rbdVolume, error) {
 	var (
-		options map[string]string
-		rbdVol  *rbdVolume
-		err     error
+		rbdVol *rbdVolume
+		err    error
 	)
-	options = make(map[string]string)

 	// rbdVolume fields that are not filled up in this function are:
 	// Mounter, MultiNodeWritable
 	rbdVol = &rbdVolume{}
 	rbdVol.VolID = volumeID
 	// TODO check clusterID mapping exists

 	rbdVol.ClusterID = vi.ClusterID
-	options["clusterID"] = rbdVol.ClusterID

-	rbdVol.Monitors, _, err = util.GetMonsAndClusterID(options)
+	rbdVol.Monitors, _, err = util.GetMonsAndClusterID(ctx, rbdVol.ClusterID, false)
 	if err != nil {
 		log.ErrorLog(ctx, "failed getting mons (%s)", err)

 		return rbdVol, err
 	}

-	rbdVol.RadosNamespace, err = util.RadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
+	rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
 	if err != nil {
 		return rbdVol, err
 	}
@@ -1153,7 +1172,7 @@ func generateVolumeFromMapping(
 func genVolFromVolumeOptions(
 	ctx context.Context,
 	volOptions, credentials map[string]string,
-	disableInUseChecks bool) (*rbdVolume, error) {
+	disableInUseChecks, checkClusterIDMapping bool) (*rbdVolume, error) {
 	var (
 		ok  bool
 		err error
@@ -1171,14 +1190,18 @@ func genVolFromVolumeOptions(
 		rbdVol.NamePrefix = namePrefix
 	}

-	rbdVol.Monitors, rbdVol.ClusterID, err = util.GetMonsAndClusterID(volOptions)
+	clusterID, err := util.GetClusterID(volOptions)
+	if err != nil {
+		return nil, err
+	}
+	rbdVol.Monitors, rbdVol.ClusterID, err = util.GetMonsAndClusterID(ctx, clusterID, checkClusterIDMapping)
 	if err != nil {
 		log.ErrorLog(ctx, "failed getting mons (%s)", err)

 		return nil, err
 	}

-	rbdVol.RadosNamespace, err = util.RadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
+	rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
 	if err != nil {
 		return nil, err
 	}
@@ -1248,7 +1271,11 @@ func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[
 	rbdSnap.JournalPool = rbdVol.JournalPool
 	rbdSnap.RadosNamespace = rbdVol.RadosNamespace

-	rbdSnap.Monitors, rbdSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
+	clusterID, err := util.GetClusterID(snapOptions)
+	if err != nil {
+		return nil, err
+	}
+	rbdSnap.Monitors, rbdSnap.ClusterID, err = util.GetMonsAndClusterID(ctx, clusterID, false)
 	if err != nil {
 		log.ErrorLog(ctx, "failed getting mons (%s)", err)
@@ -1373,7 +1400,7 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
 	if pSnapOpts.isEncrypted() {
 		pSnapOpts.conn = rv.conn.Copy()

-		err = pSnapOpts.copyEncryptionConfig(&rv.rbdImage)
+		err = pSnapOpts.copyEncryptionConfig(&rv.rbdImage, true)
 		if err != nil {
 			return fmt.Errorf("failed to clone encryption config: %w", err)
 		}
@@ -1516,8 +1543,9 @@ type rbdImageMetadataStash struct {
 	UnmapOptions string `json:"unmapOptions"`
 	NbdAccess    bool   `json:"accessType"`
 	Encrypted    bool   `json:"encrypted"`
-	DevicePath   string `json:"device"` // holds NBD device path for now
-	LogDir       string `json:"logDir"` // holds the client log path
+	DevicePath   string `json:"device"`          // holds NBD device path for now
+	LogDir       string `json:"logDir"`          // holds the client log path
+	LogStrategy  string `json:"logFileStrategy"` // ceph client log strategy
 }

 // file name in which image metadata is stashed.
@@ -1549,6 +1577,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, metaDataPath string) error {
 	if volOptions.Mounter == rbdTonbd && hasNBD {
 		imgMeta.NbdAccess = true
 		imgMeta.LogDir = volOptions.LogDir
+		imgMeta.LogStrategy = volOptions.LogStrategy
 	}

 	encodedBytes, err := json.Marshal(imgMeta)
@@ -2022,3 +2051,23 @@ func CheckSliceContains(options []string, opt string) bool {

 	return false
 }
+
+// strategicActionOnLogFile acts on the log file as per the cephLogStrategy.
+func strategicActionOnLogFile(ctx context.Context, logStrategy, logFile string) {
+	var err error
+
+	switch strings.ToLower(logStrategy) {
+	case "compress":
+		if err = log.GzipLogFile(logFile); err != nil {
+			log.ErrorLog(ctx, "failed to compress logfile %q: %v", logFile, err)
+		}
+	case "remove":
+		if err = os.Remove(logFile); err != nil {
+			log.ErrorLog(ctx, "failed to remove logfile %q: %v", logFile, err)
+		}
+	case "preserve":
+		// do nothing
+	default:
+		log.ErrorLog(ctx, "unknown cephLogStrategy option %q: hint: 'remove'|'compress'|'preserve'", logStrategy)
+	}
+}
@@ -17,6 +17,9 @@ limitations under the License.
package rbd

import (
	"context"
	"io/ioutil"
	"os"
	"strings"
	"testing"
@@ -208,3 +211,75 @@ func TestGetCephClientLogFileName(t *testing.T) {
 		})
 	}
 }
+
+func TestStrategicActionOnLogFile(t *testing.T) {
+	t.Parallel()
+	ctx := context.TODO()
+	tmpDir := t.TempDir()
+
+	var logFile [3]string
+	for i := 0; i < 3; i++ {
+		f, err := ioutil.TempFile(tmpDir, "rbd-*.log")
+		if err != nil {
+			t.Errorf("creating tempfile failed: %v", err)
+		}
+		logFile[i] = f.Name()
+	}
+
+	type args struct {
+		logStrategy string
+		logFile     string
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "test for compress",
+			args: args{
+				logStrategy: "compress",
+				logFile:     logFile[0],
+			},
+		},
+		{
+			name: "test for remove",
+			args: args{
+				logStrategy: "remove",
+				logFile:     logFile[1],
+			},
+		},
+		{
+			name: "test for preserve",
+			args: args{
+				logStrategy: "preserve",
+				logFile:     logFile[2],
+			},
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			strategicActionOnLogFile(ctx, tt.args.logStrategy, tt.args.logFile)
+
+			var err error
+			switch tt.args.logStrategy {
+			case "compress":
+				newExt := strings.Replace(tt.args.logFile, ".log", ".gz", -1)
+				if _, err = os.Stat(newExt); os.IsNotExist(err) {
+					t.Errorf("compressed logFile (%s) not found: %v", newExt, err)
+				}
+				os.Remove(newExt)
+			case "remove":
+				if _, err = os.Stat(tt.args.logFile); !os.IsNotExist(err) {
+					t.Errorf("logFile (%s) not removed: %v", tt.args.logFile, err)
+				}
+			case "preserve":
+				if _, err = os.Stat(tt.args.logFile); os.IsNotExist(err) {
+					t.Errorf("logFile (%s) not preserved: %v", tt.args.logFile, err)
+				}
+				os.Remove(tt.args.logFile)
+			}
+		})
+	}
+}
@@ -599,14 +599,13 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,

 	mirrorStatus, err := rbdVol.getImageMirroringStatus()
 	if err != nil {
-		// the image gets recreated after issuing resync in that case return
-		// volume as not ready.
+		// the image gets recreated after issuing resync
 		if errors.Is(err, ErrImageNotFound) {
-			resp := &replication.ResyncVolumeResponse{
-				Ready: false,
-			}
-
-			return resp, nil
+			// caller retries till RBD syncs an initial version of the image to
+			// report its status in the resync call. Ideally, this line will not
+			// be executed as the error would get returned due to getImageMirroringInfo
+			// failing to find an image above.
+			return nil, status.Error(codes.Aborted, err.Error())
 		}
 		log.ErrorLog(ctx, err.Error())
@@ -643,6 +642,11 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,

 		return nil, status.Error(codes.Internal, err.Error())
 	}

+	// If we issued a resync, return a non-final error as image needs to be recreated
+	// locally. Caller retries till RBD syncs an initial version of the image to
+	// report its status in the resync request.
+	return nil, status.Error(codes.Unavailable, "awaiting initial resync due to split brain")
 }

 // convert the last update time to UTC
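The `Unavailable` and `Aborted` codes above are deliberately non-final: the caller is expected to keep invoking `ResyncVolume` until RBD has synced an initial version of the image. A caller-side retry loop could look roughly like the sketch below; the `replicationClient` interface, response shape, and retry interval are illustrative assumptions, not part of this commit:

```go
package main

import (
	"context"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// resyncResponse and replicationClient stand in for whatever replication
// client the consumer actually uses; invented here for illustration.
type resyncResponse interface{ GetReady() bool }

type replicationClient interface {
	ResyncVolume(ctx context.Context, volumeID string) (resyncResponse, error)
}

// resyncUntilReady retries ResyncVolume, treating Unavailable/Aborted as
// transient (image still being recreated/resynced), as described above.
func resyncUntilReady(ctx context.Context, client replicationClient, volumeID string) error {
	for {
		resp, err := client.ResyncVolume(ctx, volumeID)
		switch {
		case err == nil && resp.GetReady():
			return nil // initial sync finished
		case err != nil:
			s, ok := status.FromError(err)
			if !ok || (s.Code() != codes.Unavailable && s.Code() != codes.Aborted) {
				return err // anything else is final
			}
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(30 * time.Second): // hypothetical retry interval
		}
	}
}
```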
@@ -17,8 +17,8 @@ limitations under the License.
 package util

 import (
+	"context"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"strings"
@@ -31,6 +31,9 @@ const (

 	// CsiConfigFile is the location of the CSI config file.
 	CsiConfigFile = "/etc/ceph-csi-config/config.json"
+
+	// ClusterIDKey is the name of the key containing clusterID.
+	ClusterIDKey = "clusterID"
 )

 // ClusterInfo strongly typed JSON spec for the below JSON structure.
@@ -105,8 +108,8 @@ func Mons(pathToConfig, clusterID string) (string, error) {
 	return strings.Join(cluster.Monitors, ","), nil
 }

-// RadosNamespace returns the namespace for the given clusterID.
-func RadosNamespace(pathToConfig, clusterID string) (string, error) {
+// GetRadosNamespace returns the namespace for the given clusterID.
+func GetRadosNamespace(pathToConfig, clusterID string) (string, error) {
 	cluster, err := readClusterInfo(pathToConfig, clusterID)
 	if err != nil {
 		return "", err
@@ -131,10 +134,14 @@ func CephFSSubvolumeGroup(pathToConfig, clusterID string) (string, error) {

 // GetMonsAndClusterID returns monitors and clusterID information read from
 // configfile.
-func GetMonsAndClusterID(options map[string]string) (string, string, error) {
-	clusterID, ok := options["clusterID"]
-	if !ok {
-		return "", "", errors.New("clusterID must be set")
+func GetMonsAndClusterID(ctx context.Context, clusterID string, checkClusterIDMapping bool) (string, string, error) {
+	if checkClusterIDMapping {
+		monitors, mappedClusterID, err := FetchMappedClusterIDAndMons(ctx, clusterID)
+		if err != nil {
+			return "", "", err
+		}
+
+		return monitors, mappedClusterID, nil
 	}

 	monitors, err := Mons(CsiConfigFile, clusterID)
@@ -144,3 +151,57 @@ func GetMonsAndClusterID(options map[string]string) (string, string, error) {

 	return monitors, clusterID, nil
 }
+
+// GetClusterID fetches clusterID from given options map.
+func GetClusterID(options map[string]string) (string, error) {
+	clusterID, ok := options[ClusterIDKey]
+	if !ok {
+		return "", ErrClusterIDNotSet
+	}
+
+	return clusterID, nil
+}
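In short, option parsing and monitor lookup are now two separate steps. A hedged sketch of the new call pattern (the wrapper function and values are invented for illustration):

```go
// monsForVolume is an illustrative helper, not code from this commit.
func monsForVolume(ctx context.Context, volOptions map[string]string) (string, string, error) {
	clusterID, err := util.GetClusterID(volOptions)
	if err != nil {
		// ErrClusterIDNotSet when the "clusterID" key is missing.
		return "", "", err
	}

	// With checkClusterIDMapping=true the clusterID/monitors are resolved
	// through FetchMappedClusterIDAndMons (relevant when clusterIDs were
	// remapped, e.g. after failover); with false the monitors come straight
	// from the CSI config file.
	return util.GetMonsAndClusterID(ctx, clusterID, false)
}
```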
+
+// GetClusterIDFromMon fetches the clusterID based on the passed-in mon
+// string. If the mon is found in the config, the clusterID is returned;
+// otherwise an error.
+func GetClusterIDFromMon(mon string) (string, error) {
+	clusterID, err := readClusterInfoWithMon(CsiConfigFile, mon)
+
+	return clusterID, err
+}
+
+func readClusterInfoWithMon(pathToConfig, mon string) (string, error) {
+	var config []ClusterInfo
+
+	// #nosec
+	content, err := ioutil.ReadFile(pathToConfig)
+	if err != nil {
+		err = fmt.Errorf("error fetching configuration file %q: %w", pathToConfig, err)
+
+		return "", err
+	}
+
+	err = json.Unmarshal(content, &config)
+	if err != nil {
+		return "", fmt.Errorf("unmarshal failed (%w), raw buffer response: %s",
+			err, string(content))
+	}
+
+	for _, cluster := range config {
+		// the same mons can map to different clusterIDs with different
+		// radosNamespace configurations, so bail out if a radosNamespace
+		// is configured for this cluster
+		if cluster.RadosNamespace != "" {
+			continue
+		}
+		for _, m := range cluster.Monitors {
+			if m == mon {
+				return cluster.ClusterID, nil
+			}
+		}
+	}
+
+	return "", ErrMissingConfigForMonitor
+}
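A hypothetical configuration makes the bail-out concrete (the field names match the `ClusterInfo` usage above; the values are invented):

```go
// With this illustrative config, readClusterInfoWithMon(path, "mon1") skips
// "cluster-a" because it sets a RadosNamespace, and returns "cluster-b".
var exampleConfig = []ClusterInfo{
	{ClusterID: "cluster-a", Monitors: []string{"mon1"}, RadosNamespace: "ns1"},
	{ClusterID: "cluster-b", Monitors: []string{"mon1"}},
}
```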
@@ -34,7 +34,7 @@ func cleanupTestData() {
 	os.RemoveAll(basePath)
 }

-// TODO: make this function less complex.
+// nolint:gocyclo,cyclop // TODO: make this function less complex.
 func TestCSIConfig(t *testing.T) {
 	t.Parallel()
 	var err error
@@ -132,4 +132,42 @@ func TestCSIConfig(t *testing.T) {
 	if err != nil || content != "mon4,mon5,mon6" {
 		t.Errorf("Failed: want (%s), got (%s) (%v)", "mon4,mon5,mon6", content, err)
 	}
+
+	data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}," +
+		"{\"clusterID\":\"" + clusterID1 + "\",\"monitors\":[\"mon4\",\"mon5\",\"mon6\"]}]"
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: should pass as the mon is present in the config
+	content, err = readClusterInfoWithMon(pathToConfig, "mon1")
+	if err != nil || content != "test2" {
+		t.Errorf("Failed: want (%s), got (%s) (%v)", "test2", content, err)
+	}
+
+	// TEST: should pass as the mon is present in the config
+	content, err = readClusterInfoWithMon(pathToConfig, "mon5")
+	if err != nil || content != "test1" {
+		t.Errorf("Failed: want (%s), got (%s) (%v)", "test1", content, err)
+	}
+
+	// TEST: should fail as the mon is not present in the config
+	content, err = readClusterInfoWithMon(pathToConfig, "mon8")
+	if err == nil {
+		t.Errorf("Failed: got (%s)", content)
+	}
+
+	data = "[{\"clusterID\":\"" + clusterID2 + "\", \"radosNamespace\": \"ns1\", \"monitors\":[\"mon1\"]}," +
+		"{\"clusterID\":\"" + clusterID1 + "\",\"monitors\":[\"mon1\"]}]"
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
+	if err != nil {
+		t.Errorf("Test setup error %s", err)
+	}
+
+	// TEST: should pass, returning the cluster without a radosNamespace
+	content, err = readClusterInfoWithMon(pathToConfig, "mon1")
+	if err != nil || content != clusterID1 {
+		t.Errorf("Failed: want (%s), got (%s) (%v)", clusterID1, content, err)
+	}
 }
@@ -33,6 +33,10 @@ var (
 	ErrSnapNameConflict = errors.New("snapshot name conflict")
 	// ErrPoolNotFound is returned when pool is not found.
 	ErrPoolNotFound = errors.New("pool not found")
+	// ErrClusterIDNotSet is returned when cluster id is not set.
+	ErrClusterIDNotSet = errors.New("clusterID must be set")
+	// ErrMissingConfigForMonitor is returned when clusterID is not found for the mon.
+	ErrMissingConfigForMonitor = errors.New("missing configuration of cluster ID for monitor")
 )

 type errorPair struct {
52 internal/util/log/log_utils.go Normal file
@@ -0,0 +1,52 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package log

import (
	"compress/gzip"
	"io/ioutil"
	"os"
	"strings"
)

// GzipLogFile converts and replaces the log file from text format to gzip
// compressed format.
func GzipLogFile(pathToFile string) error {
	// Get all the bytes from the file.
	content, err := ioutil.ReadFile(pathToFile) // #nosec:G304, file inclusion via variable.
	if err != nil {
		return err
	}

	// Replace the .log extension with the .gz extension.
	newExt := strings.Replace(pathToFile, ".log", ".gz", -1)

	// Open the file for writing.
	gf, err := os.OpenFile(newExt, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) // #nosec:G304,G302, file inclusion & perms
	if err != nil {
		return err
	}
	defer gf.Close() // #nosec:G307, error on close is not critical here

	// Write the compressed data.
	w := gzip.NewWriter(gf)
	defer w.Close()
	if _, err = w.Write(content); err != nil {
		os.Remove(newExt) // #nosec:G104, non-critical error

		return err
	}

	return os.Remove(pathToFile)
}
47 internal/util/log/log_utils_test.go Normal file
@@ -0,0 +1,47 @@
/*
Copyright 2021 ceph-csi authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package log

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"testing"
)

func TestGzipLogFile(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	logFile, err := ioutil.TempFile(tmpDir, "rbd-*.log")
	if err != nil {
		fmt.Println(err)
	}
	defer os.Remove(logFile.Name())

	if err = GzipLogFile(logFile.Name()); err != nil {
		t.Errorf("GzipLogFile failed: %v", err)
	}

	newExt := strings.Replace(logFile.Name(), ".log", ".gz", -1)
	if _, err = os.Stat(newExt); errors.Is(err, os.ErrNotExist) {
		t.Errorf("compressed logFile (%s) not found: %v", newExt, err)
	}

	os.Remove(newExt)
}
@@ -34,7 +34,7 @@ DriverInfo:
   persistence: true

   # Volume ownership via fsGroup
-  fsGroup: true
+  fsGroup: false

   # Raw block mode
   block: false
@@ -115,13 +115,6 @@ function validate_container_cmd() {
 	fi
 }

-function enable_psp() {
-	echo "prepare minikube to support pod security policies"
-	mkdir -p "$HOME"/.minikube/files/etc/kubernetes/addons
-	DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-	cp "$DIR"/psp.yaml "$HOME"/.minikube/files/etc/kubernetes/addons/psp.yaml
-}
-
 # Storage providers and the default storage class is not needed for Ceph-CSI
 # testing. In order to reduce resources and potential conflicts between storage
 # plugins, disable them.
@@ -130,19 +123,6 @@ function disable_storage_addons() {
 	${minikube} addons disable storage-provisioner 2>/dev/null || true
 }

-function minikube_supports_psp() {
-	local MINIKUBE_MAJOR
-	local MINIKUBE_MINOR
-	local MINIKUBE_PATCH
-	MINIKUBE_MAJOR=$(minikube_version 1)
-	MINIKUBE_MINOR=$(minikube_version 2)
-	MINIKUBE_PATCH=$(minikube_version 3)
-	if [[ "${MINIKUBE_MAJOR}" -ge 1 ]] && [[ "${MINIKUBE_MINOR}" -ge 11 ]] && [[ "${MINIKUBE_PATCH}" -ge 1 ]] || [[ "${MINIKUBE_MAJOR}" -ge 1 ]] && [[ "${MINIKUBE_MINOR}" -ge 12 ]]; then
-		return 1
-	fi
-	return 0
-}
-
 # configure minikube
 MINIKUBE_ARCH=${MINIKUBE_ARCH:-"amd64"}
 MINIKUBE_VERSION=${MINIKUBE_VERSION:-"latest"}
@@ -183,7 +163,7 @@ CSI_NODE_DRIVER_REGISTRAR_VERSION=${CSI_NODE_DRIVER_REGISTRAR_VERSION:-"v2.2.0"}
 K8S_FEATURE_GATES=${K8S_FEATURE_GATES:-"ExpandCSIVolumes=true"}

 #extra-config for kube https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/
-EXTRA_CONFIG_PSP="--extra-config=apiserver.enable-admission-plugins=PodSecurityPolicy"
+EXTRA_CONFIG_PSP="--extra-config=apiserver.enable-admission-plugins=PodSecurityPolicy --addons=pod-security-policy"

 # kubelet.resolv-conf needs to point to a file, not a symlink
 # the default minikube VM has /etc/resolv.conf -> /run/systemd/resolve/resolv.conf
@@ -222,21 +202,8 @@ up)

 	disable_storage_addons

 	echo "starting minikube with kubeadm bootstrapper"
-	if minikube_supports_psp; then
-		enable_psp
-		# shellcheck disable=SC2086
-		${minikube} start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}" --cni="${CNI}" ${EXTRA_CONFIG} ${EXTRA_CONFIG_PSP} --wait-timeout="${MINIKUBE_WAIT_TIMEOUT}" --wait="${MINIKUBE_WAIT}" --delete-on-failure "${DISK_CONFIG}"
-	else
-		# This is a workaround to fix psp issues in minikube >1.6.2 and <1.11.0
-		# shellcheck disable=SC2086
-		${minikube} start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}" --cni="${CNI}" ${EXTRA_CONFIG} --wait-timeout="${MINIKUBE_WAIT_TIMEOUT}" --wait="${MINIKUBE_WAIT}" --delete-on-failure "${DISK_CONFIG}"
-		DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-		${minikube} kubectl -- apply -f "$DIR"/psp.yaml
-		${minikube} stop
-		# shellcheck disable=SC2086
-		${minikube} start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}" --cni="${CNI}" ${EXTRA_CONFIG} ${EXTRA_CONFIG_PSP} --wait-timeout="${MINIKUBE_WAIT_TIMEOUT}" --wait="${MINIKUBE_WAIT}" "${DISK_CONFIG}"
-	fi
+	# shellcheck disable=SC2086
+	${minikube} start --force --memory="${MEMORY}" --cpus="${CPUS}" -b kubeadm --kubernetes-version="${KUBE_VERSION}" --driver="${VM_DRIVER}" --feature-gates="${K8S_FEATURE_GATES}" --cni="${CNI}" ${EXTRA_CONFIG} ${EXTRA_CONFIG_PSP} --wait-timeout="${MINIKUBE_WAIT_TIMEOUT}" --wait="${MINIKUBE_WAIT}" --delete-on-failure ${DISK_CONFIG}

 	# create a link so the default dataDirHostPath will work for this
 	# environment
@@ -19,9 +19,8 @@ spec:
  volumes:
    - "configMap"
    - "emptyDir"
    - "projected"
    - "secret"
    - "downwardAPI"
    - "projected"
    - "hostPath"

---
@@ -1,12 +1,13 @@
 #!/bin/bash

-GOPACKAGES="$(go list -mod=vendor ./... | grep -v -e vendor -e e2e)"
+MOD_VENDOR=$(test -d vendor && echo '-mod=vendor')
+GOPACKAGES="$(go list "${MOD_VENDOR}" ./... | grep -v -e vendor -e e2e)"
 COVERFILE="${GO_COVER_DIR}/profile.cov"

 # no special options, exec to go test w/ all pkgs
 if [[ "${TEST_EXITFIRST}" != "yes" && -z "${TEST_COVERAGE}" ]]; then
 	# shellcheck disable=SC2086
-	exec go test "${GO_TAGS}" -mod=vendor -v ${GOPACKAGES}
+	exec go test ${GO_TAGS} ${MOD_VENDOR} -v ${GOPACKAGES}
 fi

 # our options are set so we need to handle each go package one
@@ -20,7 +21,7 @@ failed=0
 for gopackage in ${GOPACKAGES}; do
 	echo "--- testing: ${gopackage} ---"
 	# shellcheck disable=SC2086
-	go test "${GO_TAGS}" -mod=vendor -v ${GOTESTOPTS[*]} "${gopackage}" || ((failed += 1))
+	go test "${GO_TAGS}" "${MOD_VENDOR}" -v ${GOTESTOPTS[*]} "${gopackage}" || ((failed += 1))
 	if [[ -f cover.out ]]; then
 		# Append to coverfile
 		grep -v "^mode: count" cover.out >>"${COVERFILE}"
17 tools/Makefile Normal file
@@ -0,0 +1,17 @@
# Copyright 2021 The Ceph-CSI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

.PHONY: generate-deploy
generate-deploy: yamlgen/main.go
	go run yamlgen/main.go
8 tools/README.md Normal file
@@ -0,0 +1,8 @@
# Assorted Tools for maintaining and building Ceph-CSI

## `yamlgen`

`yamlgen` reads deployment configurations from the `api/` package and generates
YAML files that can be used for deploying without advanced automation like
Rook. The generated files are located under `deploy/` and can be regenerated
by running `make generate-deploy` from this directory (see the Makefile above).
83 tools/yamlgen/main.go Normal file
@@ -0,0 +1,83 @@
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"fmt"
	"os"

	"github.com/ceph/ceph-csi/api/deploy/ocp"
)

const header = `---
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
`

type deploymentArtifact struct {
	filename string
	// FIXME: This is not dynamic enough for additional YAML generating
	// functions. Need to look into typecasting the functions and passing
	// interface{} instead of ocp.SecurityContextConstraintsValues.
	yamlFunc func(ocp.SecurityContextConstraintsValues) (string, error)
	defaults ocp.SecurityContextConstraintsValues
}

var yamlArtifacts = []deploymentArtifact{
	{
		"../deploy/scc.yaml",
		ocp.NewSecurityContextConstraintsYAML,
		ocp.SecurityContextConstraintsDefaults,
	},
}

func main() {
	for _, artifact := range yamlArtifacts {
		writeArtifact(artifact)
	}
}

func writeArtifact(artifact deploymentArtifact) {
	fmt.Printf("creating %q...", artifact.filename)

	f, err := os.Create(artifact.filename)
	if err != nil {
		panic(fmt.Sprintf("failed to create file %q: %v", artifact.filename, err))
	}

	_, err = f.WriteString(header)
	if err != nil {
		panic(fmt.Sprintf("failed to write header to %q: %v", artifact.filename, err))
	}

	data, err := artifact.yamlFunc(artifact.defaults)
	if err != nil {
		panic(fmt.Sprintf("failed to generate YAML for %q: %v", artifact.filename, err))
	}

	_, err = f.WriteString(data)
	if err != nil {
		panic(fmt.Sprintf("failed to write contents to %q: %v", artifact.filename, err))
	}

	fmt.Println("done!")
}
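One possible direction for the FIXME above, sketched under the assumption that type erasure via `interface{}` is acceptable; none of this is part of the commit, and the names are invented for illustration:

```go
// artifact is a hypothetical generalization of deploymentArtifact that can
// hold YAML generators with different values types.
type artifact struct {
	filename string
	yamlFunc func(interface{}) (string, error)
	defaults interface{}
}

// wrapSCC adapts the existing typed generator to the erased signature.
func wrapSCC(fn func(ocp.SecurityContextConstraintsValues) (string, error)) func(interface{}) (string, error) {
	return func(v interface{}) (string, error) {
		vals, ok := v.(ocp.SecurityContextConstraintsValues)
		if !ok {
			return "", fmt.Errorf("unexpected values type %T", v)
		}

		return fn(vals)
	}
}
```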
10 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go generated vendored
@@ -53,7 +53,7 @@ var LogHTTPRequestHandler = request.NamedHandler{
 }

 func logRequest(r *request.Request) {
-	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
 		return
 	}

@@ -94,6 +94,10 @@ var LogHTTPRequestHeaderHandler = request.NamedHandler{
 }

 func logRequestHeader(r *request.Request) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
+		return
+	}
+
 	b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
 	if err != nil {
 		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
@@ -124,7 +128,7 @@ var LogHTTPResponseHandler = request.NamedHandler{
 }

 func logResponse(r *request.Request) {
-	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
 		return
 	}

@@ -186,7 +190,7 @@ var LogHTTPResponseHeaderHandler = request.NamedHandler{
 }

 func logResponseHeader(r *request.Request) {
-	if r.Config.Logger == nil {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
 		return
 	}

325 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored
@@ -355,11 +355,17 @@ var awsPartition = partition{

			Endpoints: endpoints{
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-north-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"eu-west-3": endpoint{},
				"sa-east-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-2": endpoint{},
@@ -731,6 +737,7 @@ var awsPartition = partition{
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-northeast-3": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
@@ -1009,6 +1016,23 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"auditmanager": service{

			Endpoints: endpoints{
				"ap-northeast-1": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"autoscaling": service{
			Defaults: endpoint{
				Protocols: []string{"http", "https"},
@@ -1141,6 +1165,14 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"braket": service{

			Endpoints: endpoints{
				"us-east-1": endpoint{},
				"us-west-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"budgets": service{
			PartitionEndpoint: "aws-global",
			IsRegionalized: boxedFalse,
@@ -1186,9 +1218,11 @@ var awsPartition = partition{
		"cloud9": service{

			Endpoints: endpoints{
				"af-south-1": endpoint{},
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-northeast-3": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
@@ -1311,6 +1345,7 @@ var awsPartition = partition{
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-northeast-3": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
@@ -1464,6 +1499,7 @@ var awsPartition = partition{
		"codecommit": service{

			Endpoints: endpoints{
				"af-south-1": endpoint{},
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
@@ -4076,6 +4112,14 @@ var awsPartition = partition{
				},
			},
		},
		"ivs": service{

			Endpoints: endpoints{
				"eu-west-1": endpoint{},
				"us-east-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"kafka": service{

			Endpoints: endpoints{
@@ -4100,6 +4144,27 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"kafkaconnect": service{

			Endpoints: endpoints{
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-north-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"eu-west-3": endpoint{},
				"sa-east-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"kinesis": service{

			Endpoints: endpoints{
@@ -4227,6 +4292,7 @@ var awsPartition = partition{
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-northeast-3": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
@@ -4680,6 +4746,26 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"mediapackage-vod": service{

			Endpoints: endpoints{
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"eu-central-1": endpoint{},
				"eu-north-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"eu-west-3": endpoint{},
				"sa-east-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"mediastore": service{

			Endpoints: endpoints{
@@ -4754,6 +4840,22 @@ var awsPartition = partition{
				"us-east-1": endpoint{},
			},
		},
		"models-v2-lex": service{

			Endpoints: endpoints{
				"af-south-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"us-east-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"models.lex": service{
			Defaults: endpoint{
				CredentialScope: credentialScope{
@@ -5007,6 +5109,62 @@ var awsPartition = partition{
				},
			},
		},
		"network-firewall": service{

			Endpoints: endpoints{
				"af-south-1": endpoint{},
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-northeast-3": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-north-1": endpoint{},
				"eu-south-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"eu-west-3": endpoint{},
				"fips-ca-central-1": endpoint{
					Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "ca-central-1",
					},
				},
				"fips-us-east-1": endpoint{
					Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-east-1",
					},
				},
				"fips-us-east-2": endpoint{
					Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-east-2",
					},
				},
				"fips-us-west-1": endpoint{
					Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-west-1",
					},
				},
				"fips-us-west-2": endpoint{
					Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-west-2",
					},
				},
				"me-south-1": endpoint{},
				"sa-east-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"oidc": service{

			Endpoints: endpoints{
@@ -5266,6 +5424,7 @@ var awsPartition = partition{
		"polly": service{

			Endpoints: endpoints{
				"af-south-1": endpoint{},
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
@@ -5427,6 +5586,25 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"quicksight": service{

			Endpoints: endpoints{
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"api": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"sa-east-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"ram": service{

			Endpoints: endpoints{
@@ -5771,6 +5949,22 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"runtime-v2-lex": service{

			Endpoints: endpoints{
				"af-south-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"ca-central-1": endpoint{},
				"eu-central-1": endpoint{},
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"us-east-1": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"runtime.lex": service{
			Defaults: endpoint{
				CredentialScope: credentialScope{
@@ -5807,6 +6001,7 @@ var awsPartition = partition{
				"ap-east-1": endpoint{},
				"ap-northeast-1": endpoint{},
				"ap-northeast-2": endpoint{},
				"ap-northeast-3": endpoint{},
				"ap-south-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
@@ -6956,6 +7151,20 @@ var awsPartition = partition{
				"us-west-2": endpoint{},
			},
		},
		"ssm-incidents": service{

			Endpoints: endpoints{
				"ap-northeast-1": endpoint{},
				"ap-southeast-1": endpoint{},
				"ap-southeast-2": endpoint{},
				"eu-central-1": endpoint{},
				"eu-north-1": endpoint{},
				"eu-west-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"states": service{

			Endpoints: endpoints{
@@ -7306,9 +7515,33 @@ var awsPartition = partition{
				"eu-west-1": endpoint{},
				"eu-west-2": endpoint{},
				"sa-east-1": endpoint{},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-2": endpoint{},
				"transcribestreaming-fips-ca-central-1": endpoint{
					Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "ca-central-1",
					},
				},
				"transcribestreaming-fips-us-east-1": endpoint{
					Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-east-1",
					},
				},
				"transcribestreaming-fips-us-east-2": endpoint{
					Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-east-2",
					},
				},
				"transcribestreaming-fips-us-west-2": endpoint{
					Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-west-2",
					},
				},
				"us-east-1": endpoint{},
				"us-east-2": endpoint{},
				"us-west-2": endpoint{},
			},
		},
		"transfer": service{
@@ -7881,6 +8114,13 @@ var awscnPartition = partition{
				"cn-northwest-1": endpoint{},
			},
		},
		"appmesh": service{

			Endpoints: endpoints{
				"cn-north-1": endpoint{},
				"cn-northwest-1": endpoint{},
			},
		},
		"appsync": service{

			Endpoints: endpoints{
@@ -9733,6 +9973,18 @@ var awsusgovPartition = partition{
				},
			},
		},
		"identitystore": service{

			Endpoints: endpoints{
				"fips-us-gov-west-1": endpoint{
					Hostname: "identitystore.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
				"us-gov-west-1": endpoint{},
			},
		},
		"inspector": service{

			Endpoints: endpoints{
@@ -9775,6 +10027,23 @@ var awsusgovPartition = partition{
				"us-gov-west-1": endpoint{},
			},
		},
		"iotevents": service{

			Endpoints: endpoints{
				"us-gov-west-1": endpoint{},
			},
		},
		"ioteventsdata": service{

			Endpoints: endpoints{
				"us-gov-west-1": endpoint{
					Hostname: "data.iotevents.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
			},
		},
		"iotsecuredtunneling": service{

			Endpoints: endpoints{
@@ -9998,6 +10267,36 @@ var awsusgovPartition = partition{
				},
			},
		},
		"network-firewall": service{

			Endpoints: endpoints{
				"fips-us-gov-east-1": endpoint{
					Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-east-1",
					},
				},
				"fips-us-gov-west-1": endpoint{
					Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
				"us-gov-east-1": endpoint{},
				"us-gov-west-1": endpoint{},
			},
		},
		"oidc": service{

			Endpoints: endpoints{
				"us-gov-west-1": endpoint{
					Hostname: "oidc.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
			},
		},
		"organizations": service{
			PartitionEndpoint: "aws-us-gov-global",
			IsRegionalized: boxedFalse,
@@ -10067,6 +10366,13 @@ var awsusgovPartition = partition{
				"us-gov-west-1": endpoint{},
			},
		},
		"quicksight": service{

			Endpoints: endpoints{
				"api": endpoint{},
				"us-gov-west-1": endpoint{},
			},
		},
		"ram": service{

			Endpoints: endpoints{
@@ -10381,6 +10687,19 @@ var awsusgovPartition = partition{
				"us-gov-west-1": endpoint{},
			},
		},
		"servicediscovery": service{

			Endpoints: endpoints{
				"servicediscovery-fips": endpoint{
					Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
					CredentialScope: credentialScope{
						Region: "us-gov-west-1",
					},
				},
				"us-gov-east-1": endpoint{},
				"us-gov-west-1": endpoint{},
			},
		},
		"servicequotas": service{
			Defaults: endpoint{
				Protocols: []string{"https"},
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.40.34"
+const SDKVersion = "1.40.50"
18926 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go generated vendored
File diff suppressed because it is too large
1700 vendor/github.com/aws/aws-sdk-go/service/kms/api.go generated vendored
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff