mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-26 16:20:28 +00:00)

commit 426bf67966
Merge pull request #297 from Madhu-1/master

    merge csiv1.0 branch to master
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 23 lines deleted)
@@ -1,23 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**Kubernetes and Ceph CSI Versions**
-Kubernetes: 1.12 and below or 1.13 and above
-Ceph CSI: 0.3.0 or 1.0.0
-
-**Ceph CSI Driver logs**
-Post logs from rbdplugin/cephfs plugin, provisioner, etc.
-
-**To Reproduce**
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 20 lines deleted)
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
.mergify.yml (4 changes)
@@ -1,3 +1,4 @@
+---
 pull_request_rules:
   - name: automatic merge
     conditions:
.travis.yml (47 changes)
@@ -1,24 +1,55 @@
+---
 # need for docker build
 sudo: true

+addons:
+  apt:
+    packages:
+      - realpath
+      - ruby
+
 language: go
 branches:
   only:
+    - csi-v0.3
     - master
+    - csi-v1.0 # remove this once csi-v1.0 becomes master

-go: 1.9.x
+go: 1.11.x

-before_script:
-  - GO_FILES=$(find . -iname '*.go' -type f | grep -v /vendor/)
-  - go get -u golang.org/x/lint/golint #go get github.com/golang/lint/golint
+env:
+  global:
+    - GOLANGCI_VERSION="v1.15.0"
+    - TEST_COVERAGE=stdout
+    - GO_METALINTER_THREADS=1
+    - GO_COVER_DIR=_output
+
+jobs:
+  include:
+    - name: Linter
+      install:
+        - gem install mdl
+        - pip install --user --upgrade pip
+        - pip install --user yamllint
+        # install golangci-lint
+        - curl -sf
+          "https://install.goreleaser.com/github.com/golangci/golangci-lint.sh"
+          | bash -s -- -b $GOPATH/bin "${GOLANGCI_VERSION}"
+      script:
+        - scripts/lint-text.sh --require-all
+        - scripts/lint-go.sh
+        - scripts/test-go.sh

-script:
-  - test -z $(gofmt -s -l $GO_FILES)
-  - go vet -v $(go list ./... | grep -v /vendor/)
-  - make rbdplugin
-  - make cephfsplugin
+    - name: rbdplugin
+      script:
+        - make rbdplugin
+
+    - name: cephfsplugin
+      script:
+        - make cephfsplugin

 deploy:
   - provider: script
-    script:
-      - ./deploy.sh
+    on: # yamllint disable-line rule:truthy
+      all_branches: true
+    script: ./deploy.sh
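The new matrix drives linting and unit tests through the scripts/ helpers, so the Travis jobs can be reproduced outside CI. A minimal sketch of the Linter job, assuming a Go 1.11 toolchain and Ruby/Python available for mdl and yamllint:

```bash
# Reproduce the Travis "Linter" job locally (sketch; run from the repo root).
export GOLANGCI_VERSION="v1.15.0"
gem install mdl
pip install --user yamllint
curl -sf "https://install.goreleaser.com/github.com/golangci/golangci-lint.sh" \
  | bash -s -- -b "$GOPATH/bin" "${GOLANGCI_VERSION}"

./scripts/lint-text.sh --require-all  # markdown and yaml linting
./scripts/lint-go.sh                  # golangci-lint over the Go sources
./scripts/test-go.sh                  # unit tests, coverage per $TEST_COVERAGE
```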
Gopkg.lock (generated, 283 changes)
@@ -2,145 +2,217 @@


 [[projects]]
+  digest = "1:94ffc0947c337d618b6ff5ed9abaddc1217b090c1b3a1ae4739b35b7b25851d5"
   name = "github.com/container-storage-interface/spec"
-  packages = ["lib/go/csi/v0"]
-  revision = "2178fdeea87f1150a17a63252eee28d4d8141f72"
-  version = "v0.3.0"
-
-[[projects]]
-  name = "github.com/ghodss/yaml"
-  packages = ["."]
-  revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
+  packages = ["lib/go/csi"]
+  pruneopts = "NUT"
+  revision = "ed0bb0e1557548aa028307f48728767cfe8f6345"
   version = "v1.0.0"

 [[projects]]
+  digest = "1:abea725bcf0210887f5da19d804fffa1dd45a42a56bdf5f02322345e3fee4f0d"
   name = "github.com/gogo/protobuf"
   packages = [
     "proto",
-    "sortkeys"
+    "sortkeys",
   ]
+  pruneopts = "NUT"
   revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7"
   version = "v1.2.0"

 [[projects]]
-  branch = "master"
-  name = "github.com/golang/glog"
-  packages = ["."]
-  revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
-
-[[projects]]
+  digest = "1:bff0ce7c8e3d6357fa5a8549bbe4bdb620bddc13c11ae569aa7248ea92e2139f"
   name = "github.com/golang/protobuf"
   packages = [
+    "descriptor",
     "proto",
+    "protoc-gen-go/descriptor",
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
     "ptypes/timestamp",
-    "ptypes/wrappers"
+    "ptypes/wrappers",
   ]
-  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
-  version = "v1.1.0"
+  pruneopts = "NUT"
+  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+  version = "v1.2.0"

 [[projects]]
   branch = "master"
+  digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
+  name = "github.com/google/btree"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
+
+[[projects]]
+  branch = "master"
+  digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"

 [[projects]]
+  digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
+  name = "github.com/google/uuid"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
   name = "github.com/googleapis/gnostic"
   packages = [
     "OpenAPIv2",
     "compiler",
-    "extensions"
+    "extensions",
   ]
+  pruneopts = "NUT"
   revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
   version = "v0.2.0"

 [[projects]]
   branch = "master"
-  name = "github.com/howeyc/gopass"
-  packages = ["."]
-  revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
+  digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
+  name = "github.com/gregjones/httpcache"
+  packages = [
+    ".",
+    "diskcache",
+  ]
+  pruneopts = "NUT"
+  revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"

 [[projects]]
+  digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
   name = "github.com/imdario/mergo"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
   version = "v0.3.6"

 [[projects]]
+  digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728"
   name = "github.com/json-iterator/go"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "1624edc4454b8682399def8740d46db5e4362ba4"
   version = "v1.1.5"

 [[projects]]
-  branch = "master"
-  name = "github.com/kubernetes-csi/drivers"
-  packages = ["pkg/csi-common"]
-  revision = "d8f283cd941f1e24e20e62653476a3722633f43e"
+  digest = "1:2b060bb1a39127e592baf9ab62ec1e94100dc22107f915183f3cd1f6d1cd579a"
+  name = "github.com/kubernetes-csi/csi-lib-utils"
+  packages = ["protosanitizer"]
+  pruneopts = "NUT"
+  revision = "5853414e1d4771302e0df10d1870c444c2135799"
+  version = "v0.2.0"

 [[projects]]
+  digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
   name = "github.com/modern-go/concurrent"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
   version = "1.0.3"

 [[projects]]
+  digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
   name = "github.com/modern-go/reflect2"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
   version = "1.0.1"

 [[projects]]
+  digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
   name = "github.com/pborman/uuid"
   packages = ["."]
-  revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
-  version = "v1.1"
+  pruneopts = "NUT"
+  revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
+  version = "v1.2"

 [[projects]]
+  branch = "master"
+  digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
+  name = "github.com/petar/GoLLRB"
+  packages = ["llrb"]
+  pruneopts = "NUT"
+  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
+
+[[projects]]
+  digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
+  name = "github.com/peterbourgon/diskv"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
+  version = "v2.0.1"
+
+[[projects]]
+  digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
   name = "github.com/pkg/errors"
   packages = ["."]
-  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
-  version = "v0.8.0"
+  pruneopts = "NUT"
+  revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+  version = "v0.8.1"

 [[projects]]
+  digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
   name = "github.com/spf13/pflag"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
   version = "v1.0.3"

 [[projects]]
   branch = "master"
+  digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
-  revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"
+  pruneopts = "NUT"
+  revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"

 [[projects]]
   branch = "master"
+  digest = "1:d4e37d487310720926343302a747f3f9e8f020e5fe961190c57ce437159a6e63"
   name = "golang.org/x/net"
   packages = [
     "context",
+    "context/ctxhttp",
     "http/httpguts",
     "http2",
     "http2/hpack",
     "idna",
     "internal/timeseries",
-    "trace"
+    "trace",
   ]
-  revision = "3673e40ba22529d22c3fd7c93e97b0ce50fa7bdd"
+  pruneopts = "NUT"
+  revision = "915654e7eabcea33ae277abbecf52f0d8b7a9fdc"

 [[projects]]
   branch = "master"
+  digest = "1:293b8e3359faf71cc5f85f3c144115ca6540396f4e1ff0fa30cd014c04258c30"
+  name = "golang.org/x/oauth2"
+  packages = [
+    ".",
+    "internal",
+  ]
+  pruneopts = "NUT"
+  revision = "36a7019397c4c86cf59eeab3bc0d188bac444277"
+
+[[projects]]
+  branch = "master"
+  digest = "1:0f0298002380ddbc31230516fc1dc354ff466e607823e9122d69cce79310bdc9"
   name = "golang.org/x/sys"
   packages = [
     "unix",
-    "windows"
+    "windows",
   ]
-  revision = "e072cadbbdc8dd3d3ffa82b8b4b9304c261d9311"
+  pruneopts = "NUT"
+  revision = "a457fd036447854c0c02e89ea439481bdcf941a2"

 [[projects]]
+  digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
   name = "golang.org/x/text"
   packages = [
     "collate",
@@ -156,40 +228,69 @@
     "unicode/bidi",
     "unicode/cldr",
     "unicode/norm",
-    "unicode/rangetable"
+    "unicode/rangetable",
   ]
+  pruneopts = "NUT"
   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
   version = "v0.3.0"

 [[projects]]
   branch = "master"
+  digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
   name = "golang.org/x/time"
   packages = ["rate"]
+  pruneopts = "NUT"
   revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"

 [[projects]]
-  branch = "master"
-  name = "google.golang.org/genproto"
-  packages = ["googleapis/rpc/status"]
-  revision = "2a72893556e4d1f6c795a4c039314c9fa751eedb"
+  digest = "1:34c10243da5972105edd1b4b883e2bd918fbb3f73fbe14d6af6929e547173494"
+  name = "google.golang.org/appengine"
+  packages = [
+    "internal",
+    "internal/base",
+    "internal/datastore",
+    "internal/log",
+    "internal/remote_api",
+    "internal/urlfetch",
+    "urlfetch",
+  ]
+  pruneopts = "NUT"
+  revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
+  version = "v1.4.0"

 [[projects]]
+  branch = "master"
+  digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
+  name = "google.golang.org/genproto"
+  packages = ["googleapis/rpc/status"]
+  pruneopts = "NUT"
+  revision = "db91494dd46c1fdcbbde05e5ff5eb56df8f7d79a"
+
+[[projects]]
+  digest = "1:638e6e596d67d0a0c8aeb76ebdcf73561b701ea43f21963b1db231d96ed7db68"
   name = "google.golang.org/grpc"
   packages = [
     ".",
     "balancer",
     "balancer/base",
     "balancer/roundrobin",
+    "binarylog/grpc_binarylog_v1",
     "codes",
     "connectivity",
     "credentials",
+    "credentials/internal",
     "encoding",
     "encoding/proto",
     "grpclog",
     "internal",
     "internal/backoff",
+    "internal/binarylog",
     "internal/channelz",
+    "internal/envconfig",
     "internal/grpcrand",
+    "internal/grpcsync",
+    "internal/syscall",
+    "internal/transport",
     "keepalive",
     "metadata",
     "naming",
@@ -200,24 +301,29 @@
     "stats",
     "status",
     "tap",
-    "transport"
   ]
-  revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
-  version = "v1.13.0"
+  pruneopts = "NUT"
+  revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8"
+  version = "v1.17.0"

 [[projects]]
+  digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
   name = "gopkg.in/inf.v0"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
   version = "v0.9.1"

 [[projects]]
+  digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
+  pruneopts = "NUT"
   revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
   version = "v2.2.2"

 [[projects]]
+  digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
   name = "k8s.io/api"
   packages = [
     "admissionregistration/v1alpha1",
@@ -225,16 +331,19 @@
     "apps/v1",
     "apps/v1beta1",
     "apps/v1beta2",
+    "auditregistration/v1alpha1",
     "authentication/v1",
     "authentication/v1beta1",
     "authorization/v1",
     "authorization/v1beta1",
     "autoscaling/v1",
     "autoscaling/v2beta1",
+    "autoscaling/v2beta2",
     "batch/v1",
     "batch/v1beta1",
     "batch/v2alpha1",
     "certificates/v1beta1",
+    "coordination/v1beta1",
     "core/v1",
     "events/v1beta1",
     "extensions/v1beta1",
@@ -244,15 +353,18 @@
     "rbac/v1alpha1",
     "rbac/v1beta1",
     "scheduling/v1alpha1",
+    "scheduling/v1beta1",
     "settings/v1alpha1",
     "storage/v1",
     "storage/v1alpha1",
-    "storage/v1beta1"
+    "storage/v1beta1",
   ]
-  revision = "7aac3e00a1b32fa476b83078cebaaca606b2fb48"
-  version = "kubernetes-1.10.0-beta.1"
+  pruneopts = "NUT"
+  revision = "74b699b93c15473932b89e3d1818ba8282f3b5ab"
+  version = "kubernetes-1.13.3"

 [[projects]]
+  digest = "1:09dee8b7c6cb2fc9c6bee525de3b95199a82a8647a189e153d072a1dfce17de7"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/errors",
@@ -280,6 +392,7 @@
     "pkg/util/framer",
     "pkg/util/intstr",
     "pkg/util/json",
+    "pkg/util/naming",
     "pkg/util/net",
     "pkg/util/runtime",
     "pkg/util/sets",
@@ -289,12 +402,14 @@
     "pkg/util/yaml",
     "pkg/version",
     "pkg/watch",
-    "third_party/forked/golang/reflect"
+    "third_party/forked/golang/reflect",
   ]
-  revision = "302974c03f7e50f16561ba237db776ab93594ef6"
-  version = "kubernetes-1.10.0-beta.1"
+  pruneopts = "NUT"
+  revision = "572dfc7bdfcb4531361a17d27b92851f59acf0dc"
+  version = "kubernetes-1.13.3"

 [[projects]]
+  digest = "1:638623327cb201b425a328d0bddb3379b05eb05ef4cab589380f0be07ac1dc17"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
@@ -305,16 +420,19 @@
     "kubernetes/typed/apps/v1",
     "kubernetes/typed/apps/v1beta1",
     "kubernetes/typed/apps/v1beta2",
+    "kubernetes/typed/auditregistration/v1alpha1",
     "kubernetes/typed/authentication/v1",
     "kubernetes/typed/authentication/v1beta1",
     "kubernetes/typed/authorization/v1",
     "kubernetes/typed/authorization/v1beta1",
     "kubernetes/typed/autoscaling/v1",
     "kubernetes/typed/autoscaling/v2beta1",
+    "kubernetes/typed/autoscaling/v2beta2",
     "kubernetes/typed/batch/v1",
     "kubernetes/typed/batch/v1beta1",
     "kubernetes/typed/batch/v2alpha1",
     "kubernetes/typed/certificates/v1beta1",
+    "kubernetes/typed/coordination/v1beta1",
     "kubernetes/typed/core/v1",
     "kubernetes/typed/events/v1beta1",
     "kubernetes/typed/extensions/v1beta1",
@@ -324,11 +442,16 @@
     "kubernetes/typed/rbac/v1alpha1",
     "kubernetes/typed/rbac/v1beta1",
     "kubernetes/typed/scheduling/v1alpha1",
+    "kubernetes/typed/scheduling/v1beta1",
     "kubernetes/typed/settings/v1alpha1",
     "kubernetes/typed/storage/v1",
     "kubernetes/typed/storage/v1alpha1",
     "kubernetes/typed/storage/v1beta1",
+    "pkg/apis/clientauthentication",
+    "pkg/apis/clientauthentication/v1alpha1",
+    "pkg/apis/clientauthentication/v1beta1",
     "pkg/version",
+    "plugin/pkg/client/auth/exec",
     "rest",
     "rest/watch",
     "tools/auth",
@@ -340,34 +463,80 @@
     "tools/reference",
     "transport",
     "util/cert",
+    "util/connrotation",
     "util/flowcontrol",
     "util/homedir",
-    "util/integer"
+    "util/integer",
   ]
-  revision = "82eadfdc39007c2eb47e3ddeb7ed7d96365e409d"
-  version = "kubernetes-1.10.0-beta.1"
+  pruneopts = "NUT"
+  revision = "6e4752048fde21176ab35eb54ec1117359830d8a"
+  version = "kubernetes-1.13.3"

 [[projects]]
+  digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b"
+  name = "k8s.io/klog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
+  version = "v0.1.0"
+
+[[projects]]
+  digest = "1:cf54450b967dcae4f270dfa44395b01175e5358bbd79fe8a6073b13f220f1c2e"
   name = "k8s.io/kubernetes"
   packages = [
     "pkg/util/file",
     "pkg/util/io",
     "pkg/util/keymutex",
     "pkg/util/mount",
-    "pkg/util/nsenter"
+    "pkg/util/nsenter",
   ]
-  revision = "b1b29978270dc22fecc592ac55d903350454310a"
-  version = "v1.11.1"
+  pruneopts = "NUT"
+  revision = "721bfa751924da8d1680787490c54b9179b1fed0"
+  version = "v1.13.3"

 [[projects]]
   branch = "master"
+  digest = "1:381323c2fe2e890a3dd3b5d6dc6f2199068408cca89b24f6b7ca1c60f32644a5"
   name = "k8s.io/utils"
   packages = ["exec"]
-  revision = "66066c83e385e385ccc3c964b44fd7dcd413d0ed"
+  pruneopts = "NUT"
+  revision = "8a16e7dd8fb6d97d1331b0c79a16722f934b00b1"
+
+[[projects]]
+  digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
+  name = "sigs.k8s.io/yaml"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
+  version = "v1.1.0"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "6e997f1f66bb80e4e145da05f1872070947725cba04d3da2521bcdc0e33d7150"
+  input-imports = [
+    "github.com/container-storage-interface/spec/lib/go/csi",
+    "github.com/golang/protobuf/ptypes",
+    "github.com/golang/protobuf/ptypes/timestamp",
+    "github.com/kubernetes-csi/csi-lib-utils/protosanitizer",
+    "github.com/pborman/uuid",
+    "github.com/pkg/errors",
+    "golang.org/x/net/context",
+    "google.golang.org/grpc",
+    "google.golang.org/grpc/codes",
+    "google.golang.org/grpc/status",
+    "k8s.io/api/core/v1",
+    "k8s.io/apimachinery/pkg/api/errors",
+    "k8s.io/apimachinery/pkg/apis/meta/v1",
+    "k8s.io/apimachinery/pkg/util/sets",
+    "k8s.io/apimachinery/pkg/util/wait",
+    "k8s.io/client-go/kubernetes",
+    "k8s.io/client-go/rest",
+    "k8s.io/client-go/tools/clientcmd",
+    "k8s.io/klog",
+    "k8s.io/kubernetes/pkg/util/keymutex",
+    "k8s.io/kubernetes/pkg/util/mount",
+    "k8s.io/kubernetes/pkg/util/nsenter",
+    "k8s.io/utils/exec",
+  ]
   solver-name = "gps-cdcl"
   solver-version = 1
Gopkg.toml (18 changes)
@@ -1,14 +1,6 @@
 [[constraint]]
   name = "github.com/container-storage-interface/spec"
-  version = "~0.3.0"
+  version = "~1.0.0"

-[[constraint]]
-  branch = "master"
-  name = "github.com/kubernetes-csi/drivers"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/golang/glog"
-
 [[override]]
   revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d"
@@ -19,15 +11,15 @@
   version = "1.10.0"

 [[constraint]]
-  version = "kubernetes-1.10.0-beta.1"
+  version = "kubernetes-1.13.3"
   name = "k8s.io/apimachinery"

 [[constraint]]
   name = "k8s.io/kubernetes"
-  version = "v1.10.0-beta.1"
+  version = "v1.13.3"

 [[override]]
-  version = "kubernetes-1.10.0-beta.1"
+  version = "kubernetes-1.13.3"
   name = "k8s.io/api"

 [[override]]
@@ -36,7 +28,7 @@

 [[constraint]]
   name = "k8s.io/client-go"
-  version = "kubernetes-1.10.0-beta.1"
+  version = "kubernetes-1.13.3"

 [prune]
   go-tests = true
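Bumping the spec constraint to ~1.0.0 and the Kubernetes libraries to 1.13.3 only takes effect once dep re-solves the lock and vendor tree. A sketch, assuming the dep tool (github.com/golang/dep) is on PATH:

```bash
# Re-resolve Gopkg.lock and vendor/ after editing the constraints above.
dep ensure -update \
  github.com/container-storage-interface/spec \
  k8s.io/api k8s.io/apimachinery k8s.io/client-go k8s.io/kubernetes
dep status  # confirm each project landed on the pinned version/revision
```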
Makefile (29 changes)
@@ -14,42 +14,49 @@

 .PHONY: all rbdplugin cephfsplugin

+CONTAINER_CMD?=docker
+
 RBD_IMAGE_NAME=$(if $(ENV_RBD_IMAGE_NAME),$(ENV_RBD_IMAGE_NAME),quay.io/cephcsi/rbdplugin)
-RBD_IMAGE_VERSION=$(if $(ENV_RBD_IMAGE_VERSION),$(ENV_RBD_IMAGE_VERSION),v0.3.0)
+RBD_IMAGE_VERSION=$(if $(ENV_RBD_IMAGE_VERSION),$(ENV_RBD_IMAGE_VERSION),v1.0.0)

 CEPHFS_IMAGE_NAME=$(if $(ENV_CEPHFS_IMAGE_NAME),$(ENV_CEPHFS_IMAGE_NAME),quay.io/cephcsi/cephfsplugin)
-CEPHFS_IMAGE_VERSION=$(if $(ENV_CEPHFS_IMAGE_VERSION),$(ENV_CEPHFS_IMAGE_VERSION),v0.3.0)
+CEPHFS_IMAGE_VERSION=$(if $(ENV_CEPHFS_IMAGE_VERSION),$(ENV_CEPHFS_IMAGE_VERSION),v1.0.0)

 $(info rbd image settings: $(RBD_IMAGE_NAME) version $(RBD_IMAGE_VERSION))
 $(info cephfs image settings: $(CEPHFS_IMAGE_NAME) version $(CEPHFS_IMAGE_VERSION))

 all: rbdplugin cephfsplugin

-test:
-	go test github.com/ceph/ceph-csi/pkg/... -cover
-	go vet github.com/ceph/ceph-csi/pkg/...
+test: go-test static-check
+
+go-test:
+	./scripts/test-go.sh
+
+static-check:
+	./scripts/lint-go.sh
+	./scripts/lint-text.sh

 rbdplugin:
 	if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
-	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./rbd
+	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./cmd/rbd

 image-rbdplugin: rbdplugin
 	cp _output/rbdplugin deploy/rbd/docker
-	docker build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker
+	$(CONTAINER_CMD) build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker

 cephfsplugin:
 	if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
-	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cephfs
+	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cmd/cephfs

 image-cephfsplugin: cephfsplugin
 	cp _output/cephfsplugin deploy/cephfs/docker
-	docker build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker
+	$(CONTAINER_CMD) build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker

 push-image-rbdplugin: image-rbdplugin
-	docker push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION)
+	$(CONTAINER_CMD) push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION)

 push-image-cephfsplugin: image-cephfsplugin
-	docker push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION)
+	$(CONTAINER_CMD) push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION)

 clean:
 	go clean -r -x
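The new CONTAINER_CMD?=docker variable decouples the image targets from Docker itself. A usage sketch; podman is an example substitute here, not something the Makefile mandates, and the image name in the last line is purely illustrative:

```bash
# Build and push the plugin images with an alternative container runtime.
make image-rbdplugin image-cephfsplugin CONTAINER_CMD=podman
make push-image-rbdplugin push-image-cephfsplugin CONTAINER_CMD=podman

# The ENV_* variables still override image names and tags.
ENV_RBD_IMAGE_NAME=quay.io/example/rbdplugin make image-rbdplugin
```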
README.md (24 changes)
@@ -1,18 +1,22 @@
-# Ceph CSI
+# Ceph CSI 1.0.0

-# Supported platforms
-
-For Kubernetes 1.12 and below, please use [0.3 images and deployments](https://github.com/ceph/ceph-csi/tree/master/deploy/rbd/kubernetes).
-
-For Kubernetes 1.13 and above, please use [1.0 images and deployments](https://github.com/ceph/ceph-csi/tree/csi-v1.0/deploy/rbd/kubernetes).
-
-[Container Storage Interface (CSI)](https://github.com/container-storage-interface/) driver, provisioner, and attacher for Ceph RBD and CephFS.
+[Container Storage Interface
+(CSI)](https://github.com/container-storage-interface/) driver, provisioner,
+and attacher for Ceph RBD and CephFS.

 ## Overview

-Ceph CSI plugins implement an interface between CSI enabled Container Orchestrator (CO) and CEPH cluster. It allows dynamically provisioning CEPH volumes and attaching them to workloads. Current implementation of Ceph CSI plugins was tested in Kubernetes environment (requires Kubernetes 1.11+), but the code does not rely on any Kubernetes specific calls (WIP to make it k8s agnostic) and should be able to run with any CSI enabled CO.
+Ceph CSI plugins implement an interface between CSI enabled Container
+Orchestrator (CO) and CEPH cluster.
+It allows dynamically provisioning CEPH volumes and attaching them to
+workloads.
+Current implementation of Ceph CSI plugins was tested in Kubernetes
+environment (requires Kubernetes 1.13+), but the code does not rely on
+any Kubernetes specific calls (WIP to make it k8s agnostic) and
+should be able to run with any CSI enabled CO.

-For details about configuration and deployment of RBD and CephFS CSI plugins, see documentation in `docs/`.
+For details about configuration and deployment of RBD and
+CephFS CSI plugins, see documentation in `docs/`.

 For example usage of RBD and CephFS CSI plugins, see examples in `examples/`.
cmd/cephfs/main.go
@@ -19,58 +19,45 @@ package main
 import (
 	"flag"
 	"os"
-	"path"

 	"github.com/ceph/ceph-csi/pkg/cephfs"
 	"github.com/ceph/ceph-csi/pkg/util"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

-func init() {
-	flag.Set("logtostderr", "true")
-}
-
 var (
 	endpoint        = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
 	driverName      = flag.String("drivername", "cephfs.csi.ceph.com", "name of the driver")
-	nodeId          = flag.String("nodeid", "", "node id")
+	nodeID          = flag.String("nodeid", "", "node id")
 	volumeMounter   = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
-	metadataStorage = flag.String("metadatastorage", "node", "metadata persistence method [node|k8s_configmap]")
+	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
+	mountCacheDir   = flag.String("mountcachedir", "", "mount info cache save dir")
 )

-func main() {
+func init() {
+	klog.InitFlags(nil)
+	if err := flag.Set("logtostderr", "true"); err != nil {
+		klog.Exitf("failed to set logtostderr flag: %v", err)
+	}
 	flag.Parse()
+}

+func main() {
 	err := util.ValidateDriverName(*driverName)
 	if err != nil {
-		glog.Errorf("failed to validate driver name: %v", err)
-		os.Exit(1)
+		klog.Fatalln(err)
 	}
 	//update plugin name
 	cephfs.PluginFolder = cephfs.PluginFolder + *driverName

-	if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "controller")); err != nil {
-		glog.Errorf("failed to create persistent storage for controller: %v", err)
-		os.Exit(1)
-	}
-
-	if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "node")); err != nil {
-		glog.Errorf("failed to create persistent storage for node: %v", err)
-		os.Exit(1)
-	}
-
-	cp, err := util.NewCachePersister(*metadataStorage, *driverName)
+	cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName)
 	if err != nil {
-		glog.Errorf("failed to define cache persistence method: %v", err)
 		os.Exit(1)
 	}

-	driver := cephfs.NewCephFSDriver()
-	driver.Run(*driverName, *nodeId, *endpoint, *volumeMounter, cp)
+	driver := cephfs.NewDriver()
+	driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, *mountCacheDir, cp)

 	os.Exit(0)
 }
-
-func createPersistentStorage(persistentStoragePath string) error {
-	return os.MkdirAll(persistentStoragePath, os.FileMode(0755))
-}
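The reworked entry point moves flag registration into init(), routes logging through klog, and adds the mountcachedir flag. A launch sketch; the socket path, node id, and cache directory are illustrative values, not defaults from this commit:

```bash
# Run the cephfs plugin binary built by `make cephfsplugin` (sketch).
./_output/cephfsplugin \
  --endpoint=unix:///csi/csi.sock \
  --drivername=cephfs.csi.ceph.com \
  --nodeid="$(hostname)" \
  --volumemounter=kernel \
  --metadatastorage=k8s_configmap \
  --mountcachedir=/var/lib/cephfs-csi/mountinfo
```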
cmd/rbd/main.go
@@ -19,63 +19,46 @@ package main
 import (
 	"flag"
 	"os"
-	"path"

 	"github.com/ceph/ceph-csi/pkg/rbd"
 	"github.com/ceph/ceph-csi/pkg/util"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

-func init() {
-	flag.Set("logtostderr", "true")
-}
-
 var (
 	endpoint        = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
 	driverName      = flag.String("drivername", "rbd.csi.ceph.com", "name of the driver")
 	nodeID          = flag.String("nodeid", "", "node id")
 	containerized   = flag.Bool("containerized", true, "whether run as containerized")
-	metadataStorage = flag.String("metadatastorage", "node", "metadata persistence method [node|k8s_configmap]")
+	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
+	configRoot      = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph"+
+		" cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets")
 )

-func main() {
+func init() {
+	klog.InitFlags(nil)
+	if err := flag.Set("logtostderr", "true"); err != nil {
+		klog.Exitf("failed to set logtostderr flag: %v", err)
+	}
 	flag.Parse()
+}

+func main() {
 	err := util.ValidateDriverName(*driverName)
 	if err != nil {
-		glog.Errorf("failed to validate driver name: %v", err)
-		os.Exit(1)
+		klog.Fatalln(err)
 	}
 	//update plugin name
 	rbd.PluginFolder = rbd.PluginFolder + *driverName

-	if err := createPersistentStorage(path.Join(rbd.PluginFolder, "controller")); err != nil {
-		glog.Errorf("failed to create persistent storage for controller %v", err)
-		os.Exit(1)
-	}
-	if err := createPersistentStorage(path.Join(rbd.PluginFolder, "node")); err != nil {
-		glog.Errorf("failed to create persistent storage for node %v", err)
-		os.Exit(1)
-	}
-
-	cp, err := util.NewCachePersister(*metadataStorage, *driverName)
+	cp, err := util.CreatePersistanceStorage(rbd.PluginFolder, *metadataStorage, *driverName)
 	if err != nil {
-		glog.Errorf("failed to define cache persistence method: %v", err)
 		os.Exit(1)
 	}

-	driver := rbd.GetRBDDriver()
-	driver.Run(*driverName, *nodeID, *endpoint, *containerized, cp)
+	driver := rbd.NewDriver()
+	driver.Run(*driverName, *nodeID, *endpoint, *configRoot, *containerized, cp)

 	os.Exit(0)
 }
-
-func createPersistentStorage(persistentStoragePath string) error {
-	if _, err := os.Stat(persistentStoragePath); os.IsNotExist(err) {
-		if err := os.MkdirAll(persistentStoragePath, os.FileMode(0755)); err != nil {
-			return err
-		}
-	} else {
-	}
-	return nil
-}
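The rbd entry point gets the same init()/klog treatment plus the new configroot flag, whose default is /etc/csi-config and which accepts the literal value k8s_objects to read cluster configuration from Kubernetes secrets. A launch sketch with illustrative values:

```bash
# Run the rbd plugin binary built by `make rbdplugin` (sketch).
./_output/rbdplugin \
  --endpoint=unix:///csi/csi.sock \
  --nodeid="$(hostname)" \
  --containerized=false \
  --metadatastorage=node \
  --configroot=/etc/csi-config
```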
deploy.sh (58 changes)
@@ -1,6 +1,58 @@
 #!/bin/bash

-if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
-	docker login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
-	make push-image-rbdplugin push-image-cephfsplugin
+push_helm_chats() {
+	PACKAGE=$1
+	CHANGED=0
+	VERSION=$(grep 'version:' deploy/"$PACKAGE"/helm/Chart.yaml | awk '{print $2}')
+
+	if [ ! -f "tmp/csi-charts/docs/$PACKAGE/ceph-csi-$PACKAGE-$VERSION.tgz" ]; then
+		CHANGED=1
+		ln -s helm deploy/"$PACKAGE"/ceph-csi-"$PACKAGE"
+		mkdir -p tmp/csi-charts/docs/"$PACKAGE"
+		pushd tmp/csi-charts/docs/"$PACKAGE" >/dev/null
+		helm init --client-only
+		helm package ../../../../deploy/"$PACKAGE"/ceph-csi-"$PACKAGE"
+		popd >/dev/null
+	fi
+
+	if [ $CHANGED -eq 1 ]; then
+		pushd tmp/csi-charts/docs >/dev/null
+		helm repo index .
+		git add --all :/ && git commit -m "Update repo"
+		git push https://"$GITHUB_TOKEN"@github.com/ceph/csi-charts
+		popd >/dev/null
+	fi
+}
+
+if [ "${TRAVIS_BRANCH}" == 'csi-v0.3' ]; then
+	export RBD_IMAGE_VERSION='v0.3.0'
+	export CEPHFS_IMAGE_VERSION='v0.3.0'
+elif [ "${TRAVIS_BRANCH}" == 'csi-v1.0' ]; then
+	export RBD_IMAGE_VERSION='v1.0.0'
+	export CEPHFS_IMAGE_VERSION='v1.0.0'
+else
+	echo "!!! Branch ${TRAVIS_BRANCH} is not a deployable branch; exiting"
+	exit 0 # Exiting 0 so that this isn't marked as failing
+fi
+
+if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
+	"${CONTAINER_CMD:-docker}" login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
+	make push-image-rbdplugin push-image-cephfsplugin
+
+	set -xe
+
+	mkdir -p tmp
+	pushd tmp >/dev/null
+
+	curl https://raw.githubusercontent.com/helm/helm/master/scripts/get >get_helm.sh
+	chmod 700 get_helm.sh
+	./get_helm.sh
+
+	git clone https://github.com/ceph/csi-charts
+
+	mkdir -p csi-charts/docs
+	popd >/dev/null
+
+	push_helm_chats rbd
+	push_helm_chats cephfs
 fi
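deploy.sh now publishes only from the csi-v0.3 and csi-v1.0 branches and packages the Helm charts into the ceph/csi-charts repository. The packaging half can be dry-run locally; a sketch, assuming Helm v2 (helm init is a v2-only command):

```bash
# Package the charts without pushing anywhere (sketch).
helm init --client-only
helm package deploy/cephfs/helm
helm package deploy/rbd/helm  # the rbd chart layout is assumed to mirror cephfs
```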
deploy/cephfs/helm/.helmignore (new file, 21 lines)
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
deploy/cephfs/helm/Chart.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+appVersion: "1.0.0"
+description: "Container Storage Interface (CSI) driver,
+  provisioner, and attacher for Ceph cephfs"
+name: ceph-csi-cephfs
+version: 0.5.1
+keywords:
+  - ceph
+  - cephfs
+  - ceph-csi
+home: https://github.com/ceph/ceph-csi
+sources:
+  - https://github.com/ceph/ceph-csi/tree/csi-v1.0/deploy/cephfs/helm
deploy/cephfs/helm/README.md (new file, 29 lines)
@@ -0,0 +1,29 @@
+# ceph-csi-cephfs
+
+The ceph-csi-cephfs chart adds cephfs volume support to your cluster.
+
+## Install Chart
+
+To install the Chart into your Kubernetes cluster
+
+```bash
+helm install --namespace "ceph-csi-cephfs" --name "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs
+```
+
+After installation succeeds, you can get a status of Chart
+
+```bash
+helm status "ceph-csi-cephfs"
+```
+
+If you want to delete your Chart, use this command
+
+```bash
+helm delete --purge "ceph-csi-cephfs"
+```
+
+If you want to delete the namespace, use this command
+
+```bash
+kubectl delete namespace ceph-csi-rbd
+```
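The install command above presumes a ceph-csi chart repository is already registered with Helm. A sketch; the repository URL is an assumption inferred from the csi-charts publishing flow in deploy.sh, not something stated in this chart:

```bash
# Hypothetical repo registration for the charts published by deploy.sh.
helm repo add ceph-csi https://ceph.github.io/csi-charts
helm repo update
helm search ceph-csi-cephfs
```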
deploy/cephfs/helm/templates/NOTES.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
+Examples on how to configure a storage class and start using the driver are here:
+https://github.com/ceph/ceph-csi/tree/csi-v1.0/examples/cephfs
deploy/cephfs/helm/templates/_helpers.tpl (new file, 119 lines)
@@ -0,0 +1,119 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ceph-csi-cephfs.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ceph-csi-cephfs.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ceph-csi-cephfs.attacher.fullname" -}}
+{{- if .Values.attacher.fullnameOverride -}}
+{{- .Values.attacher.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.attacher.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.attacher.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ceph-csi-cephfs.nodeplugin.fullname" -}}
+{{- if .Values.nodeplugin.fullnameOverride -}}
+{{- .Values.nodeplugin.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ceph-csi-cephfs.provisioner.fullname" -}}
+{{- if .Values.provisioner.fullnameOverride -}}
+{{- .Values.provisioner.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- printf "%s-%s" .Release.Name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "ceph-csi-cephfs.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "ceph-csi-cephfs.serviceAccountName.attacher" -}}
+{{- if .Values.serviceAccounts.attacher.create -}}
+{{ default (include "ceph-csi-cephfs.attacher.fullname" .) .Values.serviceAccounts.attacher.name }}
+{{- else -}}
+{{ default "default" .Values.serviceAccounts.attacher.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "ceph-csi-cephfs.serviceAccountName.nodeplugin" -}}
+{{- if .Values.serviceAccounts.nodeplugin.create -}}
+{{ default (include "ceph-csi-cephfs.nodeplugin.fullname" .) .Values.serviceAccounts.nodeplugin.name }}
+{{- else -}}
+{{ default "default" .Values.serviceAccounts.nodeplugin.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "ceph-csi-cephfs.serviceAccountName.provisioner" -}}
+{{- if .Values.serviceAccounts.provisioner.create -}}
+{{ default (include "ceph-csi-cephfs.provisioner.fullname" .) .Values.serviceAccounts.provisioner.name }}
+{{- else -}}
+{{ default "default" .Values.serviceAccounts.provisioner.name }}
+{{- end -}}
+{{- end -}}
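The helpers above only compute names and labels, so they can be exercised without a cluster. A sketch using Helm v2 syntax, matching the chart's apiVersion: v1:

```bash
# Render the chart locally to sanity-check the naming helpers (sketch).
helm lint deploy/cephfs/helm
helm template deploy/cephfs/helm --name ceph-csi-cephfs | head -n 40
```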
deploy/cephfs/helm/templates/attacher-clusterrole.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
+{{- if .Values.rbac.create -}}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
+  labels:
+    app: {{ include "ceph-csi-cephfs.name" . }}
+    chart: {{ include "ceph-csi-cephfs.chart" . }}
+    component: {{ .Values.attacher.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+rules:
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]
+{{- end -}}
deploy/cephfs/helm/templates/attacher-clusterrolebinding.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
+{{- if .Values.rbac.create -}}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
+  labels:
+    app: {{ include "ceph-csi-cephfs.name" . }}
+    chart: {{ include "ceph-csi-cephfs.chart" . }}
+    component: {{ .Values.attacher.name }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }}
+    namespace: {{ .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
+  apiGroup: rbac.authorization.k8s.io
+{{- end -}}
deploy/cephfs/helm/templates/attacher-service.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    app: {{ include "ceph-csi-cephfs.name" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
  ports:
    - name: dummy
      port: 12345
deploy/cephfs/helm/templates/attacher-serviceaccount.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.attacher.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end -}}
deploy/cephfs/helm/templates/attacher-statefulset.yaml (new file, 60 lines)
@@ -0,0 +1,60 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  serviceName: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  replicas: {{ .Values.attacher.replicas }}
  selector:
    matchLabels:
      app: {{ include "ceph-csi-cephfs.name" . }}
      component: {{ .Values.attacher.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-cephfs.name" . }}
        chart: {{ include "ceph-csi-cephfs.chart" . }}
        component: {{ .Values.attacher.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }}
      containers:
        - name: csi-cephfsplugin-attacher
          image: "{{ .Values.attacher.image.repository }}:{{ .Values.attacher.image.tag }}"
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.attacher.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
          resources:
{{ toYaml .Values.attacher.resources | indent 12 }}
      volumes:
        - name: socket-dir
          hostPath:
            path: {{ .Values.socketDir }}
            type: DirectoryOrCreate
{{- if .Values.attacher.affinity -}}
      affinity:
{{ toYaml .Values.attacher.affinity . | indent 8 }}
{{- end -}}
{{- if .Values.attacher.nodeSelector -}}
      nodeSelector:
{{ toYaml .Values.attacher.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.attacher.tolerations -}}
      tolerations:
{{ toYaml .Values.attacher.tolerations | indent 8 }}
{{- end -}}
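The attacher sidecar reaches the driver only through the UNIX socket on the shared `socket-dir` hostPath, so with these defaults it must run on a node where the plugin has already created the socket. A quick sanity check on such a node, assuming the default `socketDir` and `socketFile` from the chart's values.yaml (shown further below):

```bash
# The plugin creates this socket; the attacher dials it via $(ADDRESS).
ls -l /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock
```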
deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
{{- end -}}
@@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end -}}
deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml (new file, 150 lines)
@@ -0,0 +1,150 @@
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ include "ceph-csi-cephfs.name" . }}
      component: {{ .Values.nodeplugin.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-cephfs.name" . }}
        chart: {{ include "ceph-csi-cephfs.chart" . }}
        component: {{ .Values.nodeplugin.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
      hostNetwork: true
      hostPID: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}"
          args:
            - "--v=5"
            - "--csi-address=/csi/{{ .Values.socketFile }}"
            - "--kubelet-registration-path={{ .Values.socketDir }}/{{ .Values.socketFile }}"
          lifecycle:
            preStop:
              exec:
                command: [
                  "/bin/sh", "-c",
                  'rm -rf /registration/{{ .Values.driverName }}
                  /registration/{{ .Values.driverName }}-reg.sock'
                ]
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          imagePullPolicy: {{ .Values.nodeplugin.registrar.image.imagePullPolicy }}
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          resources:
{{ toYaml .Values.nodeplugin.registrar.resources | indent 12 }}
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"
            - "--metadatastorage=k8s_configmap"
            - "--mountcachedir=/mount-cache-dir"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
            - name: DRIVER_NAME
              value: {{ .Values.driverName }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
          volumeMounts:
            - name: mount-cache-dir
              mountPath: /mount-cache-dir
            - name: plugin-dir
              mountPath: {{ .Values.socketDir }}
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: plugin-mount-dir
              mountPath: {{ .Values.volumeDevicesDir }}
              mountPropagation: "Bidirectional"
            - mountPath: /dev
              name: host-dev
            - mountPath: /rootfs
              name: host-rootfs
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
          resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
      volumes:
        - name: mount-cache-dir
          emptyDir: {}
        - name: plugin-dir
          hostPath:
            path: {{ .Values.socketDir }}
            type: DirectoryOrCreate
        - name: plugin-mount-dir
          hostPath:
            path: {{ .Values.volumeDevicesDir }}
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: {{ .Values.registrationDir }}
            type: Directory
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-rootfs
          hostPath:
            path: /
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
{{- if .Values.nodeplugin.affinity -}}
      affinity:
{{ toYaml .Values.nodeplugin.affinity . | indent 8 }}
{{- end -}}
{{- if .Values.nodeplugin.nodeSelector -}}
      nodeSelector:
{{ toYaml .Values.nodeplugin.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.nodeplugin.tolerations -}}
      tolerations:
{{ toYaml .Values.nodeplugin.tolerations | indent 8 }}
{{- end -}}
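Once the DaemonSet is rolled out, the registrar sidecar should have created a per-driver registration socket under the kubelet's plugin-watcher directory. A sketch for verifying on any node, using the chart defaults for `driverName` and `registrationDir` (the `-reg.sock` name matches what the preStop hook above cleans up):

```bash
# With default values this should list cephfs.csi.ceph.com-reg.sock
ls /var/lib/kubelet/plugins_registry/
```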
deploy/cephfs/helm/templates/nodeplugin-serviceaccount.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.nodeplugin.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end -}}
deploy/cephfs/helm/templates/provisioner-clusterrole.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
{{- end -}}
@@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end -}}
deploy/cephfs/helm/templates/provisioner-role.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "delete"]
{{- end -}}
deploy/cephfs/helm/templates/provisioner-rolebinding.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: Role
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
  namespace: {{ .Release.Namespace }}
{{- end -}}
deploy/cephfs/helm/templates/provisioner-service.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    app: {{ include "ceph-csi-cephfs.name" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
  ports:
    - name: dummy
      port: 12345
deploy/cephfs/helm/templates/provisioner-serviceaccount.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.provisioner.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end -}}
deploy/cephfs/helm/templates/provisioner-statefulset.yaml (new file, 94 lines)
@@ -0,0 +1,94 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  serviceName: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  replicas: {{ .Values.provisioner.replicas }}
  selector:
    matchLabels:
      app: {{ include "ceph-csi-cephfs.name" . }}
      component: {{ .Values.provisioner.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-cephfs.name" . }}
        chart: {{ include "ceph-csi-cephfs.chart" . }}
        component: {{ .Values.provisioner.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
      containers:
        - name: csi-provisioner
          image: "{{ .Values.provisioner.image.repository }}:{{ .Values.provisioner.image.tag }}"
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.provisioner.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
          resources:
{{ toYaml .Values.provisioner.resources | indent 12 }}
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"
            - "--metadatastorage=k8s_configmap"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
            - name: DRIVER_NAME
              value: {{ .Values.driverName }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
            - name: host-rootfs
              mountPath: "/rootfs"
          resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
      volumes:
        - name: socket-dir
          emptyDir: {}
        #FIXME this seems way too much. Why is it needed at all for this?
        - name: host-rootfs
          hostPath:
            path: /
{{- if .Values.provisioner.affinity -}}
      affinity:
{{ toYaml .Values.provisioner.affinity . | indent 8 }}
{{- end -}}
{{- if .Values.provisioner.nodeSelector -}}
      nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.provisioner.tolerations -}}
      tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 }}
{{- end -}}
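Because the external provisioner and the cephfsplugin container share an `emptyDir` socket here, provisioning failures usually surface in one of these two containers. A sketch for tailing both; the pod name is hypothetical and depends on the release name:

```bash
kubectl logs ceph-csi-cephfs-provisioner-0 -c csi-provisioner --tail=50
kubectl logs ceph-csi-cephfs-provisioner-0 -c csi-cephfsplugin --tail=50
```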
deploy/cephfs/helm/values.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
---
rbac:
  create: true

serviceAccounts:
  attacher:
    create: true
    name:
  nodeplugin:
    create: true
    name:
  provisioner:
    create: true
    name:

socketDir: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
socketFile: csi.sock
registrationDir: /var/lib/kubelet/plugins_registry
volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
driverName: cephfs.csi.ceph.com

attacher:
  name: attacher

  replicaCount: 1

  image:
    repository: quay.io/k8scsi/csi-attacher
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}

nodeplugin:
  name: nodeplugin

  registrar:
    image:
      repository: quay.io/k8scsi/csi-node-driver-registrar
      tag: v1.0.2
      pullPolicy: IfNotPresent

    resources: {}

  plugin:
    image:
      repository: quay.io/cephcsi/cephfsplugin
      tag: v1.0.0
      pullPolicy: IfNotPresent

    resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}

provisioner:
  name: provisioner

  replicaCount: 1

  image:
    repository: quay.io/k8scsi/csi-provisioner
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}
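A minimal install sketch using these defaults, with the plugin image tag overridden on the command line (Helm 2 syntax; namespace and release name are examples). Note that the StatefulSet templates above read `.Values.<component>.replicas` while this file ships `replicaCount`, so pinning `replicas` explicitly via `--set` is one way to avoid an empty value:

```bash
helm install --namespace ceph-csi-cephfs --name ceph-csi-cephfs \
  deploy/cephfs/helm \
  --set nodeplugin.plugin.image.tag=v1.0.0 \
  --set attacher.replicas=1 \
  --set provisioner.replicas=1
```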
@@ -1,17 +1,15 @@
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: csi-attacher
+  name: cephfs-csi-attacher

 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: external-attacher-runner
+  name: cephfs-external-attacher-runner
 rules:
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["get", "list", "watch", "update"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
     verbs: ["get", "list", "watch", "update"]
@@ -21,17 +19,20 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]

 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: csi-attacher-role
+  name: cephfs-csi-attacher-role
 subjects:
   - kind: ServiceAccount
-    name: csi-attacher
+    name: cephfs-csi-attacher
     namespace: default
 roleRef:
   kind: ClusterRole
-  name: external-attacher-runner
+  name: cephfs-external-attacher-runner
   apiGroup: rbac.authorization.k8s.io
@@ -1,3 +1,4 @@
+---
 kind: Service
 apiVersion: v1
 metadata:
@@ -24,10 +25,10 @@ spec:
       labels:
         app: csi-cephfsplugin-attacher
     spec:
-      serviceAccount: csi-attacher
+      serviceAccount: cephfs-csi-attacher
      containers:
        - name: csi-cephfsplugin-attacher
-          image: quay.io/k8scsi/csi-attacher:v0.3.0
+          image: quay.io/k8scsi/csi-attacher:v1.0.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
@@ -1,3 +1,4 @@
+---
 kind: Service
 apiVersion: v1
 metadata:
@@ -24,23 +25,65 @@ spec:
       labels:
         app: csi-cephfsplugin-provisioner
     spec:
-      serviceAccount: csi-provisioner
+      serviceAccount: cephfs-csi-provisioner
      containers:
        - name: csi-provisioner
-          image: quay.io/k8scsi/csi-provisioner:v0.3.0
+          image: quay.io/k8scsi/csi-provisioner:v1.0.1
          args:
-            - "--provisioner=cephfs.csi.ceph.com"
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
          env:
            - name: ADDRESS
-              value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock
+              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
-              mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
+              mountPath: /csi
+        - name: csi-cephfsplugin
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+          image: quay.io/cephcsi/cephfsplugin:v1.0.0
+          args:
+            - "--nodeid=$(NODE_ID)"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--v=5"
+            - "--drivername=cephfs.csi.ceph.com"
+            - "--metadatastorage=k8s_configmap"
+          env:
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi-provisioner.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+            - name: host-sys
+              mountPath: /sys
+            - name: lib-modules
+              mountPath: /lib/modules
+              readOnly: true
+            - name: host-dev
+              mountPath: /dev
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
            type: DirectoryOrCreate
+        - name: host-sys
+          hostPath:
+            path: /sys
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: host-dev
+          hostPath:
+            path: /dev
@@ -1,3 +1,4 @@
+---
 kind: DaemonSet
 apiVersion: apps/v1beta2
 metadata:
@@ -11,30 +12,34 @@ spec:
       labels:
         app: csi-cephfsplugin
     spec:
-      serviceAccount: csi-nodeplugin
+      serviceAccount: cephfs-csi-nodeplugin
      hostNetwork: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
-          image: quay.io/k8scsi/driver-registrar:v0.3.0
+          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
          args:
            - "--v=5"
-            - "--csi-address=$(ADDRESS)"
-            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+            - "--csi-address=/csi/csi.sock"
+            - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
+          lifecycle:
+            preStop:
+              exec:
+                command: [
+                  "/bin/sh", "-c",
+                  "rm -rf /registration/csi-cephfsplugin \
+                  /registration/csi-cephfsplugin-reg.sock"
+                ]
          env:
-            - name: ADDRESS
-              value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock
-            - name: DRIVER_REG_SOCK_PATH
-              value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
-            - name: socket-dir
-              mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
+            - name: plugin-dir
+              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-cephfsplugin
@@ -43,13 +48,14 @@ spec:
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
-          image: quay.io/cephcsi/cephfsplugin:v0.3.0
+          image: quay.io/cephcsi/cephfsplugin:v1.0.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"
            - "--metadatastorage=k8s_configmap"
+            - "--mountcachedir=/mount-cache-dir"
          env:
            - name: NODE_ID
              valueFrom:
@@ -60,38 +66,45 @@ spec:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CSI_ENDPOINT
-              value: unix://var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock
+              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
+            - name: mount-cache-dir
+              mountPath: /mount-cache-dir
            - name: plugin-dir
-              mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
+              mountPath: /csi
+            - name: csi-plugins-dir
+              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
+              mountPropagation: "Bidirectional"
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
-            - mountPath: /sys
-              name: host-sys
+            - name: host-sys
+              mountPath: /sys
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: host-dev
              mountPath: /dev
      volumes:
+        - name: mount-cache-dir
+          emptyDir: {}
        - name: plugin-dir
          hostPath:
-            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
+            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
+            type: DirectoryOrCreate
+        - name: csi-plugins-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/kubernetes.io/csi
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
-            path: /var/lib/kubelet/plugins/
+            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
-        - name: socket-dir
-          hostPath:
-            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
-            type: DirectoryOrCreate
        - name: host-sys
          hostPath:
            path: /sys
@@ -1,14 +1,18 @@
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: csi-nodeplugin
+  name: cephfs-csi-nodeplugin

 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: csi-nodeplugin
+  name: cephfs-csi-nodeplugin
 rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
   - apiGroups: [""]
     resources: ["nodes"]
     verbs: ["get", "list", "update"]
@@ -21,20 +25,17 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["configmaps"]
-    verbs: ["get", "list", "create", "delete"]

 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: csi-nodeplugin
+  name: cephfs-csi-nodeplugin
 subjects:
   - kind: ServiceAccount
-    name: csi-nodeplugin
+    name: cephfs-csi-nodeplugin
     namespace: default
 roleRef:
   kind: ClusterRole
-  name: csi-nodeplugin
+  name: cephfs-csi-nodeplugin
   apiGroup: rbac.authorization.k8s.io
@@ -1,17 +1,24 @@
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: csi-provisioner
+  name: cephfs-csi-provisioner

 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: external-provisioner-runner
+  name: cephfs-external-provisioner-runner
 rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
   - apiGroups: [""]
     resources: ["persistentvolumes"]
     verbs: ["get", "list", "watch", "create", "delete"]
@@ -21,20 +28,52 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["storageclasses"]
     verbs: ["get", "list", "watch"]
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]

 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: csi-provisioner-role
+  name: cephfs-csi-provisioner-role
 subjects:
   - kind: ServiceAccount
-    name: csi-provisioner
+    name: cephfs-csi-provisioner
     namespace: default
 roleRef:
   kind: ClusterRole
-  name: external-provisioner-runner
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  # replace with non-default namespace name
+  namespace: default
+  name: cephfs-external-provisioner-cfg
+rules:
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "watch", "list", "delete", "update", "create"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role-cfg
+  # replace with non-default namespace name
+  namespace: default
+subjects:
+  - kind: ServiceAccount
+    name: cephfs-csi-provisioner
+    # replace with non-default namespace name
+    namespace: default
+roleRef:
+  kind: Role
+  name: cephfs-external-provisioner-cfg
   apiGroup: rbac.authorization.k8s.io
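The Role/RoleBinding added above are namespaced and hard-code `default`; the in-file comments say to replace that when deploying elsewhere. A sketch of doing so, assuming the manifest is saved as `provisioner-rbac.yaml` (the actual file name is not shown in this hunk) and the target namespace is `ceph-csi`:

```bash
# Point the namespaced RBAC objects at the target namespace, then apply.
sed -i 's/namespace: default/namespace: ceph-csi/g' provisioner-rbac.yaml
kubectl apply -f provisioner-rbac.yaml
```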
@@ -1,12 +1,14 @@
+---
 apiVersion: v1
-appVersion: "0.3.0"
+appVersion: "1.0.0"
-description: Container Storage Interface (CSI) driver, provisioner, and attacher for Ceph RBD
+description: "Container Storage Interface (CSI) driver,
+  provisioner, snapshotter, and attacher for Ceph RBD"
 name: ceph-csi-rbd
-version: 0.1.0
+version: 0.5.1
 keywords:
   - ceph
   - rbd
   - ceph-csi
 home: https://github.com/ceph/ceph-csi
 sources:
-  - https://github.com/ceph/ceph-csi/tree/master/deploy/rbd/kubernetes
+  - https://github.com/ceph/ceph-csi/tree/csi-v1.0/deploy/rbd/helm
deploy/rbd/helm/README.md (new file, 29 lines)
@@ -0,0 +1,29 @@
# ceph-csi-rbd

The ceph-csi-rbd chart adds rbd volume support to your cluster.

## Install Chart

To install the Chart into your Kubernetes cluster:

```bash
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" ceph-csi/ceph-csi-rbd
```

After installation succeeds, you can get the status of the Chart:

```bash
helm status "ceph-csi-rbd"
```

If you want to delete your Chart, use this command:

```bash
helm delete --purge "ceph-csi-rbd"
```

If you want to delete the namespace, use this command:

```bash
kubectl delete namespace ceph-csi-rbd
```
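The install command above assumes a `ceph-csi` repository alias is already configured in Helm; alternatively, the chart can be installed straight from a checkout of this branch:

```bash
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" deploy/rbd/helm
```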
@@ -1 +1,2 @@
-The Ceph RBD Container Storage Interface has been deployed.
+Examples on how to configure a storage class and start using the driver are here:
+https://github.com/ceph/ceph-csi/tree/csi-v1.0/examples/rbd
@@ -22,4 +22,7 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]
 {{- end -}}
@@ -11,6 +11,11 @@ metadata:
 spec:
   serviceName: {{ include "ceph-csi-rbd.attacher.fullname" . }}
   replicas: {{ .Values.attacher.replicas }}
+  selector:
+    matchLabels:
+      app: {{ include "ceph-csi-rbd.name" . }}
+      component: {{ .Values.attacher.name }}
+      release: {{ .Release.Name }}
   template:
     metadata:
       labels:
@@ -22,4 +22,7 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
 {{- end -}}
@@ -34,21 +34,25 @@ spec:
           image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}"
          args:
            - "--v=5"
-            - "--csi-address=$(ADDRESS)"
-            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+            - "--csi-address=/csi/{{ .Values.socketFile }}"
+            - "--kubelet-registration-path={{ .Values.socketDir }}/{{ .Values.socketFile }}"
+          lifecycle:
+            preStop:
+              exec:
+                command: [
+                  "/bin/sh", "-c",
+                  'rm -rf /registration/{{ .Values.driverName }}
+                  /registration/{{ .Values.driverName }}-reg.sock'
+                ]
          env:
-            - name: ADDRESS
-              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
-            - name: DRIVER_REG_SOCK_PATH
-              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          imagePullPolicy: {{ .Values.nodeplugin.registrar.image.imagePullPolicy }}
          volumeMounts:
-            - name: socket-dir
-              mountPath: {{ .Values.socketDir }}
+            - name: plugin-dir
+              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          resources:
@@ -64,11 +68,14 @@ spec:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
-            - "--drivername=rbd.csi.ceph.com"
+            - "--drivername=$(DRIVER_NAME)"
            - "--containerized=true"
+            - "--metadatastorage=k8s_configmap"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
+            - name: DRIVER_NAME
+              value: {{ .Values.driverName }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
@@ -82,6 +89,9 @@ spec:
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
+            - name: plugin-mount-dir
+              mountPath: {{ .Values.volumeDevicesDir }}
+              mountPropagation: "Bidirectional"
            - mountPath: /dev
              name: host-dev
            - mountPath: /rootfs
@@ -98,18 +108,18 @@ spec:
          hostPath:
            path: {{ .Values.socketDir }}
            type: DirectoryOrCreate
+        - name: plugin-mount-dir
+          hostPath:
+            path: {{ .Values.volumeDevicesDir }}
+            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
-            path: /var/lib/kubelet/plugins/
+            path: {{ .Values.registrationDir }}
            type: Directory
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
-        - name: socket-dir
-          hostPath:
-            path: {{ .Values.socketDir }}
-            type: DirectoryOrCreate
        - name: host-dev
          hostPath:
            path: /dev
@@ -10,6 +10,9 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]
@@ -25,4 +28,25 @@ rules:
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "create", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]
 {{- end -}}
deploy/rbd/helm/templates/provisioner-role.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-rbd.name" . }}
    chart: {{ include "ceph-csi-rbd.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "delete"]
{{- end -}}
deploy/rbd/helm/templates/provisioner-rolebinding.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-rbd.name" . }}
    chart: {{ include "ceph-csi-rbd.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: Role
  name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
  namespace: {{ .Release.Namespace }}
{{- end -}}
@@ -11,6 +11,11 @@ metadata:
 spec:
   serviceName: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
   replicas: {{ .Values.provisioner.replicas }}
+  selector:
+    matchLabels:
+      app: {{ include "ceph-csi-rbd.name" . }}
+      component: {{ .Values.provisioner.name }}
+      release: {{ .Release.Name }}
   template:
     metadata:
       labels:
@@ -25,7 +30,6 @@ spec:
        - name: csi-provisioner
          image: "{{ .Values.provisioner.image.repository }}:{{ .Values.provisioner.image.tag }}"
          args:
-            - "--provisioner=rbd.csi.ceph.com"
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
          env:
@@ -37,11 +41,63 @@ spec:
              mountPath: {{ .Values.socketDir }}
          resources:
 {{ toYaml .Values.provisioner.resources | indent 12 }}
+        - name: csi-snapshotter
+          image: {{ .Values.snapshotter.image.repository }}:{{ .Values.snapshotter.image.tag }}
+          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
+          args:
+            - "--csi-address=$(ADDRESS)"
+            - "--connection-timeout=15s"
+            - "--v=5"
+          env:
+            - name: ADDRESS
+              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: socket-dir
+              mountPath: {{ .Values.socketDir }}
+          resources:
+{{ toYaml .Values.snapshotter.resources | indent 12 }}
+        - name: csi-rbdplugin
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
+          args:
+            - "--nodeid=$(NODE_ID)"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--v=5"
+            - "--drivername=$(DRIVER_NAME)"
+            - "--containerized=true"
+            - "--metadatastorage=k8s_configmap"
+          env:
+            - name: HOST_ROOTFS
+              value: "/rootfs"
+            - name: DRIVER_NAME
+              value: {{ .Values.driverName }}
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: CSI_ENDPOINT
+              value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
+          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
+          volumeMounts:
+            - name: socket-dir
+              mountPath: {{ .Values.socketDir }}
+            - name: host-rootfs
+              mountPath: "/rootfs"
+          resources:
+{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
      volumes:
        - name: socket-dir
+          emptyDir: {}
+        #FIXME this seems way too much. Why is it needed at all for this?
+        - name: host-rootfs
          hostPath:
-            path: {{ .Values.socketDir }}
-            type: DirectoryOrCreate
+            path: /
 {{- if .Values.provisioner.affinity -}}
      affinity:
 {{ toYaml .Values.provisioner.affinity . | indent 8 }}
@@ -1,3 +1,4 @@
---
rbac:
  create: true

@@ -14,6 +15,9 @@ serviceAccounts:
socketDir: /var/lib/kubelet/plugins/rbd.csi.ceph.com
socketFile: csi.sock
registrationDir: /var/lib/kubelet/plugins_registry
volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
driverName: rbd.csi.ceph.com

attacher:
  name: attacher
@@ -22,7 +26,7 @@ attacher:
  image:
    repository: quay.io/k8scsi/csi-attacher
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}
@@ -38,8 +42,8 @@ nodeplugin:
  registrar:
    image:
      repository: quay.io/k8scsi/csi-node-driver-registrar
      tag: v1.0.2
      pullPolicy: IfNotPresent

    resources: {}
@@ -47,7 +51,7 @@ nodeplugin:
  plugin:
    image:
      repository: quay.io/cephcsi/rbdplugin
      tag: v1.0.0
      pullPolicy: IfNotPresent

    resources: {}
@@ -65,7 +69,7 @@ provisioner:
  image:
    repository: quay.io/k8scsi/csi-provisioner
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}
@@ -75,3 +79,11 @@ provisioner:
  tolerations: []

  affinity: {}

snapshotter:
  image:
    repository: quay.io/k8scsi/csi-snapshotter
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}
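Because the sidecar versions and the driver name are plain values, they can be overridden at install time instead of editing the chart. A minimal sketch, assuming Helm 2 of this era; the release name and chart path are placeholders, not values taken from this repository:

```bash
# Sketch: install the rbdplugin chart with explicit overrides.
# "ceph-csi-rbd" and the chart path are hypothetical; the value keys
# (attacher.image.tag, driverName) come from the values.yaml above.
helm install --name ceph-csi-rbd ./deploy/rbd/helm \
  --set attacher.image.tag=v1.0.1 \
  --set driverName=rbd.csi.ceph.com
```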
@@ -1,17 +1,15 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-attacher

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
@@ -21,17 +19,20 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: rbd-csi-attacher
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-external-attacher-runner
  apiGroup: rbac.authorization.k8s.io
@@ -1,14 +1,18 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-nodeplugin

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]
@@ -23,18 +27,18 @@ rules:
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: rbd-csi-nodeplugin
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io
@@ -1,17 +1,24 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-provisioner

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
@@ -21,20 +28,64 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs: ["create"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: rbd-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "delete"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: rbd-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
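The `# replace with non-default namespace name` comments above can be satisfied mechanically. One possible sketch, where the `ceph-csi` namespace is an arbitrary example rather than a project default:

```bash
# Sketch: rewrite the hard-coded "default" namespace before applying the
# provisioner RBAC manifest shown above.
kubectl create namespace ceph-csi
sed 's/namespace: default/namespace: ceph-csi/g' csi-provisioner-rbac.yaml \
  | kubectl create -f -
```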
@@ -1,3 +1,4 @@
---
kind: Service
apiVersion: v1
metadata:
@@ -24,20 +25,20 @@ spec:
      labels:
        app: csi-rbdplugin-attacher
    spec:
      serviceAccount: rbd-csi-attacher
      containers:
        - name: csi-rbdplugin-attacher
          image: quay.io/k8scsi/csi-attacher:v1.0.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-attacher.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
      volumes:
        - name: socket-dir
          hostPath:
@@ -1,3 +1,4 @@
---
kind: Service
apiVersion: v1
metadata:
@@ -24,22 +25,88 @@ spec:
      labels:
        app: csi-rbdplugin-provisioner
    spec:
      serviceAccount: rbd-csi-provisioner
      containers:
        - name: csi-provisioner
          image: quay.io/k8scsi/csi-provisioner:v1.0.1
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: quay.io/k8scsi/csi-snapshotter:v1.0.1
          args:
            - "--csi-address=$(ADDRESS)"
            - "--connection-timeout=15s"
            - "--v=5"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: Always
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          image: quay.io/cephcsi/rbdplugin:v1.0.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--containerized=true"
            - "--metadatastorage=k8s_configmap"
            - "--configroot=k8s_objects"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /rootfs
              name: host-rootfs
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
      volumes:
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-rootfs
          hostPath:
            path: /
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
@@ -1,3 +1,4 @@
---
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
@@ -11,7 +12,7 @@ spec:
      labels:
        app: csi-rbdplugin
    spec:
      serviceAccount: rbd-csi-nodeplugin
      hostNetwork: true
      hostPID: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
@@ -19,23 +20,27 @@ spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
          lifecycle:
            preStop:
              exec:
                command: [
                  "/bin/sh", "-c",
                  "rm -rf /registration/rbd.csi.ceph.com \
                  /registration/rbd.csi.ceph.com-reg.sock"
                ]
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-rbdplugin
@@ -44,7 +49,7 @@ spec:
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: quay.io/cephcsi/rbdplugin:v1.0.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
@@ -52,6 +57,7 @@ spec:
            - "--drivername=rbd.csi.ceph.com"
            - "--containerized=true"
            - "--metadatastorage=k8s_configmap"
            - "--configroot=k8s_objects"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
@@ -64,14 +70,17 @@ spec:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: plugin-mount-dir
              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/
              mountPropagation: "Bidirectional"
            - mountPath: /dev
              name: host-dev
            - mountPath: /rootfs
@@ -86,18 +95,18 @@ spec:
          hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
            type: DirectoryOrCreate
        - name: plugin-mount-dir
          hostPath:
            path: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
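With the registrar now pointing kubelet at `/var/lib/kubelet/plugins_registry`, a quick way to confirm the driver actually registered is sketched below. The pod label and socket name come from the DaemonSet above; running `ls` directly on a node is an assumption about your access, not a documented step:

```bash
# Sketch: check the node plugin pods, then look for the registration socket
# on a node (the socket name matches the preStop cleanup in the DaemonSet).
kubectl get pods -l app=csi-rbdplugin -o wide
ls /var/lib/kubelet/plugins_registry/
# expected to contain: rbd.csi.ceph.com-reg.sock
```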
@@ -1,10 +1,15 @@
# CSI CephFS plugin

The CSI CephFS plugin is able to both provision new CephFS volumes
and attach and mount existing ones to workloads.

## Building

CSI CephFS plugin can be compiled in the form of a binary file or in the form
of a Docker image.
When compiled as a binary file, the result is stored in the `_output/`
directory with the name `cephfsplugin`.
When compiled as an image, it's stored in the local Docker image store.

Building binary:

@@ -29,24 +34,29 @@ Option | Default value | Description
`--nodeid` | _empty_ | This node's ID
`--volumemounter` | _empty_ | Default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in the system's path and will choose the Ceph kernel client if probing fails.
`--metadatastorage` | _empty_ | Whether metadata should be kept on the node as a file or in a k8s configmap (`node` or `k8s_configmap`)
`--mountcachedir` | _empty_ | Volume mount cache info save directory. If left unspecified, the driver will not record mount info; otherwise it saves mount info and remounts the cached volumes when the driver restarts.

**Available environmental variables:**

`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
the path of your k8s config file (if not specified, the plugin will assume
you're running it inside a k8s cluster and find the config itself).

`POD_NAMESPACE`: if you use `k8s_configmap` as metadata store, `POD_NAMESPACE`
is used to define in which namespace you want the configmaps to be stored.

**Available volume parameters:**

Parameter | Required | Description
--------- | -------- | -----------
`monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
`monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | A string pointing to the key in the credential secret whose value is the mon list. This covers the case when the monitors' IPs or hostnames change: the secret can be updated to pick up the new monitors. If both `monitors` and `monValueFromSecret` are set and the monitors set in the secret exist, `monValueFromSecret` takes precedence.
`mounter` | no | Mount method to be used for this volume. Available options are `kernel` for the Ceph kernel client and `fuse` for the Ceph FUSE driver. Defaults to the "default mounter", see command line arguments.
`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used.
`pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created
`rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume
`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | Name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-stage-secret-namespace` | for Kubernetes | Namespaces of the above Secret objects

**Required secrets for `provisionVolume=true`:**
Admin credentials are required for provisioning new volumes
@@ -60,13 +70,20 @@ User credentials with access to an existing volume
* `userID`: ID of a user client
* `userKey`: key of a user client

Notes on volume size: when provisioning a new volume, the `max_bytes` quota
attribute for this volume will be set to the requested volume size (see [Ceph
quota documentation](http://docs.ceph.com/docs/mimic/cephfs/quota/)). A request
for a zero-sized volume means no quota attribute will be set.
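To make the secret shape concrete, a minimal user-credentials Secret for the `provisionVolume=false` case might look like the sketch below. The name, namespace, and payloads are illustrative placeholders, not values shipped with this repository:

```yaml
---
apiVersion: v1
kind: Secret
metadata:
  # hypothetical name; it must match the csi.storage.k8s.io/*-secret-name
  # parameters in the table above
  name: csi-cephfs-secret
  namespace: default
data:
  # base64-encoded ID and key of a CephX client with access to the volume
  userID: YWRtaW4=
  userKey: <base64-encoded key, e.g. from `ceph auth get-key`>
```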
## Deployment with Kubernetes

Requires Kubernetes 1.13

Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged`
flag must be set to true for both the API server and the kubelet). Moreover, as
stated in the [mount propagation
docs](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation),
the Docker daemon of the cluster nodes must allow shared mounts.

YAML manifests are located in `deploy/cephfs/kubernetes`.

@@ -78,7 +95,9 @@ kubectl create -f csi-provisioner-rbac.yaml
kubectl create -f csi-nodeplugin-rbac.yaml
```

Those manifests deploy service accounts, cluster roles and cluster role
bindings. These are shared for both RBD and CephFS CSI plugins, as they require
the same permissions.

**Deploy CSI sidecar containers:**

@@ -87,7 +106,8 @@ kubectl create -f csi-cephfsplugin-attacher.yaml
kubectl create -f csi-cephfsplugin-provisioner.yaml
```

Deploys stateful sets for the external-attacher and external-provisioner
sidecar containers for CSI CephFS.

**Deploy CSI CephFS driver:**

@@ -95,14 +115,15 @@ Deploys stateful sets for external-attacher and external-provisioner sidecar con
```bash
kubectl create -f csi-cephfsplugin.yaml
```

Deploys a daemon set with two containers: the CSI driver-registrar and
the CSI CephFS driver.

## Verifying the deployment in Kubernetes

After successfully completing the steps above, you should see output similar to this:

```bash
$ kubectl get all
NAME                                 READY   STATUS    RESTARTS   AGE
pod/csi-cephfsplugin-attacher-0      1/1     Running   0          26s
pod/csi-cephfsplugin-provisioner-0   1/1     Running   0          25s
@@ -119,4 +140,7 @@ You can try deploying a demo pod from `examples/cephfs` to test the deployment f

### Notes on volume deletion

Volumes that were provisioned dynamically (i.e. `provisionVolume=true`) may
also be deleted by the driver, if the user chooses to do so. Otherwise, the
driver is forbidden to delete such volumes - attempting to delete them is a
no-op.
@@ -1,10 +1,14 @@
# CSI RBD Plugin

The RBD CSI plugin is able to provision new RBD images and
attach and mount those to workloads.

## Building

CSI RBD plugin can be compiled in a form of a binary file or in a form of a
Docker image. When compiled as a binary file, the result is stored in the
`_output/` directory with the name `rbdplugin`. When compiled as an image, it's
stored in the local Docker image store.

Building binary:

@@ -23,44 +27,66 @@ make image-rbdplugin
**Available command line arguments:**

Option | Default value | Description
------ | ------------- | -----------
`--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket
`--drivername` | `rbd.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--nodeid` | _empty_ | This node's ID
`--containerized` | true | Whether running in containerized mode
`--metadatastorage` | _empty_ | Whether metadata should be kept on the node as a file or in a k8s configmap (`node` or `k8s_configmap`)
`--configroot` | `/etc/csi-config` | Directory in which CSI-specific Ceph cluster configurations are present, OR the value `k8s_objects` if present as Kubernetes secrets

**Available environmental variables:**

`HOST_ROOTFS`: rbdplugin searches the `/proc` directory under the directory set by `HOST_ROOTFS`.

`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
the path of your k8s config file (if not specified, the plugin will assume
you're running it inside a k8s cluster and find the config itself).

`POD_NAMESPACE`: if you use `k8s_configmap` as metadata store,
`POD_NAMESPACE` is used to define in which namespace you want
the configmaps to be stored.

**Available volume parameters:**

Parameter | Required | Description
--------- | -------- | -----------
`monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
`monValueFromSecret` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | A string pointing to the key in the credential secret whose value is the mon list. This covers the case when the monitors' IPs or hostnames change: the secret can be updated to pick up the new monitors.
`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | String representing a Ceph cluster; must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use
`pool` | yes | Ceph pool into which the RBD image shall be created
`imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
`imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only the `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | for Kubernetes | Name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | Namespaces of the above Secret objects
`mounter` | no | If set to `rbd-nbd`, use `rbd-nbd` on nodes that have the `rbd-nbd` and `nbd` kernel modules to map rbd images

NOTE: If the `clusterID` parameter is used, then an accompanying Ceph cluster
configuration secret or config files needs to be provided to the running pods.
Refer to [Cluster ID based configuration](../examples/README.md#cluster-id-based-configuration)
for more information. A suggested way to populate the clusterID is to use the
output of `ceph fsid` of the Ceph cluster to be used for provisioning.

**Required secrets:**

Admin credentials are required for provisioning new RBD images: `ADMIN_NAME`:
`ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the
client with admin privileges, and the value is its password.

If `clusterID` is specified, then a secret with various keys and values as
specified in `examples/rbd/template-ceph-cluster-ID-secret.yaml` needs to be
created, with the secret name matching the string value provided as the
`clusterID`.
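Tying the table together, a StorageClass using the renamed `csi.storage.k8s.io/*` secret parameters might look like this sketch. The monitor list, pool, and secret names are placeholders to adapt, not values from this repository:

```yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd
provisioner: rbd.csi.ceph.com
parameters:
  # placeholder monitor list; use your own cluster's mons
  monitors: 192.168.100.1:6789,192.168.100.2:6789
  pool: rbd
  imageFormat: "2"
  imageFeatures: layering
  # hypothetical secret name/namespace; both name parameters should match
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-publish-secret-namespace: default
reclaimPolicy: Delete
```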
## Deployment with Kubernetes

Requires Kubernetes 1.11

Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged`
flag must be set to true for both the API server and the kubelet). Moreover, as
stated in the [mount propagation
docs](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation),
the Docker daemon of the cluster nodes must allow shared mounts.

YAML manifests are located in `deploy/rbd/kubernetes`.

@@ -72,7 +98,9 @@ kubectl create -f csi-provisioner-rbac.yaml
kubectl create -f csi-nodeplugin-rbac.yaml
```

Those manifests deploy service accounts, cluster roles and cluster role
bindings. These are shared for both RBD and CephFS CSI plugins, as they require
the same permissions.

**Deploy CSI sidecar containers:**

@@ -81,7 +109,8 @@ kubectl create -f csi-rbdplugin-attacher.yaml
kubectl create -f csi-rbdplugin-provisioner.yaml
```

Deploys stateful sets for the external-attacher and external-provisioner
sidecar containers for CSI RBD.

**Deploy RBD CSI driver:**

@@ -93,7 +122,7 @@ Deploys a daemon set with two containers: CSI driver-registrar and the CSI RBD d

## Verifying the deployment in Kubernetes

After successfully completing the steps above, you should see output similar to this:

```bash
$ kubectl get all
@@ -1,10 +1,24 @@
# How to test RBD and CephFS plugins with Kubernetes 1.13

Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and
`plugin-teardown.sh` helper scripts. You can use those to help you
deploy/teardown RBACs, sidecar containers and the plugin in one go.
By default, they look for the YAML manifests in
`../../deploy/{rbd,cephfs}/kubernetes`.
You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`.

Once the plugin is successfully deployed, you'll need to customize
`storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster
setup.
Please consult the documentation for info about available parameters.

**NOTE:** See section
[Cluster ID based configuration](#cluster-id-based-configuration) if using
the `clusterID` instead of the `monitors` or `monValueFromSecret` option in the
storage class for RBD based provisioning before proceeding.

After configuring the secrets, monitors, etc. you can deploy a
testing Pod mounting an RBD image / CephFS volume:

```bash
kubectl create -f secret.yaml
@@ -20,42 +34,221 @@ Other helper scripts:

## How to test RBD Snapshot feature

Before continuing, make sure you enabled the required
feature gate `VolumeSnapshotDataSource=true` in your Kubernetes cluster.

In the `examples/rbd` directory you will find two files related to snapshots:
[snapshotclass.yaml](./rbd/snapshotclass.yaml) and
[snapshot.yaml](./rbd/snapshot.yaml).

Once you created your RBD volume, you'll need to customize at least
`snapshotclass.yaml` and make sure the `monitors` and `pool` parameters match
your Ceph cluster setup (see the sketch below).
If you followed the documentation to create the rbdplugin, you shouldn't
have to edit any other file.
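For orientation, a minimal `snapshotclass.yaml` might look like the sketch below. It assumes the v1alpha1 snapshot API of this Kubernetes release, and the monitor list, pool, and secret names are placeholders to adapt rather than authoritative values:

```yaml
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
snapshotter: rbd.csi.ceph.com
parameters:
  # placeholders: match these to your Ceph cluster and credential secret
  monitors: 192.168.100.1:6789,192.168.100.2:6789
  pool: rbd
  csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
  csi.storage.k8s.io/snapshotter-secret-namespace: default
```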
After configuring everything you needed, deploy the snapshot class:

```bash
kubectl create -f snapshotclass.yaml
```

Verify that the snapshot class was created:

```console
$ kubectl get volumesnapshotclass
NAME                      AGE
csi-rbdplugin-snapclass   4s
```

Create a snapshot from the existing PVC:

```bash
kubectl create -f snapshot.yaml
```

To verify if your volume snapshot has successfully been created, run the following:

```console
$ kubectl get volumesnapshot
NAME               AGE
rbd-pvc-snapshot   6s
```

To check the status of the snapshot, run the following:

```bash
$ kubectl describe volumesnapshot rbd-pvc-snapshot
Name:         rbd-pvc-snapshot
Namespace:    default
Labels:       <none>
Annotations:  <none>
API Version:  snapshot.storage.k8s.io/v1alpha1
Kind:         VolumeSnapshot
Metadata:
  Creation Timestamp:  2019-02-06T08:52:34Z
  Finalizers:
    snapshot.storage.kubernetes.io/volumesnapshot-protection
  Generation:        5
  Resource Version:  84239
  Self Link:         /apis/snapshot.storage.k8s.io/v1alpha1/namespaces/default/volumesnapshots/rbd-pvc-snapshot
  UID:               8b9b5740-29ec-11e9-8e0f-b8ca3aad030b
Spec:
  Snapshot Class Name:    csi-rbdplugin-snapclass
  Snapshot Content Name:  snapcontent-8b9b5740-29ec-11e9-8e0f-b8ca3aad030b
  Source:
    API Group:  <nil>
    Kind:       PersistentVolumeClaim
    Name:       rbd-pvc
Status:
  Creation Time:  2019-02-06T08:52:34Z
  Ready To Use:   true
  Restore Size:   1Gi
Events:           <none>
```

To be sure everything is OK you can run `rbd snap ls [your-pvc-name]` inside
one of your Ceph pods.

To restore the snapshot to a new PVC, deploy
[pvc-restore.yaml](./rbd/pvc-restore.yaml) and a testing pod
[pod-restore.yaml](./rbd/pod-restore.yaml):

```bash
kubectl create -f pvc-restore.yaml
kubectl create -f pod-restore.yaml
```
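The restore PVC simply points its `dataSource` at the snapshot created above. A minimal sketch of what such a `pvc-restore.yaml` typically contains; the PVC name, storage class, and size here are assumptions, not the repository's actual file:

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-restore   # hypothetical name
spec:
  storageClassName: csi-rbd   # assumed RBD storage class
  dataSource:
    name: rbd-pvc-snapshot    # the VolumeSnapshot created above
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```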
## How to test RBD MULTI_NODE_MULTI_WRITER BLOCK feature

Requires feature-gates: `BlockVolume=true` `CSIBlockVolume=true`

*NOTE* The MULTI_NODE_MULTI_WRITER capability is only available for
volumes that are of access_type `block`.

*WARNING* This feature is strictly for workloads that know how to deal
with concurrent access to the volume (e.g. active/passive applications).
Using RWX modes on non-clustered file systems with applications trying
to simultaneously access the volume will likely result in data corruption!

The following are examples of issuing a request for a `Block`
`ReadWriteMany` claim, and using the resultant claim in a Pod:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: block-pvc
spec:
  accessModes:
    - ReadWriteMany
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd
```

Create a Pod that uses this PVC:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-container
      image: debian
      command: ["/bin/bash", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - devicePath: /dev/rbdblock
          name: my-volume
      imagePullPolicy: IfNotPresent
  volumes:
    - name: my-volume
      persistentVolumeClaim:
        claimName: block-pvc
```

Now we can create a second Pod (ensure the Pod is scheduled on a different
node; multi-writer on a single node works without this feature) that also uses
this PVC at the same time. Again, wait for the pod to enter the Running state,
and verify the block device is available.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: another-pod
spec:
  containers:
    - name: my-container
      image: debian
      command: ["/bin/bash", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - devicePath: /dev/rbdblock
          name: my-volume
      imagePullPolicy: IfNotPresent
  volumes:
    - name: my-volume
      persistentVolumeClaim:
        claimName: block-pvc
```

Wait for the Pods to enter the Running state, then check that the block device
is available at `/dev/rbdblock` in both containers:

```bash
$ kubectl exec -it my-pod -- fdisk -l /dev/rbdblock
Disk /dev/rbdblock: 1 GiB, 1073741824 bytes, 2097152 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
```

```bash
$ kubectl exec -it another-pod -- fdisk -l /dev/rbdblock
Disk /dev/rbdblock: 1 GiB, 1073741824 bytes, 2097152 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
```

## Cluster ID based configuration

Before creating a storage class that uses the option `clusterID` to refer to a
Ceph cluster, the following actions need to be completed.

Get the following information from the Ceph cluster:

* Admin ID and key that have privileges to perform CRUD operations on the Ceph
  cluster and pools of choice
  * The key is typically the output of `ceph auth get-key client.admin`, where
    `admin` is the Admin ID
  * Used to substitute admin/user ID and key values in the files below
* Ceph monitor list
  * Typically in the output of `ceph mon dump`
  * Used to prepare the comma separated MON list where required in the files below
* Ceph cluster fsid
  * If choosing to use the Ceph cluster fsid as the unique value of clusterID,
    this is the output of `ceph fsid`
  * Used to substitute `<cluster-id>` references in the files below

Update the template
[template-ceph-cluster-ID-secret.yaml](./rbd/template-ceph-cluster-ID-secret.yaml)
with values from a Ceph cluster and replace `<cluster-id>` with the chosen
clusterID to create the following secret (a scripted version of this
substitution is sketched after this step):

* `kubectl create -f rbd/template-ceph-cluster-ID-secret.yaml`
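Since the suggested clusterID is the Ceph fsid, the `<cluster-id>` substitution can be scripted. A minimal sketch, assuming the template path above and access to a Ceph admin keyring:

```bash
# Sketch: fill <cluster-id> in the secret template with this cluster's fsid,
# then create the secret. The template path comes from the step above.
fsid="$(ceph fsid)"
sed "s/<cluster-id>/${fsid}/g" rbd/template-ceph-cluster-ID-secret.yaml \
  | kubectl create -f -
```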
|
Storage class and snapshot class, using `<cluster-id>` as the value for the
|
||||||
|
option `clusterID`, can now be created on the cluster.
|
||||||
|
|
||||||
|
Remaining steps to test functionality remains the same as mentioned in the
|
||||||
|
sections above.
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
---
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -4,7 +4,7 @@ CONTAINER_NAME=csi-cephfsplugin
|
|||||||
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)
|
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)
|
||||||
|
|
||||||
function get_pod_status() {
|
function get_pod_status() {
|
||||||
echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
|
echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
|
||||||
}
|
}
|
||||||
|
|
||||||
while [[ "$(get_pod_status)" != "Running" ]]; do
|
while [[ "$(get_pod_status)" != "Running" ]]; do
|
||||||
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
|
|||||||
echo "Waiting for $POD_NAME (status $(get_pod_status))"
|
echo "Waiting for $POD_NAME (status $(get_pod_status))"
|
||||||
done
|
done
|
||||||
|
|
||||||
kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash
|
kubectl exec -it "${POD_NAME#*/}" -c "$CONTAINER_NAME" bash
|
||||||
|
@ -4,7 +4,7 @@ CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
  echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}

while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
  echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl logs -f "$POD_NAME" -c "$CONTAINER_NAME"
@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1

objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin)

for obj in "${objects[@]}"; do
  kubectl create -f "./$obj.yaml"
done
@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1

objects=(csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)

for obj in "${objects[@]}"; do
  kubectl delete -f "./$obj.yaml"
done
@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Pod
metadata:
@ -14,4 +15,3 @@ spec:
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc
        readOnly: false
@ -1,3 +1,4 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Secret
metadata:
@ -1,8 +1,9 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs
provisioner: cephfs.csi.ceph.com
parameters:
  # Comma separated list of Ceph monitors
  # if using FQDN, make sure csi plugin's dns policy is appropriate.
@ -13,7 +14,8 @@ parameters:
  # Requires admin credentials (adminID, adminKey).
  # For provisionVolume: "false":
  # It is assumed the volume already exists and the user is expected
  # to provide path to that volume (rootPath) and user credentials
  # (userID, userKey).
  provisionVolume: "true"

  # Ceph pool into which the volume shall be created
@ -25,13 +27,16 @@ parameters:
  # rootPath: /absolute/path

  # The secrets have to contain user and/or Ceph admin credentials.
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default

  # (optional) The driver can use either ceph-fuse (fuse) or
  # ceph kernel client (kernel).
  # If omitted, default volume mounter will be used - this is
  # determined by probing for ceph-fuse
  # or by setting the default mounter explicitly via
  # --volumemounter command-line argument.
  # mounter: kernel
reclaimPolicy: Delete
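For manifests written against v0.3, the secret parameters move under the
`csi.storage.k8s.io/` prefix. A hedged sketch of migrating an existing file
(the file name is assumed; StorageClass parameters cannot be edited in place,
so the object has to be deleted and recreated):

```bash
sed -i \
  -e 's|csiProvisionerSecretName:|csi.storage.k8s.io/provisioner-secret-name:|' \
  -e 's|csiProvisionerSecretNamespace:|csi.storage.k8s.io/provisioner-secret-namespace:|' \
  -e 's|csiNodeStageSecretName:|csi.storage.k8s.io/node-stage-secret-name:|' \
  -e 's|csiNodeStageSecretNamespace:|csi.storage.k8s.io/node-stage-secret-namespace:|' \
  storageclass.yaml                  # assumed file name
kubectl delete storageclass csi-cephfs
kubectl create -f storageclass.yaml
```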
@ -1,45 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-snapshotter-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-role
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-snapshotter-runner
  apiGroup: rbac.authorization.k8s.io
@ -1,56 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-snapshotter
---
kind: Service
apiVersion: v1
metadata:
  name: csi-snapshotter
  labels:
    app: csi-snapshotter
spec:
  selector:
    app: csi-snapshotter
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-snapshotter
spec:
  serviceName: "csi-snapshotter"
  replicas: 1
  selector:
    matchLabels:
      app: csi-snapshotter
  template:
    metadata:
      labels:
        app: csi-snapshotter
    spec:
      serviceAccount: csi-snapshotter
      containers:
        - name: csi-snapshotter
          image: quay.io/k8scsi/csi-snapshotter:v0.4.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--connection-timeout=15s"
            - "--v=5"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          imagePullPolicy: Always
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: Always
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
            type: DirectoryOrCreate
          name: socket-dir
@ -4,7 +4,7 @@ CONTAINER_NAME=csi-rbdplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
  echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}

while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
  echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl exec -it "${POD_NAME#*/}" -c "$CONTAINER_NAME" bash
@ -4,7 +4,7 @@ CONTAINER_NAME=csi-rbdplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)

function get_pod_status() {
  echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}

while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
  echo "Waiting for $POD_NAME (status $(get_pod_status))"
done

kubectl logs -f "$POD_NAME" -c "$CONTAINER_NAME"
@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1

objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin)

for obj in "${objects[@]}"; do
  kubectl create -f "./$obj.yaml"
done
@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1

objects=(csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)

for obj in "${objects[@]}"; do
  kubectl delete -f "./$obj.yaml"
done
@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: csirbd-block-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeDevices:
        - name: data
          devicePath: /dev/vda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: rbd-block-pvc
        readOnly: false
17 examples/rbd/pod-restore.yaml Normal file
@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: csirbd-restore-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc-restore
        readOnly: false
@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Pod
metadata:
@ -14,4 +15,3 @@ spec:
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false
16 examples/rbd/pvc-restore.yaml Normal file
@ -0,0 +1,16 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-restore
spec:
  storageClassName: csi-rbd
  dataSource:
    name: rbd-pvc-snapshot
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
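A plausible end-to-end flow for the new restore examples, assuming the file
names in this directory and that the source PVC already exists:

```bash
kubectl create -f snapshotclass.yaml   # VolumeSnapshotClass for rbd.csi.ceph.com
kubectl create -f snapshot.yaml        # VolumeSnapshot "rbd-pvc-snapshot" of the source PVC
kubectl create -f pvc-restore.yaml     # PVC "rbd-pvc-restore" with the snapshot as dataSource
kubectl create -f pod-restore.yaml     # pod consuming the restored claim
kubectl get pvc rbd-pvc-restore        # should eventually report Bound
```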
@ -1,3 +1,4 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
18 examples/rbd/raw-block-pod.yaml Normal file
@ -0,0 +1,18 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-raw-block-volume
spec:
  containers:
    - name: fc-container
      image: fedora:26
      command: ["/bin/sh", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc
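To check that a raw block volume is attached as a device rather than a
mounted filesystem, something along these lines should work once the pod is
running (the PVC file name is assumed):

```bash
kubectl create -f raw-block-pvc.yaml   # assumed file name for the "raw-block-pvc" claim
kubectl create -f raw-block-pod.yaml
# Expect a block special file ("b" in the mode bits), not a mount:
kubectl exec pod-with-raw-block-volume -- ls -l /dev/xvda
```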
@ -1,11 +1,12 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Secret
metadata:
@ -1,3 +1,4 @@
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshot
metadata:
@ -1,10 +1,23 @@
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
snapshotter: rbd.csi.ceph.com
parameters:
  pool: rbd
  # Comma separated list of Ceph monitors
  # if using FQDN, make sure csi plugin's dns policy is appropriate.
  monitors: mon1:port,mon2:port,...
  # OR,
  # String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  # If using clusterID, ensure to create a secret, as in
  # template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to
  # represent the Ceph cluster in clusterID
  # clusterID: <cluster-id>

  csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
  csi.storage.k8s.io/snapshotter-secret-namespace: default
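After creating the class, the renamed snapshotter can be checked against the
driver name the plugin registers:

```bash
kubectl create -f snapshotclass.yaml
kubectl get volumesnapshotclass csi-rbdplugin-snapclass \
  -o jsonpath='{.snapshotter}'   # expected: rbd.csi.ceph.com
```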
@ -1,35 +1,51 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd
provisioner: rbd.csi.ceph.com
parameters:
  # Comma separated list of Ceph monitors
  # if using FQDN, make sure csi plugin's dns policy is appropriate.
  monitors: mon1:port,mon2:port,...
  # OR,
  # String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  # If using clusterID, ensure to create a secret, as in
  # template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to
  # represent the Ceph cluster in clusterID
  # clusterID: <cluster-id>
  # OR,
  # if "monitors" parameter is not set, driver to get monitors from same
  # secret as admin/user credentials. "monValueFromSecret" provides the
  # key in the secret whose value is the mons
  # monValueFromSecret: "monitors"

  # Ceph pool into which the RBD image shall be created
  pool: rbd

  # RBD image format. Defaults to "2".
  imageFormat: "2"

  # RBD image features. Available for imageFormat: "2"
  # CSI RBD currently supports only `layering` feature.
  imageFeatures: layering

  # The secrets have to contain Ceph admin credentials.
  # NOTE: If using "clusterID" instead of "monitors" above, the following
  # secrets MAY be added to the ceph-cluster-<cluster-id> secret and skipped
  # here
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-publish-secret-namespace: default

  # Ceph users for operating RBD
  # NOTE: If using "clusterID" instead of "monitors" above, the following
  # IDs MAY be added to the ceph-cluster-<cluster-id> secret and skipped
  # here
  adminid: admin
  userid: kubernetes
  # uncomment the following to use rbd-nbd as mounter on supported nodes
36 examples/rbd/template-ceph-cluster-ID-secret.yaml Normal file
@ -0,0 +1,36 @@
---
# This is a template secret that helps define a Ceph cluster configuration
# as required by the CSI driver. This is used when a StorageClass has the
# "clusterID" defined as one of the parameters, to provide the CSI instance
# Ceph cluster configuration information.
apiVersion: v1
kind: Secret
metadata:
  # The <cluster-id> is used by the CSI plugin to uniquely identify and use a
  # Ceph cluster, the value MUST match the value provided as `clusterID` in the
  # StorageClass
  name: ceph-cluster-<cluster-id>
  namespace: default
data:
  # Base64 encoded and comma separated Ceph cluster monitor list
  # - Typically output of: `echo -n "mon1:port,mon2:port,..." | base64`
  monitors: <BASE64-ENCODED-MONLIST>
  # Base64 encoded and comma separated list of pool names from which volumes
  # can be provisioned
  pools: <BASE64-ENCODED-POOLIST>
  # Base64 encoded admin ID to use for provisioning
  # - Typically output of: `echo -n "<admin-id>" | base64`
  # Substitute the entire string including angle braces, with the base64 value
  adminid: <BASE64-ENCODED-ID>
  # Base64 encoded key of the provisioner admin ID
  # - Output of: `ceph auth get-key client.<admin-id> | base64`
  # Substitute the entire string including angle braces, with the base64 value
  adminkey: <BASE64-ENCODED-PASSWORD>
  # Base64 encoded user ID to use for publishing
  # - Typically output of: `echo -n "<admin-id>" | base64`
  # Substitute the entire string including angle braces, with the base64 value
  userid: <BASE64-ENCODED-ID>
  # Base64 encoded key of the publisher user ID
  # - Output of: `ceph auth get-key client.<admin-id> | base64`
  # Substitute the entire string including angle braces, with the base64 value
  userkey: <BASE64-ENCODED-PASSWORD>
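The base64 values the template asks for can be produced as follows; the IDs,
pool and monitor addresses are illustrative, and the commands mirror the
comments in the template:

```bash
echo -n "mon1:6789,mon2:6789" | base64        # -> monitors
echo -n "rbd" | base64                        # -> pools
echo -n "admin" | base64                      # -> adminid
ceph auth get-key client.admin | base64       # -> adminkey
echo -n "kubernetes" | base64                 # -> userid
ceph auth get-key client.kubernetes | base64  # -> userkey
```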
33 examples/rbd/template-csi-rbdplugin-patch.yaml Normal file
@ -0,0 +1,33 @@
---
# This is a patch to the existing daemonset deployment of CSI rbdplugin.
#
# This is to be used when using `clusterID` instead of monitors or
# monValueFromSecret in the StorageClass to specify the Ceph cluster to
# provision storage from, AND when the value of `--configroot` option to the
# CSI pods is NOT "k8s_objects".
#
# This patch file, patches in the specified secret for the 'clusterID' as a
# volume, instead of the Ceph CSI plugin actively fetching and using kubernetes
# secrets.
#
# Post substituting the <cluster-id> in all places execute,
# `kubectl patch daemonset csi-rbdplugin --patch\
#   "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the daemonset deployment.
#
# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\
#   "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the statefulset deployment.
spec:
  template:
    spec:
      containers:
        - name: csi-rbdplugin
          volumeMounts:
            - name: ceph-cluster-<cluster-id>
              mountPath: "/etc/csi-config/ceph-cluster-<cluster-id>"
              readOnly: true
      volumes:
        - name: ceph-cluster-<cluster-id>
          secret:
            secretName: ceph-cluster-<cluster-id>
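Putting the comments above together, one way to substitute and apply the patch
("ceph-1" is an illustrative cluster ID):

```bash
sed 's/<cluster-id>/ceph-1/g' template-csi-rbdplugin-patch.yaml > rbdplugin-patch.yaml
kubectl patch daemonset csi-rbdplugin --patch "$(cat rbdplugin-patch.yaml)"
kubectl patch statefulset csi-rbdplugin-provisioner --patch "$(cat rbdplugin-patch.yaml)"
```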
@ -17,114 +17,32 @@ limitations under the License.
package cephfs

import (
    "io/ioutil"
    "os"
)

var cephConfig = []byte(`[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
`)

const (
    cephConfigRoot = "/etc/ceph"
    cephConfigPath = "/etc/ceph/ceph.conf"
)

func createCephConfigRoot() error {
    return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}

func writeCephConfig() error {
    if err := createCephConfigRoot(); err != nil {
        return err
    }

    return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
}
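The rewritten file drops the per-volume templated config files (and the
`mon_host` line) in favour of one static file; monitors are now passed on the
command line instead. In shell terms, `writeCephConfig` amounts to roughly:

```bash
mkdir -p /etc/ceph
cat > /etc/ceph/ceph.conf <<'EOF'
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
EOF
chmod 0640 /etc/ceph/ceph.conf
```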
@ -17,10 +17,7 @@ limitations under the License.
package cephfs

import (
    "fmt"
)

const (
@ -47,76 +44,65 @@ func (ent *cephEntity) toCredentials() *credentials {
    }
}

func getCephUserName(volID volumeID) string {
    return cephUserPrefix + string(volID)
}

func getSingleCephEntity(args ...string) (*cephEntity, error) {
    var ents []cephEntity
    if err := execCommandJSON(&ents, "ceph", args...); err != nil {
        return nil, err
    }

    if len(ents) != 1 {
        return nil, fmt.Errorf("got unexpected number of entities: expected 1, got %d", len(ents))
    }

    return &ents[0], nil
}

func genUserIDs(adminCr *credentials, volID volumeID) (adminID, userID string) {
    return cephEntityClientPrefix + adminCr.id, cephEntityClientPrefix + getCephUserName(volID)
}

func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
    adminID, userID := genUserIDs(adminCr, volID)

    return getSingleCephEntity(
        "-m", volOptions.Monitors,
        "-n", adminID,
        "--key="+adminCr.key,
        "-c", cephConfigPath,
        "-f", "json",
        "auth", "get", userID,
    )
}

func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
    adminID, userID := genUserIDs(adminCr, volID)

    return getSingleCephEntity(
        "-m", volOptions.Monitors,
        "-n", adminID,
        "--key="+adminCr.key,
        "-c", cephConfigPath,
        "-f", "json",
        "auth", "get-or-create", userID,
        // User capabilities
        "mds", fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)),
        "mon", "allow r",
        "osd", fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)),
    )
}

func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error {
    adminID, userID := genUserIDs(adminCr, volID)

    return execCommandErr("ceph",
        "-m", volOptions.Monitors,
        "-n", adminID,
        "--key="+adminCr.key,
        "-c", cephConfigPath,
        "auth", "rm", userID,
    )
}
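With the config file now static, every helper passes the monitor list and
admin key explicitly. `createCephUser` above is roughly equivalent to this CLI
invocation, where the monitors, path, pool and namespace stand in for values
produced by helpers not shown in this hunk:

```bash
ceph -m "mon1:6789,mon2:6789" \
  -n client.admin --key="$ADMIN_KEY" \
  -c /etc/ceph/ceph.conf -f json \
  auth get-or-create "client.csi-cephfs-<volume-id>" \
  mds "allow rw path=<volume-root-path>" \
  mon "allow r" \
  osd "allow rw pool=<pool> namespace=<volume-namespace>"
```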
@ -17,18 +17,20 @@ limitations under the License.
package cephfs

import (
    csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
    "github.com/ceph/ceph-csi/pkg/util"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "golang.org/x/net/context"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/util/keymutex"
)

// ControllerServer struct of CEPH CSI driver with supported methods of CSI
// controller server spec.
type ControllerServer struct {
    *csicommon.DefaultControllerServer
    MetadataStore util.CachePersister
}
@ -38,129 +40,152 @@ type controllerCacheEntry struct {
    VolumeID volumeID
}

var (
    mtxControllerVolumeID = keymutex.NewHashed(0)
)

// CreateVolume creates the volume in backend and store the volume metadata
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
    if err := cs.validateCreateVolumeRequest(req); err != nil {
        klog.Errorf("CreateVolumeRequest validation failed: %v", err)
        return nil, err
    }

    // Configuration
    secret := req.GetSecrets()
    volOptions, err := newVolumeOptions(req.GetParameters(), secret)
    if err != nil {
        klog.Errorf("validation of volume options failed: %v", err)
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }

    volID := makeVolumeID(req.GetName())

    mtxControllerVolumeID.LockKey(string(volID))
    defer mustUnlock(mtxControllerVolumeID, string(volID))

    // Create a volume in case the user didn't provide one
    if volOptions.ProvisionVolume {
        // Admin credentials are required
        cr, err := getAdminCredentials(secret)
        if err != nil {
            return nil, status.Error(codes.InvalidArgument, err.Error())
        }

        if err = createVolume(volOptions, cr, volID, req.GetCapacityRange().GetRequiredBytes()); err != nil {
            klog.Errorf("failed to create volume %s: %v", req.GetName(), err)
            return nil, status.Error(codes.Internal, err.Error())
        }

        if _, err = createCephUser(volOptions, cr, volID); err != nil {
            klog.Errorf("failed to create ceph user for volume %s: %v", req.GetName(), err)
            return nil, status.Error(codes.Internal, err.Error())
        }

        klog.Infof("cephfs: successfully created volume %s", volID)
    } else {
        klog.Infof("cephfs: volume %s is provisioned statically", volID)
    }

    ce := &controllerCacheEntry{VolOptions: *volOptions, VolumeID: volID}
    if err := cs.MetadataStore.Create(string(volID), ce); err != nil {
        klog.Errorf("failed to store a cache entry for volume %s: %v", volID, err)
        return nil, status.Error(codes.Internal, err.Error())
    }

    return &csi.CreateVolumeResponse{
        Volume: &csi.Volume{
            VolumeId:      string(volID),
            CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
            VolumeContext: req.GetParameters(),
        },
    }, nil
}

// DeleteVolume deletes the volume in backend
// and removes the volume metadata from store
// nolint: gocyclo
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
    if err := cs.validateDeleteVolumeRequest(); err != nil {
        klog.Errorf("DeleteVolumeRequest validation failed: %v", err)
        return nil, err
    }

    var (
        volID   = volumeID(req.GetVolumeId())
        secrets = req.GetSecrets()
    )

    ce := &controllerCacheEntry{}
    if err := cs.MetadataStore.Get(string(volID), ce); err != nil {
        if err, ok := err.(*util.CacheEntryNotFound); ok {
            klog.Infof("cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err)
            return &csi.DeleteVolumeResponse{}, nil
        }

        return nil, status.Error(codes.Internal, err.Error())
    }

    if !ce.VolOptions.ProvisionVolume {
        // DeleteVolume() is forbidden for statically provisioned volumes!
        klog.Warningf("volume %s is provisioned statically, aborting delete", volID)
        return &csi.DeleteVolumeResponse{}, nil
    }

    // mons may have changed since create volume,
    // retrieve the latest mons and override old mons
    if mon, secretsErr := getMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
        klog.Infof("overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
        ce.VolOptions.Monitors = mon
    }

    // Deleting a volume requires admin credentials
    cr, err := getAdminCredentials(secrets)
    if err != nil {
        klog.Errorf("failed to retrieve admin credentials: %v", err)
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }

    mtxControllerVolumeID.LockKey(string(volID))
    defer mustUnlock(mtxControllerVolumeID, string(volID))

    if err = purgeVolume(volID, cr, &ce.VolOptions); err != nil {
        klog.Errorf("failed to delete volume %s: %v", volID, err)
        return nil, status.Error(codes.Internal, err.Error())
    }

    if err = deleteCephUser(&ce.VolOptions, cr, volID); err != nil {
        klog.Errorf("failed to delete ceph user for volume %s: %v", volID, err)
        return nil, status.Error(codes.Internal, err.Error())
    }

    if err = cs.MetadataStore.Delete(string(volID)); err != nil {
        return nil, status.Error(codes.Internal, err.Error())
    }

    klog.Infof("cephfs: successfully deleted volume %s", volID)

    return &csi.DeleteVolumeResponse{}, nil
}

// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(
    ctx context.Context,
    req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
    // Cephfs doesn't support Block volume
    for _, cap := range req.VolumeCapabilities {
        if cap.GetBlock() != nil {
            return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
        }
    }
    return &csi.ValidateVolumeCapabilitiesResponse{
        Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
            VolumeCapabilities: req.VolumeCapabilities,
        },
    }, nil
}
@ -19,10 +19,11 @@ package cephfs
import "fmt"

const (
    credUserID   = "userID"
    credUserKey  = "userKey"
    credAdminID  = "adminID"
    credAdminKey = "adminKey"
    credMonitors = "monitors"
)

type credentials struct {
@ -48,9 +49,16 @@ func getCredentials(idField, keyField string, secrets map[string]string) (*crede
}

func getUserCredentials(secrets map[string]string) (*credentials, error) {
    return getCredentials(credUserID, credUserKey, secrets)
}

func getAdminCredentials(secrets map[string]string) (*credentials, error) {
    return getCredentials(credAdminID, credAdminKey, secrets)
}

func getMonValFromSecret(secrets map[string]string) (string, error) {
    if mons, ok := secrets[credMonitors]; ok {
        return mons, nil
    }
    return "", fmt.Errorf("missing %q", credMonitors)
}
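A sketch of a secret carrying every key this file reads, including the new
optional `monitors` key consumed by `getMonValFromSecret`; the IDs, keys and
monitor addresses are illustrative:

```bash
kubectl create secret generic csi-cephfs-secret \
  --from-literal=adminID=admin \
  --from-literal=adminKey="$ADMIN_KEY" \
  --from-literal=userID=kubernetes \
  --from-literal=userKey="$USER_KEY" \
  --from-literal=monitors="mon1:6789,mon2:6789"
```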
@ -17,76 +17,78 @@ limitations under the License.
package cephfs

import (
    "k8s.io/klog"

    csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
    "github.com/ceph/ceph-csi/pkg/util"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

const (
    // version of ceph driver
    version = "1.0.0"
)

// PluginFolder defines the location of ceph plugin
var PluginFolder = "/var/lib/kubelet/plugins/"

// Driver contains the default identity,node and controller struct
type Driver struct {
    cd *csicommon.CSIDriver

    is *IdentityServer
    ns *NodeServer
    cs *ControllerServer
}

var (
    // DefaultVolumeMounter for mounting volumes
    DefaultVolumeMounter string
)

// NewDriver returns new ceph driver
func NewDriver() *Driver {
    return &Driver{}
}

// NewIdentityServer initialize a identity server for ceph CSI driver
func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
    return &IdentityServer{
        DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
    }
}

// NewControllerServer initialize a controller server for ceph CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer {
    return &ControllerServer{
        DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
        MetadataStore:           cachePersister,
    }
}

// NewNodeServer initialize a node server for ceph CSI driver.
func NewNodeServer(d *csicommon.CSIDriver) *NodeServer {
    return &NodeServer{
        DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
    }
}

// Run start a non-blocking grpc controller,node and identityserver for
// ceph CSI driver which can serve multiple parallel requests
func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir string, cachePersister util.CachePersister) {
    klog.Infof("Driver: %v version: %v", driverName, version)

    // Configuration
    if err := loadAvailableMounters(); err != nil {
        klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
    }

    if volumeMounter != "" {
        if err := validateMounter(volumeMounter); err != nil {
            klog.Fatalln(err)
        } else {
            DefaultVolumeMounter = volumeMounter
        }
@ -97,29 +99,40 @@ func (fs *cephfsDriver) Run(driverName, nodeId, endpoint, volumeMounter string,
        DefaultVolumeMounter = availableMounters[0]
    }

    klog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)

    if err := writeCephConfig(); err != nil {
        klog.Fatalf("failed to write ceph configuration file: %v", err)
    }

    initVolumeMountCache(driverName, mountCacheDir, cachePersister)
    if mountCacheDir != "" {
        if err := remountCachedVolumes(); err != nil {
            klog.Warningf("failed to remount cached volumes: %v", err)
            // ignore remount fail
        }
    }

    // Initialize default library driver
    fs.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
    if fs.cd == nil {
        klog.Fatalln("failed to initialize CSI driver")
    }

    fs.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
        csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
    })

    fs.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
        csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
    })

    // Create gRPC servers
    fs.is = NewIdentityServer(fs.cd)
    fs.ns = NewNodeServer(fs.cd)

    fs.cs = NewControllerServer(fs.cd, cachePersister)

    server := csicommon.NewNonBlockingGRPCServer()
    server.Start(endpoint, fs.is, fs.cs, fs.ns)
@ -19,15 +19,19 @@ package cephfs
import (
    "context"

    csicommon "github.com/ceph/ceph-csi/pkg/csi-common"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

// IdentityServer struct of ceph CSI driver with supported methods of CSI
// identity server spec.
type IdentityServer struct {
    *csicommon.DefaultIdentityServer
}

// GetPluginCapabilities returns available capabilities of the ceph driver
func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
    return &csi.GetPluginCapabilitiesResponse{
        Capabilities: []*csi.PluginCapability{
            {
311
pkg/cephfs/mountcache.go
Normal file
311
pkg/cephfs/mountcache.go
Normal file
@ -0,0 +1,311 @@
package cephfs

import (
	"encoding/base64"
	"os"
	"sync"
	"syscall"
	"time"

	"github.com/ceph/ceph-csi/pkg/util"
	"github.com/pkg/errors"
	"k8s.io/klog"
)

type volumeMountCacheEntry struct {
	DriverVersion string `json:"driverVersion"`

	VolumeID    string            `json:"volumeID"`
	Secrets     map[string]string `json:"secrets"`
	StagingPath string            `json:"stagingPath"`
	TargetPaths map[string]bool   `json:"targetPaths"`
	CreateTime  time.Time         `json:"createTime"`
}

type volumeMountCacheMap struct {
	volumes        map[string]volumeMountCacheEntry
	nodeCacheStore util.NodeCache
	metadataStore  util.CachePersister
}

var (
	volumeMountCachePrefix = "cephfs-mount-cache-"
	volumeMountCache       volumeMountCacheMap
	volumeMountCacheMtx    sync.Mutex
)

func initVolumeMountCache(driverName string, mountCacheDir string, cachePersister util.CachePersister) {
	volumeMountCache.volumes = make(map[string]volumeMountCacheEntry)

	volumeMountCache.metadataStore = cachePersister
	volumeMountCache.nodeCacheStore.BasePath = mountCacheDir
	volumeMountCache.nodeCacheStore.CacheDir = driverName
	klog.Infof("mount-cache: name: %s, version: %s, mountCacheDir: %s", driverName, version, mountCacheDir)
}

func remountCachedVolumes() error {
	if err := os.MkdirAll(volumeMountCache.nodeCacheStore.BasePath, 0755); err != nil {
		klog.Errorf("mount-cache: failed to create %s: %v", volumeMountCache.nodeCacheStore.BasePath, err)
		return err
	}
	var remountFailCount, remountSuccCount int64
	me := &volumeMountCacheEntry{}
	ce := &controllerCacheEntry{}
	err := volumeMountCache.nodeCacheStore.ForAll(volumeMountCachePrefix, me, func(identifier string) error {
		volID := me.VolumeID
		if err := volumeMountCache.metadataStore.Get(volID, ce); err != nil {
			if err, ok := err.(*util.CacheEntryNotFound); ok {
				klog.Infof("mount-cache: metadata not found, assuming the volume %s to be already deleted (%v)", volID, err)
				if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
					klog.Infof("mount-cache: metadata not found, delete volume cache entry for volume %s", volID)
				}
			}
		} else {
			if err := mountOneCacheEntry(ce, me); err == nil {
				remountSuccCount++
				volumeMountCache.volumes[me.VolumeID] = *me
				klog.Infof("mount-cache: successfully remounted volume %s", volID)
			} else {
				remountFailCount++
				klog.Errorf("mount-cache: failed to remount volume %s", volID)
			}
		}
		return nil
	})
	if err != nil {
		klog.Infof("mount-cache: metastore list cache fail %v", err)
		return err
	}
	if remountFailCount > 0 {
		klog.Infof("mount-cache: successfully remounted %d volumes, failed to remount %d volumes", remountSuccCount, remountFailCount)
	} else {
		klog.Infof("mount-cache: successfully remounted %d volumes", remountSuccCount)
	}
	return nil
}

func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountCacheEntry) error {
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	var (
		err error
		cr  *credentials
	)
	volID := ce.VolumeID
	volOptions := ce.VolOptions

	if volOptions.ProvisionVolume {
		volOptions.RootPath = getVolumeRootPathCeph(volID)
		cr, err = getAdminCredentials(decodeCredentials(me.Secrets))
		if err != nil {
			return err
		}
		var entity *cephEntity
		entity, err = getCephUser(&volOptions, cr, volID)
		if err != nil {
			return err
		}
		cr = entity.toCredentials()
	} else {
		cr, err = getUserCredentials(decodeCredentials(me.Secrets))
		if err != nil {
			return err
		}
	}

	err = cleanupMountPoint(me.StagingPath)
	if err != nil {
		klog.Infof("mount-cache: failed to cleanup volume mount point %s, remove it: %s %v", volID, me.StagingPath, err)
		return err
	}

	isMnt, err := isMountPoint(me.StagingPath)
	if err != nil {
		isMnt = false
		klog.Infof("mount-cache: failed to check volume mounted %s: %s %v", volID, me.StagingPath, err)
	}

	if !isMnt {
		m, err := newMounter(&volOptions)
		if err != nil {
			klog.Errorf("mount-cache: failed to create mounter for volume %s: %v", volID, err)
			return err
		}
		if err := m.mount(me.StagingPath, cr, &volOptions); err != nil {
			klog.Errorf("mount-cache: failed to mount volume %s: %v", volID, err)
			return err
		}
	}
	for targetPath, readOnly := range me.TargetPaths {
		if err := cleanupMountPoint(targetPath); err == nil {
			if err := bindMount(me.StagingPath, targetPath, readOnly); err != nil {
				klog.Errorf("mount-cache: failed to bind-mount volume %s: %s %s %v %v",
					volID, me.StagingPath, targetPath, readOnly, err)
			} else {
				klog.Infof("mount-cache: successfully bind-mounted volume %s: %s %s %v",
					volID, me.StagingPath, targetPath, readOnly)
			}
		}
	}
	return nil
}

func cleanupMountPoint(mountPoint string) error {
	if _, err := os.Stat(mountPoint); err != nil {
		if isCorruptedMnt(err) {
			klog.Infof("mount-cache: corrupted mount point %s, need unmount", mountPoint)
			err := execCommandErr("umount", mountPoint)
			if err != nil {
				klog.Infof("mount-cache: failed to umount %s %v", mountPoint, err)
				// ignore error, return err
			}
		}
	}
	if _, err := os.Stat(mountPoint); err != nil {
		klog.Errorf("mount-cache: failed to stat mount point %s %v", mountPoint, err)
		return err
	}
	return nil
}

func isCorruptedMnt(err error) bool {
	var underlyingError error
	switch pe := err.(type) {
	case nil:
		return false
	case *os.PathError:
		underlyingError = pe.Err
	case *os.LinkError:
		underlyingError = pe.Err
	case *os.SyscallError:
		underlyingError = pe.Err
	default:
		return false
	}

	CorruptedErrors := []error{
		syscall.ENOTCONN, syscall.ESTALE, syscall.EIO, syscall.EACCES}

	for _, v := range CorruptedErrors {
		if underlyingError == v {
			return true
		}
	}
	return false
}

func genVolumeMountCacheFileName(volID string) string {
	cachePath := volumeMountCachePrefix + volID
	return cachePath
}

func (mc *volumeMountCacheMap) isEnable() bool {
	// if mount cache dir is unset, the cache is disabled
	return mc.nodeCacheStore.BasePath != ""
}

func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath string, secrets map[string]string) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	lastTargetPaths := make(map[string]bool)
	me, ok := volumeMountCache.volumes[volID]
	if ok {
		if me.StagingPath == stagingTargetPath {
			klog.Warningf("mount-cache: node unexpected restage volume for volume %s", volID)
			return nil
		}
		lastTargetPaths = me.TargetPaths
		klog.Warningf("mount-cache: node stage volume ignore last cache entry for volume %s", volID)
	}

	me = volumeMountCacheEntry{DriverVersion: version}

	me.VolumeID = volID
	me.Secrets = encodeCredentials(secrets)
	me.StagingPath = stagingTargetPath
	me.TargetPaths = lastTargetPaths

	me.CreateTime = time.Now()
	volumeMountCache.volumes[volID] = me
	return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
}

func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()
	delete(volumeMountCache.volumes, volID)
	return mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID))
}

func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	_, ok := volumeMountCache.volumes[volID]
	if !ok {
		return errors.New("mount-cache: node publish volume failed to find cache entry for volume")
	}
	volumeMountCache.volumes[volID].TargetPaths[targetPath] = readOnly
	return mc.updateNodeCache(volID)
}

func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath string) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	_, ok := volumeMountCache.volumes[volID]
	if !ok {
		return errors.New("mount-cache: node unpublish volume failed to find cache entry for volume")
	}
	delete(volumeMountCache.volumes[volID].TargetPaths, targetPath)
	return mc.updateNodeCache(volID)
}

func (mc *volumeMountCacheMap) updateNodeCache(volID string) error {
	me := volumeMountCache.volumes[volID]
	if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
		klog.Infof("mount-cache: metadata not found, delete mount cache failed for volume %s", volID)
	}
	return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
}

func encodeCredentials(input map[string]string) (output map[string]string) {
	output = make(map[string]string)
	for key, value := range input {
		nKey := base64.StdEncoding.EncodeToString([]byte(key))
		nValue := base64.StdEncoding.EncodeToString([]byte(value))
		output[nKey] = nValue
	}
	return output
}

func decodeCredentials(input map[string]string) (output map[string]string) {
	output = make(map[string]string)
	for key, value := range input {
		nKey, err := base64.StdEncoding.DecodeString(key)
		if err != nil {
			klog.Errorf("mount-cache: decode secret fail")
			continue
		}
		nValue, err := base64.StdEncoding.DecodeString(value)
		if err != nil {
			klog.Errorf("mount-cache: decode secret fail")
			continue
		}
		output[string(nKey)] = string(nValue)
	}
	return output
}
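The isCorruptedMnt check above mirrors the unwrap-and-compare pattern used in kubelet's mount utilities. A minimal standalone sketch (hypothetical demo, not part of this PR) of how that classification behaves on the error os.Stat typically returns for a dead FUSE mount:

package main

import (
	"fmt"
	"os"
	"syscall"
)

func isCorrupted(err error) bool {
	var underlying error
	switch pe := err.(type) {
	case *os.PathError:
		underlying = pe.Err
	case *os.SyscallError:
		underlying = pe.Err
	default:
		return false
	}
	// errno values typically seen when stat()ing a broken FUSE mount
	for _, e := range []error{syscall.ENOTCONN, syscall.ESTALE, syscall.EIO} {
		if underlying == e {
			return true
		}
	}
	return false
}

func main() {
	// Simulate the error os.Stat returns when a ceph-fuse daemon has died.
	err := &os.PathError{Op: "stat", Path: "/staging/vol", Err: syscall.ENOTCONN}
	fmt.Println(isCorrupted(err)) // true
}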
38 pkg/cephfs/mountcache_test.go Normal file
@@ -0,0 +1,38 @@
package cephfs

import (
	"testing"
)

func init() {
}

func TestMountOneCacheEntry(t *testing.T) {
}

func TestRemountHisMountedPath(t *testing.T) {
}

func TestNodeStageVolume(t *testing.T) {
}

func TestNodeUnStageVolume(t *testing.T) {
}

func TestNodePublishVolume(t *testing.T) {
}

func TestNodeUnpublishVolume(t *testing.T) {
}

func TestEncodeDecodeCredentials(t *testing.T) {
	secrets := make(map[string]string)
	secrets["user_1"] = "value_1"
	enSecrets := encodeCredentials(secrets)
	deSecrets := decodeCredentials(enSecrets)
	for key, value := range secrets {
		if deSecrets[key] != value {
			t.Errorf("value %s of credentials key %s changed after decode: %s", value, key, deSecrets[key])
		}
	}
}
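Most of the stubs above are placeholders. A hedged sketch of how the round-trip check could be extended to several keys (hypothetical addition to the same package, not in this PR; the values are made-up placeholders):

func TestEncodeDecodeCredentialsRoundTrip(t *testing.T) {
	secrets := map[string]string{
		"adminID":  "admin",
		"adminKey": "placeholder-key", // placeholder, not a real secret
		"userID":   "user_1",
	}
	decoded := decodeCredentials(encodeCredentials(secrets))
	if len(decoded) != len(secrets) {
		t.Fatalf("expected %d entries, got %d", len(secrets), len(decoded))
	}
	for k, v := range secrets {
		if decoded[k] != v {
			t.Errorf("value for %q changed after round trip: %q != %q", k, decoded[k], v)
		}
	}
}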
@@ -21,63 +21,65 @@ import (
 	"fmt"
 	"os"

-	"github.com/golang/glog"
+	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
-
-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
-	"github.com/kubernetes-csi/drivers/pkg/csi-common"
+	"k8s.io/klog"
+	"k8s.io/kubernetes/pkg/util/keymutex"
 )

-type nodeServer struct {
+// NodeServer struct of ceph CSI driver with supported methods of CSI
+// node server spec.
+type NodeServer struct {
 	*csicommon.DefaultNodeServer
 }

-func getCredentialsForVolume(volOptions *volumeOptions, volId volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
-	var (
-		userCr *credentials
-		err    error
-	)
+var (
+	mtxNodeVolumeID = keymutex.NewHashed(0)
+)
+
+func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
+	var (
+		cr      *credentials
+		secrets = req.GetSecrets()
+	)

 	if volOptions.ProvisionVolume {
 		// The volume is provisioned dynamically, get the credentials directly from Ceph

-		// First, store admin credentials - those are needed for retrieving the user credentials
+		// First, get admin credentials - those are needed for retrieving the user credentials

-		adminCr, err := getAdminCredentials(req.GetNodeStageSecrets())
+		adminCr, err := getAdminCredentials(secrets)
 		if err != nil {
 			return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
 		}

-		if err = storeCephCredentials(volId, adminCr); err != nil {
-			return nil, fmt.Errorf("failed to store ceph admin credentials: %v", err)
-		}
-
 		// Then get the ceph user

-		entity, err := getCephUser(adminCr, volId)
+		entity, err := getCephUser(volOptions, adminCr, volID)
 		if err != nil {
 			return nil, fmt.Errorf("failed to get ceph user: %v", err)
 		}

-		userCr = entity.toCredentials()
+		cr = entity.toCredentials()
 	} else {
 		// The volume is pre-made, credentials are in node stage secrets

-		userCr, err = getUserCredentials(req.GetNodeStageSecrets())
+		userCr, err := getUserCredentials(req.GetSecrets())
 		if err != nil {
 			return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
 		}
+
+		cr = userCr
 	}

-	if err = storeCephCredentials(volId, userCr); err != nil {
-		return nil, fmt.Errorf("failed to store ceph user credentials: %v", err)
-	}
-
-	return userCr, nil
+	return cr, nil
 }

-func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
+// NodeStageVolume mounts the volume to a staging path on the node.
+func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
 	if err := validateNodeStageVolumeRequest(req); err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
@@ -85,70 +87,82 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	// Configuration

 	stagingTargetPath := req.GetStagingTargetPath()
-	volId := volumeID(req.GetVolumeId())
+	volID := volumeID(req.GetVolumeId())

-	volOptions, err := newVolumeOptions(req.GetVolumeAttributes())
+	volOptions, err := newVolumeOptions(req.GetVolumeContext(), req.GetSecrets())
 	if err != nil {
-		glog.Errorf("error reading volume options for volume %s: %v", volId, err)
+		klog.Errorf("error reading volume options for volume %s: %v", volID, err)
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}

 	if volOptions.ProvisionVolume {
 		// Dynamically provisioned volumes don't have their root path set, do it here
-		volOptions.RootPath = getVolumeRootPathCeph(volId)
+		volOptions.RootPath = getVolumeRootPathCeph(volID)
 	}

 	if err = createMountPoint(stagingTargetPath); err != nil {
-		glog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volId, err)
+		klog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volID, err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}

-	cephConf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volId}
-	if err = cephConf.writeToFile(); err != nil {
-		glog.Errorf("failed to write ceph config file to %s for volume %s: %v", getCephConfPath(volId), volId, err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
+	mtxNodeVolumeID.LockKey(string(volID))
+	defer mustUnlock(mtxNodeVolumeID, string(volID))

 	// Check if the volume is already mounted

 	isMnt, err := isMountPoint(stagingTargetPath)
 	if err != nil {
-		glog.Errorf("stat failed: %v", err)
+		klog.Errorf("stat failed: %v", err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}

 	if isMnt {
-		glog.Infof("cephfs: volume %s is already mounted to %s, skipping", volId, stagingTargetPath)
+		klog.Infof("cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
 		return &csi.NodeStageVolumeResponse{}, nil
 	}

 	// It's not, mount now
-
-	cr, err := getCredentialsForVolume(volOptions, volId, req)
-	if err != nil {
-		glog.Errorf("failed to get ceph credentials for volume %s: %v", volId, err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	m, err := newMounter(volOptions)
-	if err != nil {
-		glog.Errorf("failed to create mounter for volume %s: %v", volId, err)
-	}
-
-	glog.V(4).Infof("cephfs: mounting volume %s with %s", volId, m.name())
-
-	if err = m.mount(stagingTargetPath, cr, volOptions, volId); err != nil {
-		glog.Errorf("failed to mount volume %s: %v", volId, err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	glog.Infof("cephfs: successfully mounted volume %s to %s", volId, stagingTargetPath)
+	if err = ns.mount(volOptions, req); err != nil {
+		return nil, err
+	}
+
+	klog.Infof("cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)

 	return &csi.NodeStageVolumeResponse{}, nil
 }

-func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
+func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
+	stagingTargetPath := req.GetStagingTargetPath()
+	volID := volumeID(req.GetVolumeId())
+
+	cr, err := getCredentialsForVolume(volOptions, volID, req)
+	if err != nil {
+		klog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err)
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	m, err := newMounter(volOptions)
+	if err != nil {
+		klog.Errorf("failed to create mounter for volume %s: %v", volID, err)
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	klog.V(4).Infof("cephfs: mounting volume %s with %s", volID, m.name())
+
+	if err = m.mount(stagingTargetPath, cr, volOptions); err != nil {
+		klog.Errorf("failed to mount volume %s: %v", volID, err)
+		return status.Error(codes.Internal, err.Error())
+	}
+	if err := volumeMountCache.nodeStageVolume(req.GetVolumeId(), stagingTargetPath, req.GetSecrets()); err != nil {
+		klog.Warningf("mount-cache: failed to stage volume %s %s: %v", volID, stagingTargetPath, err)
+	}
+	return nil
+}
+
+// NodePublishVolume mounts the volume mounted to the staging path to the target
+// path
+func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
 	if err := validateNodePublishVolumeRequest(req); err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
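NodeStageVolume now serializes concurrent stage calls for the same volume through mtxNodeVolumeID. A sketch of the per-key locking pattern, assuming the LockKey/UnlockKey API of the vendored k8s.io/kubernetes/pkg/util/keymutex package:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/keymutex"
)

func main() {
	// NewHashed(0) picks a default bucket count; keys hash onto a fixed
	// set of mutexes, so distinct volume IDs rarely contend.
	mtx := keymutex.NewHashed(0)

	volID := "csi-cephfs-example" // hypothetical volume ID
	mtx.LockKey(volID)            // blocks a concurrent stage of the same volume
	defer func() {
		if err := mtx.UnlockKey(volID); err != nil {
			fmt.Println("unlock failed:", err)
		}
	}()

	// ... critical section: check mount state, then mount ...
}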
@@ -156,10 +170,10 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	// Configuration

 	targetPath := req.GetTargetPath()
-	volId := req.GetVolumeId()
+	volID := req.GetVolumeId()

 	if err := createMountPoint(targetPath); err != nil {
-		glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
+		klog.Errorf("failed to create mount point at %s: %v", targetPath, err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}

@@ -168,66 +182,89 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	isMnt, err := isMountPoint(targetPath)
 	if err != nil {
-		glog.Errorf("stat failed: %v", err)
+		klog.Errorf("stat failed: %v", err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}

 	if isMnt {
-		glog.Infof("cephfs: volume %s is already bind-mounted to %s", volId, targetPath)
+		klog.Infof("cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
 		return &csi.NodePublishVolumeResponse{}, nil
 	}

 	// It's not, mount now

 	if err = bindMount(req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly()); err != nil {
-		glog.Errorf("failed to bind-mount volume %s: %v", volId, err)
+		klog.Errorf("failed to bind-mount volume %s: %v", volID, err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}

-	glog.Infof("cephfs: successfully bind-mounted volume %s to %s", volId, targetPath)
+	if err := volumeMountCache.nodePublishVolume(volID, targetPath, req.GetReadonly()); err != nil {
+		klog.Warningf("mount-cache: failed to publish volume %s %s: %v", volID, targetPath, err)
+	}
+
+	klog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)

 	return &csi.NodePublishVolumeResponse{}, nil
 }

-func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
-	if err := validateNodeUnpublishVolumeRequest(req); err != nil {
+// NodeUnpublishVolume unmounts the volume from the target path
+func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
+	var err error
+	if err = validateNodeUnpublishVolumeRequest(req); err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}

 	targetPath := req.GetTargetPath()

+	volID := req.GetVolumeId()
+	if err = volumeMountCache.nodeUnPublishVolume(volID, targetPath); err != nil {
+		klog.Warningf("mount-cache: failed to unpublish volume %s %s: %v", volID, targetPath, err)
+	}
+
 	// Unmount the bind-mount
-	if err := unmountVolume(targetPath); err != nil {
+	if err = unmountVolume(targetPath); err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}

-	os.Remove(targetPath)
+	if err = os.Remove(targetPath); err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}

-	glog.Infof("cephfs: successfully unbinded volume %s from %s", req.GetVolumeId(), targetPath)
+	klog.Infof("cephfs: successfully unbinded volume %s from %s", req.GetVolumeId(), targetPath)

 	return &csi.NodeUnpublishVolumeResponse{}, nil
 }

-func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
-	if err := validateNodeUnstageVolumeRequest(req); err != nil {
+// NodeUnstageVolume unstages the volume from the staging path
+func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
+	var err error
+	if err = validateNodeUnstageVolumeRequest(req); err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}

 	stagingTargetPath := req.GetStagingTargetPath()

+	volID := req.GetVolumeId()
+	if err = volumeMountCache.nodeUnStageVolume(volID); err != nil {
+		klog.Warningf("mount-cache: failed to unstage volume %s %s: %v", volID, stagingTargetPath, err)
+	}
+
 	// Unmount the volume
-	if err := unmountVolume(stagingTargetPath); err != nil {
+	if err = unmountVolume(stagingTargetPath); err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}

-	os.Remove(stagingTargetPath)
+	if err = os.Remove(stagingTargetPath); err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}

-	glog.Infof("cephfs: successfully umounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
+	klog.Infof("cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)

 	return &csi.NodeUnstageVolumeResponse{}, nil
 }

-func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
+// NodeGetCapabilities returns the supported capabilities of the node server
+func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
 	return &csi.NodeGetCapabilitiesResponse{
 		Capabilities: []*csi.NodeServiceCapability{
 			{
@@ -19,48 +19,70 @@ package cephfs
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
 	"os/exec"

-	"github.com/golang/glog"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"k8s.io/klog"

-	"github.com/container-storage-interface/spec/lib/go/csi/v0"
-	"github.com/pborman/uuid"
+	"github.com/ceph/ceph-csi/pkg/util"
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"k8s.io/kubernetes/pkg/util/keymutex"
 	"k8s.io/kubernetes/pkg/util/mount"
 )

 type volumeID string

-func newVolumeID() volumeID {
-	return volumeID("csi-cephfs-" + uuid.NewUUID().String())
+func mustUnlock(m keymutex.KeyMutex, key string) {
+	if err := m.UnlockKey(key); err != nil {
+		klog.Fatalf("failed to unlock mutex for %s: %v", key, err)
+	}
 }

-func execCommand(command string, args ...string) ([]byte, error) {
-	glog.V(4).Infof("cephfs: EXEC %s %s", command, args)
-
-	cmd := exec.Command(command, args...)
-	return cmd.CombinedOutput()
+func makeVolumeID(volName string) volumeID {
+	return volumeID("csi-cephfs-" + volName)
 }

-func execCommandAndValidate(program string, args ...string) error {
-	out, err := execCommand(program, args...)
-	if err != nil {
-		return fmt.Errorf("cephfs: %s failed with following error: %s\ncephfs: %s output: %s", program, err, program, out)
+func execCommand(program string, args ...string) (stdout, stderr []byte, err error) {
+	var (
+		cmd           = exec.Command(program, args...) // nolint: gosec
+		sanitizedArgs = util.StripSecretInArgs(args)
+		stdoutBuf     bytes.Buffer
+		stderrBuf     bytes.Buffer
+	)
+
+	cmd.Stdout = &stdoutBuf
+	cmd.Stderr = &stderrBuf
+
+	klog.V(4).Infof("cephfs: EXEC %s %s", program, sanitizedArgs)
+
+	if err := cmd.Run(); err != nil {
+		return nil, nil, fmt.Errorf("an error occurred while running (%d) %s %v: %v: %s",
+			cmd.Process.Pid, program, sanitizedArgs, err, stderrBuf.Bytes())
 	}

-	return nil
+	return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
+}
+
+func execCommandErr(program string, args ...string) error {
+	_, _, err := execCommand(program, args...)
+	return err
 }

 func execCommandJSON(v interface{}, program string, args ...string) error {
-	out, err := execCommand(program, args...)
+	stdout, _, err := execCommand(program, args...)
+
 	if err != nil {
-		return fmt.Errorf("cephfs: %s failed with following error: %s\ncephfs: %s output: %s", program, err, program, out)
+		return err
 	}

-	return json.NewDecoder(bytes.NewReader(out)).Decode(v)
+	if err = json.Unmarshal(stdout, v); err != nil {
+		return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, util.StripSecretInArgs(args), stdout, err)
+	}
+
+	return nil
 }

 // Used in isMountPoint()
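The rewritten execCommand keeps stdout and stderr separate (mountFuse later needs the ceph-fuse PID, which is reported on stderr) and logs only sanitized arguments. A self-contained sketch of the same capture pattern:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// run executes a program and returns stdout and stderr independently,
// mirroring the buffer wiring used by execCommand above.
func run(program string, args ...string) (stdout, stderr []byte, err error) {
	var outBuf, errBuf bytes.Buffer
	cmd := exec.Command(program, args...)
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, nil, fmt.Errorf("%s %v failed: %v: %s", program, args, err, errBuf.Bytes())
	}
	return outBuf.Bytes(), errBuf.Bytes(), nil
}

func main() {
	out, _, err := run("echo", "hello")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // hello
}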
@@ -75,33 +97,13 @@ func isMountPoint(p string) (bool, error) {
 	return !notMnt, nil
 }

-func storeCephCredentials(volId volumeID, cr *credentials) error {
-	keyringData := cephKeyringData{
-		UserId:   cr.id,
-		Key:      cr.key,
-		VolumeID: volId,
-	}
-
-	if err := keyringData.writeToFile(); err != nil {
-		return err
-	}
-
-	secret := cephSecretData{
-		UserId:   cr.id,
-		Key:      cr.key,
-		VolumeID: volId,
-	}
-
-	err := secret.writeToFile()
-
-	return err
-}
+func pathExists(p string) bool {
+	_, err := os.Stat(p)
+	return err == nil
+}

-//
 // Controller service request validation
-//
-
-func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
+func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
 	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
 		return fmt.Errorf("invalid CreateVolumeRequest: %v", err)
 	}
@@ -120,10 +122,11 @@ func (cs *controllerServer) validateCreateVolumeReq
 			return status.Error(codes.Unimplemented, "block volume not supported")
 		}
 	}

 	return nil
 }

-func (cs *controllerServer) validateDeleteVolumeRequest(req *csi.DeleteVolumeRequest) error {
+func (cs *ControllerServer) validateDeleteVolumeRequest() error {
 	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
 		return fmt.Errorf("invalid DeleteVolumeRequest: %v", err)
 	}
@@ -131,25 +134,22 @@ func (cs *controllerServer) validateDeleteVolumeReq
 	return nil
 }

-//
 // Node service request validation
-//

 func validateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
 	if req.GetVolumeCapability() == nil {
-		return fmt.Errorf("volume capability missing in request")
+		return errors.New("volume capability missing in request")
 	}

 	if req.GetVolumeId() == "" {
-		return fmt.Errorf("volume ID missing in request")
+		return errors.New("volume ID missing in request")
 	}

 	if req.GetStagingTargetPath() == "" {
-		return fmt.Errorf("staging target path missing in request")
+		return errors.New("staging target path missing in request")
 	}

-	if req.GetNodeStageSecrets() == nil || len(req.GetNodeStageSecrets()) == 0 {
-		return fmt.Errorf("stage secrets cannot be nil or empty")
+	if req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {
+		return errors.New("stage secrets cannot be nil or empty")
 	}

 	return nil
@@ -157,11 +157,11 @@ func validateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {

 func validateNodeUnstageVolumeRequest(req *csi.NodeUnstageVolumeRequest) error {
 	if req.GetVolumeId() == "" {
-		return fmt.Errorf("volume ID missing in request")
+		return errors.New("volume ID missing in request")
 	}

 	if req.GetStagingTargetPath() == "" {
-		return fmt.Errorf("staging target path missing in request")
+		return errors.New("staging target path missing in request")
 	}

 	return nil
@@ -169,15 +169,15 @@ func validateNodeUnstageVolumeRequest(req *csi.NodeUnstageVolumeRequest) error {

 func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {
 	if req.GetVolumeCapability() == nil {
-		return fmt.Errorf("volume capability missing in request")
+		return errors.New("volume capability missing in request")
 	}

 	if req.GetVolumeId() == "" {
-		return fmt.Errorf("volume ID missing in request")
+		return errors.New("volume ID missing in request")
 	}

 	if req.GetTargetPath() == "" {
-		return fmt.Errorf("varget path missing in request")
+		return errors.New("target path missing in request")
 	}

 	return nil
@@ -185,11 +185,11 @@ func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {

 func validateNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) error {
 	if req.GetVolumeId() == "" {
-		return fmt.Errorf("volume ID missing in request")
+		return errors.New("volume ID missing in request")
 	}

 	if req.GetTargetPath() == "" {
-		return fmt.Errorf("target path missing in request")
+		return errors.New("target path missing in request")
 	}

 	return nil
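The fmt.Errorf → errors.New swaps above are mechanical: with no format verbs the two constructions are equivalent, and linters commonly flag the former. A tiny standalone demonstration (illustrative only):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// fmt.Errorf without format verbs is just errors.New with extra
	// printf machinery; the messages are identical.
	a := fmt.Errorf("volume ID missing in request")
	b := errors.New("volume ID missing in request")
	fmt.Println(a.Error() == b.Error()) // true
}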
@@ -20,6 +20,8 @@ import (
 	"fmt"
 	"os"
 	"path"
+
+	"k8s.io/klog"
 )

 const (
@@ -28,113 +30,127 @@ const (
 	namespacePrefix = "ns-"
 )

-var (
-	cephRootPrefix = PluginFolder + "/controller/volumes/root-"
-)
-
-func getCephRootPathLocal(volId volumeID) string {
-	return cephRootPrefix + string(volId)
+func getCephRootPathLocal(volID volumeID) string {
+	return fmt.Sprintf("%s/controller/volumes/root-%s", PluginFolder, string(volID))
 }

-func getCephRootVolumePathLocal(volId volumeID) string {
-	return path.Join(getCephRootPathLocal(volId), cephVolumesRoot, string(volId))
+func getCephRootVolumePathLocal(volID volumeID) string {
+	return path.Join(getCephRootPathLocal(volID), cephVolumesRoot, string(volID))
 }

-func getVolumeRootPathCeph(volId volumeID) string {
-	return path.Join("/", cephVolumesRoot, string(volId))
+func getVolumeRootPathCeph(volID volumeID) string {
+	return path.Join("/", cephVolumesRoot, string(volID))
 }

-func getVolumeNamespace(volId volumeID) string {
-	return namespacePrefix + string(volId)
+func getVolumeNamespace(volID volumeID) string {
+	return namespacePrefix + string(volID)
 }

 func setVolumeAttribute(root, attrName, attrValue string) error {
-	return execCommandAndValidate("setfattr", "-n", attrName, "-v", attrValue, root)
+	return execCommandErr("setfattr", "-n", attrName, "-v", attrValue, root)
 }

-func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeID, bytesQuota int64) error {
-	cephRoot := getCephRootPathLocal(volId)
-
-	if err := createMountPoint(cephRoot); err != nil {
+func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error {
+	if err := mountCephRoot(volID, volOptions, adminCr); err != nil {
 		return err
 	}
+	defer unmountCephRoot(volID)

-	// RootPath is not set for a dynamically provisioned volume
-	// Access to cephfs's / is required
-	volOptions.RootPath = "/"
+	var (
+		volRoot         = getCephRootVolumePathLocal(volID)
+		volRootCreating = volRoot + "-creating"
+	)

-	m, err := newMounter(volOptions)
-	if err != nil {
-		return fmt.Errorf("failed to create mounter: %v", err)
+	if pathExists(volRoot) {
+		klog.V(4).Infof("cephfs: volume %s already exists, skipping creation", volID)
+		return nil
 	}

-	if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil {
-		return fmt.Errorf("error mounting ceph root: %v", err)
-	}
-
-	defer func() {
-		unmountVolume(cephRoot)
-		os.Remove(cephRoot)
-	}()
-
-	volOptions.RootPath = getVolumeRootPathCeph(volId)
-	localVolRoot := getCephRootVolumePathLocal(volId)
-
-	if err := createMountPoint(localVolRoot); err != nil {
+	if err := createMountPoint(volRootCreating); err != nil {
 		return err
 	}

 	if bytesQuota > 0 {
-		if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
+		if err := setVolumeAttribute(volRootCreating, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
 			return err
 		}
 	}

-	if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool", volOptions.Pool); err != nil {
+	if err := setVolumeAttribute(volRootCreating, "ceph.dir.layout.pool", volOptions.Pool); err != nil {
 		return fmt.Errorf("%v\ncephfs: Does pool '%s' exist?", err, volOptions.Pool)
 	}

-	err = setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volId))
-
-	return err
-}
+	if err := setVolumeAttribute(volRootCreating, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil {
+		return err
+	}

-func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions) error {
+	if err := os.Rename(volRootCreating, volRoot); err != nil {
+		return fmt.Errorf("couldn't mark volume %s as created: %v", volID, err)
+	}
+
+	return nil
+}
+
+func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions) error {
+	if err := mountCephRoot(volID, volOptions, adminCr); err != nil {
+		return err
+	}
+	defer unmountCephRoot(volID)
+
 	var (
-		cephRoot        = getCephRootPathLocal(volId)
-		volRoot         = getCephRootVolumePathLocal(volId)
+		volRoot         = getCephRootVolumePathLocal(volID)
 		volRootDeleting = volRoot + "-deleting"
 	)

-	if err := createMountPoint(cephRoot); err != nil {
-		return err
+	if pathExists(volRoot) {
+		if err := os.Rename(volRoot, volRootDeleting); err != nil {
+			return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err)
+		}
+	} else {
+		if !pathExists(volRootDeleting) {
+			klog.V(4).Infof("cephfs: volume %s not found, assuming it to be already deleted", volID)
+			return nil
+		}
 	}

+	if err := os.RemoveAll(volRootDeleting); err != nil {
+		return fmt.Errorf("failed to delete volume %s: %v", volID, err)
+	}
+
+	return nil
+}
+
+func mountCephRoot(volID volumeID, volOptions *volumeOptions, adminCr *credentials) error {
+	cephRoot := getCephRootPathLocal(volID)
+
 	// Root path is not set for dynamically provisioned volumes
 	// Access to cephfs's / is required
 	volOptions.RootPath = "/"

+	if err := createMountPoint(cephRoot); err != nil {
+		return err
+	}
+
 	m, err := newMounter(volOptions)
 	if err != nil {
 		return fmt.Errorf("failed to create mounter: %v", err)
 	}

-	if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil {
+	if err = m.mount(cephRoot, adminCr, volOptions); err != nil {
 		return fmt.Errorf("error mounting ceph root: %v", err)
 	}

-	defer func() {
-		unmountVolume(volRoot)
-		os.Remove(volRoot)
-	}()
-
-	if err := os.Rename(volRoot, volRootDeleting); err != nil {
-		return fmt.Errorf("coudln't mark volume %s for deletion: %v", volId, err)
-	}
-
-	if err := os.RemoveAll(volRootDeleting); err != nil {
-		return fmt.Errorf("failed to delete volume %s: %v", volId, err)
-	}
-
 	return nil
 }
+
+func unmountCephRoot(volID volumeID) {
+	cephRoot := getCephRootPathLocal(volID)
+
+	if err := unmountVolume(cephRoot); err != nil {
+		klog.Errorf("failed to unmount %s with error %s", cephRoot, err)
+	} else {
+		if err := os.Remove(cephRoot); err != nil {
+			klog.Errorf("failed to remove %s with error %s", cephRoot, err)
+		}
+	}
+}
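createVolume and purgeVolume now stage work under a "-creating"/"-deleting" sibling name and commit with os.Rename, so a crash mid-operation never leaves a directory that looks finished. A standalone sketch of that marker pattern (hypothetical paths and helper names, not from this PR):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// createDirAtomically populates a "-creating" sibling, then renames it into
// place; rename(2) is atomic within one filesystem, so the final name either
// appears fully populated or not at all.
func createDirAtomically(final string, populate func(tmp string) error) error {
	tmp := final + "-creating"
	if _, err := os.Stat(final); err == nil {
		return nil // already committed earlier
	}
	if err := os.MkdirAll(tmp, 0755); err != nil {
		return err
	}
	if err := populate(tmp); err != nil {
		return err // the "-creating" suffix marks it as incomplete
	}
	return os.Rename(tmp, final)
}

func main() {
	err := createDirAtomically("/tmp/csi-vol-demo", func(tmp string) error {
		return ioutil.WriteFile(tmp+"/attrs", []byte("quota=1G\n"), 0644)
	})
	fmt.Println(err)
}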
@@ -17,10 +17,15 @@ limitations under the License.
 package cephfs

 import (
-	"bytes"
+	"errors"
 	"fmt"
 	"os"
 	"os/exec"
+	"regexp"
+	"strconv"
+	"sync"
+
+	"k8s.io/klog"
 )

 const (
@@ -30,12 +35,20 @@ const (

 var (
 	availableMounters []string
+
+	// maps a mountpoint to PID of its FUSE daemon
+	fusePidMap    = make(map[string]int)
+	fusePidMapMtx sync.Mutex
+
+	fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`)
 )

 // Load available ceph mounters installed on system into availableMounters
 // Called from driver.go's Run()
 func loadAvailableMounters() error {
+	// #nosec
 	fuseMounterProbe := exec.Command("ceph-fuse", "--version")
+	// #nosec
 	kernelMounterProbe := exec.Command("mount.ceph")

 	if fuseMounterProbe.Run() == nil {
@@ -47,14 +60,14 @@ func loadAvailableMounters() error {
 	}

 	if len(availableMounters) == 0 {
-		return fmt.Errorf("no ceph mounters found on system")
+		return errors.New("no ceph mounters found on system")
 	}

 	return nil
 }

 type volumeMounter interface {
-	mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error
+	mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error
 	name() string
 }

@@ -98,71 +111,84 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {

 type fuseMounter struct{}

-func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
+func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
 	args := [...]string{
 		mountPoint,
-		"-c", getCephConfPath(volId),
-		"-n", cephEntityClientPrefix + cr.id,
-		"--keyring", getCephKeyringPath(volId, cr.id),
+		"-m", volOptions.Monitors,
+		"-c", cephConfigPath,
+		"-n", cephEntityClientPrefix + cr.id, "--key=" + cr.key,
 		"-r", volOptions.RootPath,
 		"-o", "nonempty",
 	}

-	out, err := execCommand("ceph-fuse", args[:]...)
+	_, stderr, err := execCommand("ceph-fuse", args[:]...)
 	if err != nil {
-		return fmt.Errorf("cephfs: ceph-fuse failed with following error: %s\ncephfs: ceph-fuse output: %s", err, out)
+		return err
 	}

-	if !bytes.Contains(out, []byte("starting fuse")) {
-		return fmt.Errorf("cephfs: ceph-fuse failed:\ncephfs: ceph-fuse output: %s", out)
+	// Parse the output:
+	// We need "starting fuse" meaning the mount is ok
+	// and PID of the ceph-fuse daemon for unmount
+
+	match := fusePidRx.FindSubmatch(stderr)
+	if len(match) != 2 {
+		return fmt.Errorf("ceph-fuse failed: %s", stderr)
 	}

+	pid, err := strconv.Atoi(string(match[1]))
+	if err != nil {
+		return fmt.Errorf("failed to parse FUSE daemon PID: %v", err)
+	}
+
+	fusePidMapMtx.Lock()
+	fusePidMap[mountPoint] = pid
+	fusePidMapMtx.Unlock()
+
 	return nil
 }

-func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
+func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
 	if err := createMountPoint(mountPoint); err != nil {
 		return err
 	}

-	return mountFuse(mountPoint, cr, volOptions, volId)
+	return mountFuse(mountPoint, cr, volOptions)
 }

 func (m *fuseMounter) name() string { return "Ceph FUSE driver" }

 type kernelMounter struct{}

-func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
-	if err := execCommandAndValidate("modprobe", "ceph"); err != nil {
+func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
+	if err := execCommandErr("modprobe", "ceph"); err != nil {
 		return err
 	}

-	return execCommandAndValidate("mount",
+	return execCommandErr("mount",
 		"-t", "ceph",
 		fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
 		mountPoint,
-		"-o",
-		fmt.Sprintf("name=%s,secretfile=%s", cr.id, getCephSecretPath(volId, cr.id)),
+		"-o", fmt.Sprintf("name=%s,secret=%s", cr.id, cr.key),
 	)
 }

-func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
+func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
 	if err := createMountPoint(mountPoint); err != nil {
 		return err
 	}

-	return mountKernel(mountPoint, cr, volOptions, volId)
+	return mountKernel(mountPoint, cr, volOptions)
 }

 func (m *kernelMounter) name() string { return "Ceph kernel client" }

 func bindMount(from, to string, readOnly bool) error {
-	if err := execCommandAndValidate("mount", "--bind", from, to); err != nil {
+	if err := execCommandErr("mount", "--bind", from, to); err != nil {
 		return fmt.Errorf("failed to bind-mount %s to %s: %v", from, to, err)
 	}

 	if readOnly {
-		if err := execCommandAndValidate("mount", "-o", "remount,ro,bind", to); err != nil {
+		if err := execCommandErr("mount", "-o", "remount,ro,bind", to); err != nil {
 			return fmt.Errorf("failed read-only remount of %s: %v", to, err)
 		}
 	}
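mountFuse now confirms the mount and learns the daemon's PID by matching fusePidRx against ceph-fuse's stderr. A standalone sketch of what that regex extracts (the sample stderr text is illustrative, not captured output):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`)

func main() {
	// Hypothetical ceph-fuse startup chatter; the "starting fuse" line
	// both confirms the mount and carries the daemon PID.
	stderr := []byte("ceph-fuse[12345]: starting ceph client\nceph-fuse[12345]: starting fuse\n")

	match := fusePidRx.FindSubmatch(stderr)
	if len(match) != 2 {
		panic("mount not confirmed")
	}
	pid, err := strconv.Atoi(string(match[1]))
	if err != nil {
		panic(err)
	}
	fmt.Println(pid) // 12345
}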
@@ -171,7 +197,29 @@ func bindMount(from, to string, readOnly bool) error {
 }

 func unmountVolume(mountPoint string) error {
-	return execCommandAndValidate("umount", mountPoint)
+	if err := execCommandErr("umount", mountPoint); err != nil {
+		return err
+	}
+
+	fusePidMapMtx.Lock()
+	pid, ok := fusePidMap[mountPoint]
+	if ok {
+		delete(fusePidMap, mountPoint)
+	}
+	fusePidMapMtx.Unlock()
+
+	if ok {
+		p, err := os.FindProcess(pid)
+		if err != nil {
+			klog.Warningf("failed to find process %d: %v", pid, err)
+		} else {
+			if _, err = p.Wait(); err != nil {
+				klog.Warningf("%d is not a child process: %v", pid, err)
+			}
+		}
+	}
+
+	return nil
 }

 func createMountPoint(root string) error {
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user