Merge remote-tracking branch 'ceph/csi-v1.0' into csi-v1.0

Miao Zhou 2019-02-14 18:54:00 +08:00
commit f9d30a4c68
14573 changed files with 106249 additions and 3359580 deletions


@ -1,27 +1,54 @@
---
# need for docker build
sudo: true
addons:
apt:
packages:
- realpath
- ruby
language: go
branches:
only:
- master
- csi-v1.0
go: 1.9.x
go: 1.11.x
before_script:
- GO_FILES=$(find . -iname '*.go' -type f | grep -v /vendor/)
- go get -u golang.org/x/lint/golint #go get github.com/golang/lint/golint
env:
global:
- GO_METALINTER_VERSION="v3.0.0"
- TEST_COVERAGE=stdout
- GO_METALINTER_THREADS=1
- GO_COVER_DIR=_output
script:
- test -z $(gofmt -s -l $GO_FILES)
- go vet -v $(go list ./... | grep -v /vendor/)
jobs:
include:
- name: Linter
install:
- gem install mdl
- pip install --user --upgrade pip
- pip install --user yamllint
# install gometalinter
- curl -L
"https://raw.githubusercontent.com/alecthomas/gometalinter/"${GO_METALINTER_VERSION}"/scripts/install.sh"
| bash -s -- -b $GOPATH/bin "${GO_METALINTER_VERSION}"
script:
- scripts/lint-text.sh --require-all
- scripts/lint-go.sh
- scripts/test-go.sh
- name: rbdplugin
script:
- make rbdplugin
- name: cephfsplugin
script:
- make cephfsplugin
deploy:
- provider: script
script:
- ./deploy.sh
on:
on: # yamllint disable-line rule:truthy
all_branches: true
script: ./deploy.sh
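The reworked Travis configuration splits CI into a Linter job and per-plugin build jobs, all of which call into scripts under `scripts/`. The same checks can be reproduced locally; a minimal sketch, assuming `mdl`, `yamllint`, and gometalinter are already installed as in the `install:` steps above:

```bash
# Reproduce the Travis "Linter" job locally, from the repository root.
# Assumes mdl, yamllint and gometalinter are on PATH (see install: above).
./scripts/lint-text.sh --require-all   # markdown/yaml text linting (mdl, yamllint)
./scripts/lint-go.sh                   # gometalinter over the Go sources
./scripts/test-go.sh                   # Go unit tests with coverage
```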

Gopkg.lock generated

@ -2,25 +2,37 @@
[[projects]]
digest = "1:93147eb1d6f08d39f2c0efe3d29ee043bda72be7a8b3b367eb08c72c18524638"
digest = "1:94ffc0947c337d618b6ff5ed9abaddc1217b090c1b3a1ae4739b35b7b25851d5"
name = "github.com/container-storage-interface/spec"
packages = ["lib/go/csi"]
pruneopts = ""
pruneopts = "NUT"
revision = "ed0bb0e1557548aa028307f48728767cfe8f6345"
version = "v1.0.0"
[[projects]]
digest = "1:abea725bcf0210887f5da19d804fffa1dd45a42a56bdf5f02322345e3fee4f0d"
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys",
]
pruneopts = "NUT"
revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a"
digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = ""
pruneopts = "NUT"
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
digest = "1:bff0ce7c8e3d6357fa5a8549bbe4bdb620bddc13c11ae569aa7248ea92e2139f"
name = "github.com/golang/protobuf"
packages = [
"descriptor",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
@ -29,48 +41,160 @@
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = ""
pruneopts = "NUT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
digest = "1:a25a2c5ae694b01713fb6cd03c3b1ac1ccc1902b9f0a922680a88ec254f968e1"
branch = "master"
digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
name = "github.com/google/btree"
packages = ["."]
pruneopts = "NUT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
branch = "master"
digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "NUT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
name = "github.com/google/uuid"
packages = ["."]
pruneopts = ""
pruneopts = "NUT"
revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:01f6264649510cce626f907688df44f80c8ba788d064756701d85c51f21eb0d6"
name = "github.com/kubernetes-csi/drivers"
packages = ["pkg/csi-common"]
pruneopts = ""
revision = "8a7f2d3a4057ddcff31b7898bd4413dfdf3a3b67"
digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions",
]
pruneopts = "NUT"
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
version = "v0.2.0"
[[projects]]
digest = "1:a5484d4fa43127138ae6e7b2299a6a52ae006c7f803d98d717f60abf3e97192e"
branch = "master"
digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = "NUT"
revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"
[[projects]]
digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "NUT"
revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
version = "v0.3.6"
[[projects]]
digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "NUT"
revision = "1624edc4454b8682399def8740d46db5e4362ba4"
version = "v1.1.5"
[[projects]]
digest = "1:2b060bb1a39127e592baf9ab62ec1e94100dc22107f915183f3cd1f6d1cd579a"
name = "github.com/kubernetes-csi/csi-lib-utils"
packages = ["protosanitizer"]
pruneopts = "NUT"
revision = "5853414e1d4771302e0df10d1870c444c2135799"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:0bde3fb932a1aa4e12bc43ef91157fcda27dd0fc5d9f309647544ceaec075f48"
name = "github.com/kubernetes-csi/drivers"
packages = ["pkg/csi-common"]
pruneopts = "NUT"
revision = "05e1ea84df03b90296869812fa42f4244bd5ab53"
[[projects]]
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = "NUT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = "NUT"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
name = "github.com/pborman/uuid"
packages = ["."]
pruneopts = ""
pruneopts = "NUT"
revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
version = "v1.2"
[[projects]]
digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
branch = "master"
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = "NUT"
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = "NUT"
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = ""
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
pruneopts = "NUT"
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
version = "v0.8.1"
[[projects]]
digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
branch = "master"
digest = "1:ea539c13b066dac72a940b62f37600a20ab8e88057397c78f3197c1a48475425"
digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"
[[projects]]
branch = "master"
digest = "1:d4e37d487310720926343302a747f3f9e8f020e5fe961190c57ce437159a6e63"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
@ -78,19 +202,33 @@
"internal/timeseries",
"trace",
]
pruneopts = ""
revision = "351d144fa1fc0bd934e2408202be0c29f25e35a0"
pruneopts = "NUT"
revision = "915654e7eabcea33ae277abbecf52f0d8b7a9fdc"
[[projects]]
branch = "master"
digest = "1:f358024b019f87eecaadcb098113a40852c94fe58ea670ef3c3e2d2c7bd93db1"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = ""
revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31"
digest = "1:293b8e3359faf71cc5f85f3c144115ca6540396f4e1ff0fa30cd014c04258c30"
name = "golang.org/x/oauth2"
packages = [
".",
"internal",
]
pruneopts = "NUT"
revision = "36a7019397c4c86cf59eeab3bc0d188bac444277"
[[projects]]
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
branch = "master"
digest = "1:0f0298002380ddbc31230516fc1dc354ff466e607823e9122d69cce79310bdc9"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "a457fd036447854c0c02e89ea439481bdcf941a2"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
name = "golang.org/x/text"
packages = [
"collate",
@ -108,37 +246,66 @@
"unicode/norm",
"unicode/rangetable",
]
pruneopts = ""
pruneopts = "NUT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:5fc6c317675b746d0c641b29aa0aab5fcb403c0d07afdbf0de86b0d447a0502a"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = ""
revision = "bd91e49a0898e27abb88c339b432fa53d7497ac0"
digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "NUT"
revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
[[projects]]
digest = "1:1293087271e314cfa2b3decededba2ecba0ff327e7b7809e00f73f616449191c"
digest = "1:34c10243da5972105edd1b4b883e2bd918fbb3f73fbe14d6af6929e547173494"
name = "google.golang.org/appengine"
packages = [
"internal",
"internal/base",
"internal/datastore",
"internal/log",
"internal/remote_api",
"internal/urlfetch",
"urlfetch",
]
pruneopts = "NUT"
revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
version = "v1.4.0"
[[projects]]
branch = "master"
digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "NUT"
revision = "db91494dd46c1fdcbbde05e5ff5eb56df8f7d79a"
[[projects]]
digest = "1:638e6e596d67d0a0c8aeb76ebdcf73561b701ea43f21963b1db231d96ed7db68"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"binarylog/grpc_binarylog_v1",
"codes",
"connectivity",
"credentials",
"credentials/internal",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/binarylog",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/grpcsync",
"internal/syscall",
"internal/transport",
"keepalive",
"metadata",
@ -151,32 +318,186 @@
"status",
"tap",
]
pruneopts = ""
revision = "2e463a05d100327ca47ac218281906921038fd95"
version = "v1.16.0"
pruneopts = "NUT"
revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8"
version = "v1.17.0"
[[projects]]
digest = "1:66b0292f815d508d11ed5fe94fdeb0bcc5a988703a08e73bf3cb3a415de676cf"
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "NUT"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "NUT"
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[[projects]]
digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"auditregistration/v1alpha1",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"autoscaling/v2beta2",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"coordination/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = "NUT"
revision = "67edc246be36579e46a89e29a2f165d47e012109"
version = "kubernetes-1.13.2"
[[projects]]
digest = "1:a2da0cbc8dfda27eeffa54b53195e607497c6cac737d17f45a667963aeae5f02"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/clock",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/naming",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/reflect",
]
pruneopts = ""
pruneopts = "NUT"
revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
version = "kubernetes-1.13.0"
version = "kubernetes-1.13.2"
[[projects]]
digest = "1:4f5eb833037cc0ba0bf8fe9cae6be9df62c19dd1c869415275c708daa8ccfda5"
digest = "1:638623327cb201b425a328d0bddb3379b05eb05ef4cab589380f0be07ac1dc17"
name = "k8s.io/client-go"
packages = [
"discovery",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/auditregistration/v1alpha1",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta2",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/coordination/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"rest",
"rest/watch",
"tools/auth",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/reference",
"transport",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
]
pruneopts = "NUT"
revision = "6bf63545bd0257ed9e701ad95307ffa51b4407c0"
version = "kubernetes-1.13.2"
[[projects]]
digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b"
name = "k8s.io/klog"
packages = ["."]
pruneopts = ""
pruneopts = "NUT"
revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
version = "v0.1.0"
[[projects]]
digest = "1:a53c39a815a31aceb509f9987d7d8c65e74fcb1fcd5077eaf723a8defec1af90"
digest = "1:cf54450b967dcae4f270dfa44395b01175e5358bbd79fe8a6073b13f220f1c2e"
name = "k8s.io/kubernetes"
packages = [
"pkg/util/file",
@ -185,24 +506,32 @@
"pkg/util/mount",
"pkg/util/nsenter",
]
pruneopts = ""
revision = "ddf47ac13c1a9483ea035a79cd7c10005ff21a6d"
version = "v1.13.0"
pruneopts = "NUT"
revision = "cff46ab41ff0bb44d8584413b598ad8360ec1def"
version = "v1.13.2"
[[projects]]
branch = "master"
digest = "1:bea542e853f98bfcc80ecbe8fe0f32bc52c97664102aacdd7dca676354ef2faa"
digest = "1:381323c2fe2e890a3dd3b5d6dc6f2199068408cca89b24f6b7ca1c60f32644a5"
name = "k8s.io/utils"
packages = ["exec"]
pruneopts = ""
revision = "0d26856f57b32ec3398579285e5c8a2bfe8c5243"
pruneopts = "NUT"
revision = "8a16e7dd8fb6d97d1331b0c79a16722f934b00b1"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
name = "sigs.k8s.io/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
version = "v1.1.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/container-storage-interface/spec/lib/go/csi",
"github.com/golang/glog",
"github.com/golang/protobuf/ptypes",
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/kubernetes-csi/drivers/pkg/csi-common",
"github.com/pborman/uuid",
@ -210,8 +539,15 @@
"golang.org/x/net/context",
"google.golang.org/grpc/codes",
"google.golang.org/grpc/status",
"k8s.io/api/core/v1",
"k8s.io/apimachinery/pkg/api/errors",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/util/sets",
"k8s.io/apimachinery/pkg/util/wait",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/rest",
"k8s.io/client-go/tools/clientcmd",
"k8s.io/klog",
"k8s.io/kubernetes/pkg/util/keymutex",
"k8s.io/kubernetes/pkg/util/mount",
"k8s.io/kubernetes/pkg/util/nsenter",


@ -6,10 +6,6 @@
branch = "master"
name = "github.com/kubernetes-csi/drivers"
[[constraint]]
branch = "master"
name = "github.com/golang/glog"
[[override]]
revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d"
name = "github.com/docker/distribution"
@ -19,17 +15,26 @@
version = "1.10.0"
[[constraint]]
version = "kubernetes-1.13.0"
version = "kubernetes-1.13.2"
name = "k8s.io/apimachinery"
[[constraint]]
name = "k8s.io/kubernetes"
version = "v1.13.0"
version = "v1.13.2"
[[override]]
version = "kubernetes-1.13.0"
version = "kubernetes-1.13.2"
name = "k8s.io/api"
[[override]]
name = "github.com/golang/protobuf"
version = "1.1.0"
[[constraint]]
name = "k8s.io/client-go"
version = "kubernetes-1.13.2"
[prune]
go-tests = true
non-go = true
unused-packages = true


@ -25,21 +25,26 @@ $(info cephfs image settings: $(CEPHFS_IMAGE_NAME) version $(CEPHFS_IMAGE_VERSIO
all: rbdplugin cephfsplugin
test:
go test github.com/ceph/ceph-csi/pkg/... -cover
go vet github.com/ceph/ceph-csi/pkg/...
test: go-test static-check
go-test:
./scripts/test-go.sh
static-check:
./scripts/lint-go.sh
./scripts/lint-text.sh
rbdplugin:
if [ ! -d ./vendor ]; then dep ensure; fi
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./rbd
if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./cmd/rbd
image-rbdplugin: rbdplugin
cp _output/rbdplugin deploy/rbd/docker
docker build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker
cephfsplugin:
if [ ! -d ./vendor ]; then dep ensure; fi
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cephfs
if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cmd/cephfs
image-cephfsplugin: cephfsplugin
cp _output/cephfsplugin deploy/cephfs/docker
@ -55,3 +60,5 @@ clean:
go clean -r -x
rm -f deploy/rbd/docker/rbdplugin
rm -f deploy/cephfs/docker/cephfsplugin
rm -f _output/rbdplugin
rm -f _output/cephfsplugin
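With the entry points moved under `cmd/`, the build targets now vendor dependencies with `dep ensure -vendor-only` and compile static binaries into `_output/`. A short sketch of the local workflow these targets imply, assuming `dep` and Docker are installed:

```bash
# Populate vendor/ strictly from Gopkg.lock, as the rbdplugin/cephfsplugin targets do:
dep ensure -vendor-only

# Build the static binaries from the new cmd/ entry points:
make rbdplugin        # -> _output/rbdplugin    (built from ./cmd/rbd)
make cephfsplugin     # -> _output/cephfsplugin (built from ./cmd/cephfs)

# Package them into container images:
make image-rbdplugin image-cephfsplugin
```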


@ -1,12 +1,22 @@
# Ceph CSI 1.0.0
[Container Storage Interface (CSI)](https://github.com/container-storage-interface/) driver, provisioner, and attacher for Ceph RBD and CephFS.
[Container Storage Interface
(CSI)](https://github.com/container-storage-interface/) driver, provisioner,
and attacher for Ceph RBD and CephFS.
## Overview
Ceph CSI plugins implement an interface between CSI enabled Container Orchestrator (CO) and CEPH cluster. It allows dynamically provisioning CEPH volumes and attaching them to workloads. Current implementation of Ceph CSI plugins was tested in Kubernetes environment (requires Kubernetes 1.11+), but the code does not rely on any Kubernetes specific calls (WIP to make it k8s agnostic) and should be able to run with any CSI enabled CO.
Ceph CSI plugins implement an interface between CSI enabled Container
Orchestrator (CO) and CEPH cluster.
It allows dynamically provisioning CEPH volumes and attaching them to
workloads.
Current implementation of Ceph CSI plugins was tested in Kubernetes
environment (requires Kubernetes 1.13+), but the code does not rely on
any Kubernetes specific calls (WIP to make it k8s agnostic) and
should be able to run with any CSI enabled CO.
For details about configuration and deployment of RBD and CephFS CSI plugins, see documentation in `docs/`.
For details about configuration and deployment of RBD and
CephFS CSI plugins, see documentation in `docs/`.
For example usage of RBD and CephFS CSI plugins, see examples in `examples/`.
@ -14,3 +24,6 @@ For example usage of RBD and CephFS CSI plugins, see examples in `examples/`.
Please submit an issue at: [Issues](https://github.com/ceph/ceph-csi/issues)
## Slack Channels
Join us at [Rook ceph-csi Channel](https://rook-io.slack.com/messages/CG3HUV94J/details/)
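For orientation, the Kubernetes manifests changed later in this commit wire the plugins up with dedicated service accounts and RBAC. A hedged deployment sketch for the CephFS plugin follows; the `deploy/cephfs/kubernetes/` directory is an assumption about the repository layout, not something this diff confirms:

```bash
# Hypothetical rollout of the CephFS CSI plugin; the manifest path is an assumption.
kubectl create -f deploy/cephfs/kubernetes/
# Attacher, provisioner, and node-plugin pods from the manifests below:
kubectl get pods -l 'app in (csi-cephfsplugin, csi-cephfsplugin-attacher, csi-cephfsplugin-provisioner)'
```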


@ -1,60 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"path"
"github.com/ceph/ceph-csi/pkg/cephfs"
"github.com/golang/glog"
)
func init() {
flag.Set("logtostderr", "true")
}
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "csi-cephfsplugin", "name of the driver")
nodeId = flag.String("nodeid", "", "node id")
volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
)
func main() {
flag.Parse()
if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "controller")); err != nil {
glog.Errorf("failed to create persistent storage for controller: %v", err)
os.Exit(1)
}
if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "node")); err != nil {
glog.Errorf("failed to create persistent storage for node: %v", err)
os.Exit(1)
}
driver := cephfs.NewCephFSDriver()
driver.Run(*driverName, *nodeId, *endpoint, *volumeMounter)
os.Exit(0)
}
func createPersistentStorage(persistentStoragePath string) error {
return os.MkdirAll(persistentStoragePath, os.FileMode(0755))
}

cmd/cephfs/main.go Normal file

@ -0,0 +1,64 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"path"
"github.com/ceph/ceph-csi/pkg/cephfs"
"github.com/ceph/ceph-csi/pkg/util"
"k8s.io/klog"
)
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "csi-cephfsplugin", "name of the driver")
nodeID = flag.String("nodeid", "", "node id")
volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
)
func main() {
util.InitLogging()
if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "controller")); err != nil {
klog.Errorf("failed to create persistent storage for controller: %v", err)
os.Exit(1)
}
if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "node")); err != nil {
klog.Errorf("failed to create persistent storage for node: %v", err)
os.Exit(1)
}
cp, err := util.NewCachePersister(*metadataStorage, *driverName)
if err != nil {
klog.Errorf("failed to define cache persistence method: %v", err)
os.Exit(1)
}
driver := cephfs.NewDriver()
driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, cp)
os.Exit(0)
}
func createPersistentStorage(persistentStoragePath string) error {
return os.MkdirAll(persistentStoragePath, os.FileMode(0755))
}
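The CephFS entry point now lives under `cmd/cephfs`, logs through klog via `util.InitLogging()`, and gains a `--metadatastorage` flag backed by `util.NewCachePersister`. A hedged sketch of invoking the binary by hand, mirroring the arguments the manifests in this commit pass; the socket path and node id are illustrative:

```bash
# Illustrative manual run of the CephFS plugin; values are examples only.
./_output/cephfsplugin \
  --nodeid="$(hostname)" \
  --endpoint=unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock \
  --drivername=csi-cephfsplugin \
  --volumemounter=kernel \
  --metadatastorage=node \
  --v=5
```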

cmd/rbd/main.go Normal file

@ -0,0 +1,70 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"path"
"github.com/ceph/ceph-csi/pkg/rbd"
"github.com/ceph/ceph-csi/pkg/util"
"k8s.io/klog"
)
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "csi-rbdplugin", "name of the driver")
nodeID = flag.String("nodeid", "", "node id")
containerized = flag.Bool("containerized", true, "whether run as containerized")
metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
)
func main() {
util.InitLogging()
if err := createPersistentStorage(path.Join(rbd.PluginFolder, "controller")); err != nil {
klog.Errorf("failed to create persistent storage for controller %v", err)
os.Exit(1)
}
if err := createPersistentStorage(path.Join(rbd.PluginFolder, "node")); err != nil {
klog.Errorf("failed to create persistent storage for node %v", err)
os.Exit(1)
}
cp, err := util.NewCachePersister(*metadataStorage, *driverName)
if err != nil {
klog.Errorf("failed to define cache persistence method: %v", err)
os.Exit(1)
}
driver := rbd.NewDriver()
driver.Run(*driverName, *nodeID, *endpoint, *containerized, cp)
os.Exit(0)
}
func createPersistentStorage(persistentStoragePath string) error {
if _, err := os.Stat(persistentStoragePath); os.IsNotExist(err) {
if err = os.MkdirAll(persistentStoragePath, os.FileMode(0755)); err != nil {
return err
}
} else {
return err
}
return nil
}
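The RBD entry point mirrors the CephFS one, swapping `--volumemounter` for `--containerized` (default `true`, which per the flag help indicates the plugin runs inside a container). An analogous illustrative invocation, with example values only:

```bash
# Illustrative manual run of the RBD plugin; values are examples only.
./_output/rbdplugin \
  --nodeid="$(hostname)" \
  --endpoint=unix://var/lib/kubelet/plugins/csi-rbdplugin/csi.sock \
  --drivername=csi-rbdplugin \
  --containerized=false \
  --metadatastorage=node \
  --v=5
```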


@ -1,18 +1,53 @@
#!/bin/bash
if [ "${TRAVIS_BRANCH}" == 'master' ]; then
export RBD_IMAGE_VERSION='v0.3.0';
export CEPHFS_IMAGE_VERSION='v0.3.0';
export RBD_IMAGE_VERSION='v0.3.0'
export CEPHFS_IMAGE_VERSION='v0.3.0'
elif [ "${TRAVIS_BRANCH}" == 'csi-v1.0' ]; then
export RBD_IMAGE_VERSION='v1.0.0';
export CEPHFS_IMAGE_VERSION='v1.0.0';
export RBD_IMAGE_VERSION='v1.0.0'
export CEPHFS_IMAGE_VERSION='v1.0.0'
else
echo "!!! Branch ${TRAVIS_BRANCH} is not a deployable branch; exiting";
exit 0; # Exiting 0 so that this isn't marked as failing
fi;
echo "!!! Branch ${TRAVIS_BRANCH} is not a deployable branch; exiting"
exit 0 # Exiting 0 so that this isn't marked as failing
fi
if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
docker login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
make push-image-rbdplugin push-image-cephfsplugin
fi;
set -xe
mkdir -p tmp
pushd tmp >/dev/null
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get >get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
git clone https://github.com/ceph/csi-charts
mkdir -p csi-charts/docs
popd >/dev/null
CHANGED=0
VERSION=$(grep 'version:' deploy/rbd/helm/Chart.yaml | awk '{print $2}')
if [ ! -f "tmp/csi-charts/docs/rbd/ceph-csi-rbd-$VERSION.tgz" ]; then
CHANGED=1
ln -s helm deploy/rbd/ceph-csi-rbd
mkdir -p tmp/csi-charts/docs/rbd
pushd tmp/csi-charts/docs/rbd >/dev/null
helm init --client-only
helm package ../../../../deploy/rbd/ceph-csi-rbd
popd >/dev/null
fi
if [ $CHANGED -eq 1 ]; then
pushd tmp/csi-charts/docs >/dev/null
helm repo index .
git add --all :/ && git commit -m "Update repo"
git push https://"$GITHUB_TOKEN"@github.com/ceph/csi-charts
popd >/dev/null
fi
fi
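The reworked deploy.sh is driven entirely by Travis-provided environment variables and, when the chart version changed, pushes an updated Helm repo index to ceph/csi-charts. A hedged sketch of what a local run would need; every value below is a placeholder:

```bash
# Hypothetical local invocation; all values are placeholders, not real credentials.
export TRAVIS_BRANCH='csi-v1.0'
export TRAVIS_PULL_REQUEST='false'
export QUAY_IO_USERNAME='<quay-user>'
export QUAY_IO_PASSWORD='<quay-password>'
export GITHUB_TOKEN='<token-with-push-access-to-ceph/csi-charts>'
./deploy.sh
```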


@ -1,13 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
name: cephfs-csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
name: cephfs-external-attacher-runner
rules:
- apiGroups: [""]
resources: ["events"]
@ -26,12 +27,12 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
name: cephfs-csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
name: cephfs-csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
name: cephfs-external-attacher-runner
apiGroup: rbac.authorization.k8s.io


@ -1,3 +1,4 @@
---
kind: Service
apiVersion: v1
metadata:
@ -24,7 +25,7 @@ spec:
labels:
app: csi-cephfsplugin-attacher
spec:
serviceAccount: csi-attacher
serviceAccount: cephfs-csi-attacher
containers:
- name: csi-cephfsplugin-attacher
image: quay.io/k8scsi/csi-attacher:v1.0.1
@ -33,13 +34,13 @@ spec:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins_registry/csi-cephfsplugin/csi.sock
value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
path: /var/lib/kubelet/plugins/csi-cephfsplugin
type: DirectoryOrCreate


@ -1,3 +1,4 @@
---
kind: Service
apiVersion: v1
metadata:
@ -24,23 +25,65 @@ spec:
labels:
app: csi-cephfsplugin-provisioner
spec:
serviceAccount: csi-provisioner
serviceAccount: cephfs-csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.0.1
args:
- "--provisioner=csi-cephfsplugin"
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins_registry/csi-cephfsplugin/csi.sock
value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
- name: csi-cephfsplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
image: quay.io/cephcsi/cephfsplugin:v1.0.0
args:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=csi-cephfsplugin"
- "--metadatastorage=k8s_configmap"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
path: /var/lib/kubelet/plugins/csi-cephfsplugin
type: DirectoryOrCreate
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev


@ -1,3 +1,4 @@
---
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
@ -11,7 +12,7 @@ spec:
labels:
app: csi-cephfsplugin
spec:
serviceAccount: csi-nodeplugin
serviceAccount: cephfs-csi-nodeplugin
hostNetwork: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
@ -21,20 +22,24 @@ spec:
image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock"
lifecycle:
preStop:
exec:
command: [
"/bin/sh", "-c",
"rm -rf /registration/csi-cephfsplugin \
/registration/csi-cephfsplugin-reg.sock"
]
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins_registry/csi-cephfsplugin/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins_registry/csi-cephfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: csi-cephfsplugin
@ -44,22 +49,27 @@ spec:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephfsplugin:v1.0.0
args :
args:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=csi-cephfsplugin"
- "--metadatastorage=k8s_configmap"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix://var/lib/kubelet/plugins_registry/csi-cephfsplugin/csi.sock
value: unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
- name: csi-plugins-dir
mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
mountPropagation: "Bidirectional"
@ -76,7 +86,7 @@ spec:
volumes:
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
path: /var/lib/kubelet/plugins/csi-cephfsplugin/
type: DirectoryOrCreate
- name: csi-plugins-dir
hostPath:
@ -90,10 +100,6 @@ spec:
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-cephfsplugin
type: DirectoryOrCreate
- name: host-sys
hostPath:
path: /sys


@ -1,13 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin
name: cephfs-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
name: cephfs-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
@ -21,17 +22,20 @@ rules:
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
name: cephfs-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: csi-nodeplugin
name: cephfs-csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-nodeplugin
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io


@ -1,13 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
name: cephfs-csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
name: cephfs-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
@ -24,17 +25,20 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
name: cephfs-csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io


@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@ -0,0 +1,14 @@
---
apiVersion: v1
appVersion: "1.0.0"
description: "Container Storage Interface (CSI) driver,
provisioner, snapshotter, and attacher for Ceph RBD"
name: ceph-csi-rbd
version: 0.4.0
keywords:
- ceph
- rbd
- ceph-csi
home: https://github.com/ceph/ceph-csi
sources:
- https://github.com/ceph/ceph-csi/tree/csi-v1.0/deploy/rbd/helm

deploy/rbd/helm/README.md Normal file

@ -0,0 +1,24 @@
# ceph-csi-rbd
The ceph-csi-rbd chart adds rbd volume support to your cluster.
## Install Chart
To install the Chart into your Kubernetes cluster :
```bash
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" ceph-csi/ceph-csi-rbd
```
After installation succeeds, you can get a status of Chart
```bash
helm status "ceph-csi-rbd"
```
If you want to delete your Chart, use this command:
```bash
helm delete --purge "ceph-csi-rbd"
```
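The chart's defaults live in `deploy/rbd/helm/values.yaml`, added later in this commit; image repositories, tags, and the kubelet socket paths can be overridden at install time. A hedged example building on the install command above:

```bash
# Override selected defaults from values.yaml at install time.
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" ceph-csi/ceph-csi-rbd \
  --set nodeplugin.plugin.image.tag=v1.0.0 \
  --set attacher.image.pullPolicy=IfNotPresent \
  --set socketDir=/var/lib/kubelet/plugins/csi-rbdplugin
```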


@ -0,0 +1,2 @@
Examples on how to configure a storage class and start using the driver are here:
https://github.com/ceph/ceph-csi/tree/csi-v1.0/examples/rbd


@ -0,0 +1,119 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ceph-csi-rbd.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.attacher.fullname" -}}
{{- if .Values.attacher.fullnameOverride -}}
{{- .Values.attacher.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.attacher.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.attacher.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.nodeplugin.fullname" -}}
{{- if .Values.nodeplugin.fullnameOverride -}}
{{- .Values.nodeplugin.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-rbd.provisioner.fullname" -}}
{{- if .Values.provisioner.fullnameOverride -}}
{{- .Values.provisioner.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ceph-csi-rbd.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-rbd.serviceAccountName.attacher" -}}
{{- if .Values.serviceAccounts.attacher.create -}}
{{ default (include "ceph-csi-rbd.attacher.fullname" .) .Values.serviceAccounts.attacher.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.attacher.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-rbd.serviceAccountName.nodeplugin" -}}
{{- if .Values.serviceAccounts.nodeplugin.create -}}
{{ default (include "ceph-csi-rbd.nodeplugin.fullname" .) .Values.serviceAccounts.nodeplugin.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.nodeplugin.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-rbd.serviceAccountName.provisioner" -}}
{{- if .Values.serviceAccounts.provisioner.create -}}
{{ default (include "ceph-csi-rbd.provisioner.fullname" .) .Values.serviceAccounts.provisioner.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.provisioner.name }}
{{- end -}}
{{- end -}}


@ -0,0 +1,25 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.attacher.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
{{- end -}}


@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.attacher.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.attacher" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-rbd.attacher.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}


@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-rbd.attacher.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
ports:
- name: dummy
port: 12345


@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.attacher.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-rbd.serviceAccountName.attacher" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}


@ -0,0 +1,60 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: {{ include "ceph-csi-rbd.attacher.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceName: {{ include "ceph-csi-rbd.attacher.fullname" . }}
replicas: {{ .Values.attacher.replicas }}
selector:
matchLabels:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.attacher.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.attacher" . }}
containers:
- name: csi-rbdplugin-attacher
image: "{{ .Values.attacher.image.repository }}:{{ .Values.attacher.image.tag }}"
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
imagePullPolicy: {{ .Values.attacher.image.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: {{ .Values.socketDir }}
resources:
{{ toYaml .Values.attacher.resources | indent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.socketDir }}
type: DirectoryOrCreate
{{- if .Values.attacher.affinity -}}
affinity:
{{ toYaml .Values.attacher.affinity . | indent 8 }}
{{- end -}}
{{- if .Values.attacher.nodeSelector -}}
nodeSelector:
{{ toYaml .Values.attacher.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.attacher.tolerations -}}
tolerations:
{{ toYaml .Values.attacher.tolerations | indent 8 }}
{{- end -}}


@ -0,0 +1,28 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
{{- end -}}


@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}


@ -0,0 +1,140 @@
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
matchLabels:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
hostNetwork: true
hostPID: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
# resolved through k8s service, set dns policy to cluster first
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: driver-registrar
image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}"
args:
- "--v=5"
- "--csi-address=/csi/{{ .Values.socketFile }}"
- "--kubelet-registration-path={{ .Values.socketDir }}/{{ .Values.socketFile }}"
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/csi-rbdplugin /registration/csi-rbdplugin-reg.sock"]
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: {{ .Values.nodeplugin.registrar.image.imagePullPolicy }}
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
{{ toYaml .Values.nodeplugin.registrar.resources | indent 12 }}
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
args :
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=csi-rbdplugin"
- "--containerized=true"
- "--metadatastorage=k8s_configmap"
env:
- name: HOST_ROOTFS
value: "/rootfs"
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
volumeMounts:
- name: plugin-dir
mountPath: {{ .Values.socketDir }}
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: plugin-mount-dir
mountPath: {{ .Values.volumeDevicesDir }}
mountPropagation: "Bidirectional"
- mountPath: /dev
name: host-dev
- mountPath: /rootfs
name: host-rootfs
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes:
- name: plugin-dir
hostPath:
path: {{ .Values.socketDir }}
type: DirectoryOrCreate
- name: plugin-mount-dir
hostPath:
path: {{ .Values.volumeDevicesDir }}
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: {{ .Values.registrationDir }}
type: Directory
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: host-dev
hostPath:
path: /dev
- name: host-rootfs
hostPath:
path: /
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
{{- if .Values.nodeplugin.affinity -}}
affinity:
{{ toYaml .Values.nodeplugin.affinity . | indent 8 }}
{{- end -}}
{{- if .Values.nodeplugin.nodeSelector -}}
nodeSelector:
{{ toYaml .Values.nodeplugin.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.nodeplugin.tolerations -}}
tolerations:
{{ toYaml .Values.nodeplugin.tolerations | indent 8 }}
{{- end -}}


@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.nodeplugin.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-rbd.serviceAccountName.nodeplugin" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}


@ -0,0 +1,46 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "create", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create"]
{{- end -}}


@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}


@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "delete"]
{{- end -}}


@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
namespace: {{ .Release.Namespace }}
{{- end -}}


@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
ports:
- name: dummy
port: 12345


@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.provisioner.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}


@ -0,0 +1,110 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceName: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
replicas: {{ .Values.provisioner.replicas }}
selector:
matchLabels:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "ceph-csi-rbd.name" . }}
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
containers:
- name: csi-provisioner
image: "{{ .Values.provisioner.image.repository }}:{{ .Values.provisioner.image.tag }}"
args:
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
imagePullPolicy: {{ .Values.provisioner.image.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: {{ .Values.socketDir }}
resources:
{{ toYaml .Values.provisioner.resources | indent 12 }}
- name: csi-snapshotter
image: {{ .Values.snapshotter.image.repository }}:{{ .Values.snapshotter.image.tag }}
          imagePullPolicy: {{ .Values.snapshotter.image.pullPolicy }}
args:
- "--csi-address=$(ADDRESS)"
- "--connection-timeout=15s"
- "--v=5"
env:
- name: ADDRESS
value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: {{ .Values.socketDir }}
resources:
{{ toYaml .Values.snapshotter.resources | indent 12 }}
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=csi-rbdplugin"
- "--containerized=true"
- "--metadatastorage=k8s_configmap"
env:
- name: HOST_ROOTFS
value: "/rootfs"
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: {{ .Values.socketDir }}
- name: host-rootfs
mountPath: "/rootfs"
resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
volumes:
- name: socket-dir
emptyDir: {}
#FIXME this seems way too much. Why is it needed at all for this?
- name: host-rootfs
hostPath:
path: /
{{- if .Values.provisioner.affinity -}}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 }}
{{- end -}}
{{- if .Values.provisioner.nodeSelector -}}
nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.provisioner.tolerations -}}
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 }}
{{- end -}}

View File

@ -0,0 +1,88 @@
---
rbac:
create: true
serviceAccounts:
attacher:
create: true
name:
nodeplugin:
create: true
name:
provisioner:
create: true
name:
socketDir: /var/lib/kubelet/plugins/csi-rbdplugin
socketFile: csi.sock
registrationDir: /var/lib/kubelet/plugins_registry
volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
attacher:
name: attacher
replicaCount: 1
image:
repository: quay.io/k8scsi/csi-attacher
tag: v1.0.1
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
nodeplugin:
name: nodeplugin
registrar:
image:
repository: quay.io/k8scsi/csi-node-driver-registrar
tag: v1.0.2
pullPolicy: IfNotPresent
resources: {}
plugin:
image:
repository: quay.io/cephcsi/rbdplugin
tag: v1.0.0
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
provisioner:
name: provisioner
replicaCount: 1
image:
repository: quay.io/k8scsi/csi-provisioner
tag: v1.0.1
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
snapshotter:
image:
repository: quay.io/k8scsi/csi-snapshotter
tag: v1.0.1
pullPolicy: IfNotPresent
resources: {}

View File

@ -1,13 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
name: rbd-csi-attacher
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
name: rbd-external-attacher-runner
rules:
- apiGroups: [""]
resources: ["events"]
@ -26,12 +27,12 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
name: rbd-csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
name: rbd-csi-attacher
namespace: default
roleRef:
kind: ClusterRole
name: external-attacher-runner
name: rbd-external-attacher-runner
apiGroup: rbac.authorization.k8s.io

View File

@ -1,13 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin
name: rbd-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
name: rbd-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
@ -21,17 +22,20 @@ rules:
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: csi-nodeplugin
name: rbd-csi-nodeplugin
namespace: default
roleRef:
kind: ClusterRole
name: csi-nodeplugin
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io

View File

@ -1,13 +1,14 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
name: rbd-csi-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
name: rbd-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
@ -24,17 +25,35 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "create", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
name: rbd-csi-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: external-provisioner-runner
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io

View File

@ -1,3 +1,4 @@
---
kind: Service
apiVersion: v1
metadata:
@ -24,7 +25,7 @@ spec:
labels:
app: csi-rbdplugin-attacher
spec:
serviceAccount: csi-attacher
serviceAccount: rbd-csi-attacher
containers:
- name: csi-rbdplugin-attacher
image: quay.io/k8scsi/csi-attacher:v1.0.1
@ -33,13 +34,13 @@ spec:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-rbdplugin
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-rbdplugin
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate

View File

@ -1,3 +1,4 @@
---
kind: Service
apiVersion: v1
metadata:
@ -24,23 +25,88 @@ spec:
labels:
app: csi-rbdplugin-provisioner
spec:
serviceAccount: csi-provisioner
serviceAccount: rbd-csi-provisioner
containers:
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.0.1
args:
- "--provisioner=csi-rbdplugin"
- "--csi-address=$(ADDRESS)"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-rbdplugin
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
- name: csi-snapshotter
image: quay.io/k8scsi/csi-snapshotter:v1.0.1
args:
- "--csi-address=$(ADDRESS)"
- "--connection-timeout=15s"
- "--v=5"
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
image: quay.io/cephcsi/rbdplugin:v1.0.0
args:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=csi-rbdplugin"
- "--containerized=true"
- "--metadatastorage=k8s_configmap"
env:
- name: HOST_ROOTFS
value: "/rootfs"
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix://var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
- mountPath: /dev
name: host-dev
- mountPath: /rootfs
name: host-rootfs
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
volumes:
- name: host-dev
hostPath:
path: /dev
- name: host-rootfs
hostPath:
path: /
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-rbdplugin
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate

View File

@ -1,3 +1,4 @@
---
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
@ -11,7 +12,7 @@ spec:
labels:
app: csi-rbdplugin
spec:
serviceAccount: csi-nodeplugin
serviceAccount: rbd-csi-nodeplugin
hostNetwork: true
hostPID: true
# to use e.g. Rook orchestrated cluster, and mons' FQDN is
@ -22,20 +23,24 @@ spec:
image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-rbdplugin/csi.sock"
lifecycle:
preStop:
exec:
command: [
"/bin/sh", "-c",
"rm -rf /registration/csi-rbdplugin \
/registration/csi-rbdplugin-reg.sock"
]
env:
- name: ADDRESS
value: /var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /var/lib/kubelet/plugins_registry/csi-rbdplugin
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: csi-rbdplugin
@ -45,12 +50,13 @@ spec:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/cephcsi/rbdplugin:v1.0.0
args :
args:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=csi-rbdplugin"
- "--containerized=true"
- "--metadatastorage=k8s_configmap"
env:
- name: HOST_ROOTFS
value: "/rootfs"
@ -58,6 +64,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix://var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent"
@ -67,6 +77,9 @@ spec:
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: plugin-mount-dir
mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/
mountPropagation: "Bidirectional"
- mountPath: /dev
name: host-dev
- mountPath: /rootfs
@ -79,7 +92,11 @@ spec:
volumes:
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-rbdplugin
path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate
- name: plugin-mount-dir
hostPath:
path: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
@ -89,10 +106,6 @@ spec:
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/csi-rbdplugin
type: DirectoryOrCreate
- name: host-dev
hostPath:
path: /dev

View File

@ -1,19 +1,26 @@
# CSI CephFS plugin
The CSI CephFS plugin is able to both provision new CephFS volumes and attach and mount existing ones to workloads.
The CSI CephFS plugin is able to both provision new CephFS volumes
and attach and mount existing ones to workloads.
## Building
CSI CephFS plugin can be compiled in a form of a binary file or in a form of a Docker image. When compiled as a binary file, the result is stored in `_output/` directory with the name `cephfsplugin`. When compiled as an image, it's stored in the local Docker image store.
The CSI CephFS plugin can be compiled either as a standalone binary or as a
Docker image.
When compiled as a binary, the result is stored in the `_output/`
directory under the name `cephfsplugin`.
When compiled as an image, it's stored in the local Docker image store.
Building binary:
```bash
$ make cephfsplugin
make cephfsplugin
```
Building Docker image:
```bash
$ make image-cephfsplugin
make image-cephfsplugin
```
## Configuration
@ -21,74 +28,98 @@ $ make image-cephfsplugin
**Available command line arguments:**
Option | Default value | Description
------ | ------------- | -----------
--------------------|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
`--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket
`--drivername` | `csi-cephfsplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--nodeid` | _empty_ | This node's ID
`--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in system's path and will choose Ceph kernel client if probing failed.
`--metadatastorage` | _empty_ | Whether metadata should be kept on the node as a file or in a k8s configmap (`node` or `k8s_configmap`)
**Available environment variables:**
`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
the path of your k8s config file (if not specified, the plugin will assume
you're running it inside a k8s cluster and find the config itself).
`POD_NAMESPACE`: if you use `k8s_configmap` as metadata store, `POD_NAMESPACE`
is used to define in which namespace you want the configmaps to be stored.
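When the plugin is deployed as a container, these variables are usually
injected through the pod spec. A minimal, hypothetical sketch (the kubeconfig
path below is only an illustration):

```yaml
# Hypothetical env section of the csi-cephfsplugin container
env:
  # namespace in which the plugin stores its metadata configmaps
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
  # only needed when running outside a k8s cluster; the path is an assumption
  # - name: KUBERNETES_CONFIG_PATH
  #   value: /etc/kubernetes/kubeconfig
```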
**Available volume parameters:**
Parameter | Required | Description
--------- | -------- | -----------
----------------------------------------------------------------------------------------------------|--------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
`monitors` | yes | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
`monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | a string pointing to the key in the credential secret whose value is the monitor list. This is used for the case when the monitors' IPs or hostnames change: the secret can be updated to pick up the new monitors. If both `monitors` and `monValueFromSecret` are set and the monitor list in the secret exists, `monValueFromSecret` takes precedence.
`mounter` | no | Mount method to be used for this volume. Available options are `kernel` for Ceph kernel client and `fuse` for Ceph FUSE driver. Defaults to "default mounter", see command line arguments.
`provisionVolume` | yes | Mode of operation. BOOL value. If `true`, a new CephFS volume will be provisioned. If `false`, an existing volume will be used.
`pool` | for `provisionVolume=true` | Ceph pool into which the volume shall be created
`rootPath` | for `provisionVolume=false` | Root path of an existing CephFS volume
`csiProvisionerSecretName`, `csiNodeStageSecretName` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csiProvisionerSecretNamespace`, `csiNodeStageSecretNamespace` | for Kubernetes | namespaces of the above Secret objects
`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-stage-secret-namespace` | for Kubernetes | namespaces of the above Secret objects
**Required secrets for `provisionVolume=true`:**
Admin credentials are required for provisioning new volumes
* `adminID`: ID of an admin client
* `adminKey`: key of the admin client
**Required secrets for `provisionVolume=false`:**
User credentials with access to an existing volume
* `userID`: ID of a user client
* `userKey`: key of a user client
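A minimal sketch of such a Secret (all IDs and keys below are placeholders)
might look like:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: default
stringData:
  # used when provisionVolume: "true"
  adminID: admin
  adminKey: PLACEHOLDER-ADMIN-KEY
  # used when provisionVolume: "false"
  userID: kubernetes
  userKey: PLACEHOLDER-USER-KEY
```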
Notes on volume size: when provisioning a new volume, `max_bytes` quota attribute for this volume will be set to the requested volume size (see [Ceph quota documentation](http://docs.ceph.com/docs/mimic/cephfs/quota/)). A request for a zero-sized volume means no quota attribute will be set.
Notes on volume size: when provisioning a new volume, `max_bytes` quota
attribute for this volume will be set to the requested volume size (see [Ceph
quota documentation](http://docs.ceph.com/docs/mimic/cephfs/quota/)). A request
for a zero-sized volume means no quota attribute will be set.
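For example, a claim along these lines (the storage class name `csi-cephfs` is
an assumption) would result in a 1Gi `max_bytes` quota on the new volume:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: csi-cephfs  # assumed storage class name
  resources:
    requests:
      storage: 1Gi  # becomes the max_bytes quota on the CephFS volume
```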
## Deployment with Kubernetes
Requires Kubernetes 1.11
Requires Kubernetes 1.13
Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged` flag must be set to true for both the API server and the kubelet). Moreover, as stated in the [mount propagation docs](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation), the Docker daemon of the cluster nodes must allow shared mounts.
Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged`
flag must be set to true for both the API server and the kubelet). Moreover, as
stated in the [mount propagation
docs](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation),
the Docker daemon of the cluster nodes must allow shared mounts.
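As an illustration only, on a kubeadm-based cluster the API server flag could
be set roughly like this (the kubelet still needs `--allow-privileged=true`
passed separately):

```yaml
# Sketch of a kubeadm ClusterConfiguration enabling privileged pods
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  extraArgs:
    allow-privileged: "true"
```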
YAML manifests are located in `deploy/cephfs/kubernetes`.
**Deploy RBACs for sidecar containers and node plugins:**
```bash
$ kubectl create -f csi-attacher-rbac.yaml
$ kubectl create -f csi-provisioner-rbac.yaml
$ kubectl create -f csi-nodeplugin-rbac.yaml
kubectl create -f csi-attacher-rbac.yaml
kubectl create -f csi-provisioner-rbac.yaml
kubectl create -f csi-nodeplugin-rbac.yaml
```
Those manifests deploy service accounts, cluster roles and cluster role bindings. These are shared for both RBD and CephFS CSI plugins, as they require the same permissions.
Those manifests deploy service accounts, cluster roles and cluster role
bindings. These are shared for both RBD and CephFS CSI plugins, as they require
the same permissions.
**Deploy CSI sidecar containers:**
```bash
$ kubectl create -f csi-cephfsplugin-attacher.yaml
$ kubectl create -f csi-cephfsplugin-provisioner.yaml
kubectl create -f csi-cephfsplugin-attacher.yaml
kubectl create -f csi-cephfsplugin-provisioner.yaml
```
Deploys stateful sets for external-attacher and external-provisioner sidecar containers for CSI CephFS.
Deploys stateful sets for external-attacher and external-provisioner
sidecar containers for CSI CephFS.
**Deploy CSI CephFS driver:**
```bash
$ kubectl create -f csi-cephfsplugin.yaml
kubectl create -f csi-cephfsplugin.yaml
```
Deploys a daemon set with two containers: CSI driver-registrar and the CSI CephFS driver.
Deploys a daemon set with two containers: CSI driver-registrar and
the CSI CephFS driver.
## Verifying the deployment in Kubernetes
After successfuly completing the steps above, you should see output similar to this:
After successfully completing the steps above, you should see output similar to this:
```bash
$ kubectl get all
NAME READY STATUS RESTARTS AGE
@ -107,5 +138,7 @@ You can try deploying a demo pod from `examples/cephfs` to test the deployment f
### Notes on volume deletion
Volumes that were provisioned dynamically (i.e. `provisionVolume=true`) are allowed to be deleted by the driver as well, if the user chooses to do so. Otherwise, the driver is forbidden to delete such volumes - attempting to delete them is a no-op.
Volumes that were provisioned dynamically (i.e. `provisionVolume=true`) are
allowed to be deleted by the driver as well, if the user chooses to do so.
Otherwise, the driver is forbidden to delete such volumes - attempting to
delete them is a no-op.
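As a sketch, a statically provisioned volume is described with parameters
along these lines (all values below are placeholders), and delete requests for
it are ignored by the driver:

```yaml
# Fragment of a StorageClass for an existing CephFS volume
parameters:
  monitors: mon1:6789,mon2:6789
  provisionVolume: "false"          # use an existing volume
  rootPath: /path/of/existing/volume
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
```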

View File

@ -1,19 +1,25 @@
# CSI RBD Plugin
The RBD CSI plugin is able to provision new RBD images and attach and mount those to worlkoads.
The RBD CSI plugin is able to provision new RBD images and
attach and mount those to workloads.
## Building
CSI RBD plugin can be compiled in a form of a binary file or in a form of a Docker image. When compiled as a binary file, the result is stored in `_output/` directory with the name `rbdplugin`. When compiled as an image, it's stored in the local Docker image store.
The CSI RBD plugin can be compiled either as a standalone binary or as a
Docker image. When compiled as a binary, the result is stored in the
`_output/` directory under the name `rbdplugin`. When compiled as an image,
it's stored in the local Docker image store.
Building binary:
```bash
$ make rbdplugin
make rbdplugin
```
Building Docker image:
```bash
$ make image-rbdplugin
make image-rbdplugin
```
## Configuration
@ -26,11 +32,19 @@ Option | Default value | Description
`--drivername` | `csi-rbdplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--nodeid` | _empty_ | This node's ID
`--containerized` | true | Whether running in containerized mode
`--metadatastorage` | _empty_ | Whether metadata should be kept on the node as a file or in a k8s configmap (`node` or `k8s_configmap`)
**Available environment variables:**
`HOST_ROOTFS`: rbdplugin searches the `/proc` directory under the directory set by `HOST_ROOTFS`.
`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
the path of your k8s config file (if not specified, the plugin will assume
you're running it inside a k8s cluster and find the config itself).
`POD_NAMESPACE`: if you use `k8s_configmap` as metadata store,
`POD_NAMESPACE` is used to define in which namespace you want
the configmaps to be stored.
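For illustration, these variables are typically set on the plugin container
roughly as follows (the kubeconfig path is an assumption):

```yaml
# Hypothetical env section of the csi-rbdplugin container
env:
  - name: HOST_ROOTFS
    value: "/rootfs"  # where the host filesystem is mounted in the container
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
  # only needed when running outside a k8s cluster
  # - name: KUBERNETES_CONFIG_PATH
  #   value: /etc/kubernetes/kubeconfig
```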
**Available volume parameters:**
Parameter | Required | Description
@ -40,13 +54,15 @@ Parameter | Required | Description
`pool` | yes | Ceph pool into which the RBD image shall be created
`imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
`imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
`csiProvisionerSecretName`, `csiNodePublishSecretName` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csiProvisionerSecretNamespace`, `csiNodePublishSecretNamespace` | for Kubernetes | namespaces of the above Secret objects
`csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-publish-secret-name` | for Kubernetes | name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value
`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | namespaces of the above Secret objects
`mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images
**Required secrets:**
Admin credentials are required for provisioning new RBD images
`ADMIN_NAME`: `ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the client with admin privileges, and the value is its password
Admin credentials are required for provisioning new RBD images: `ADMIN_NAME`:
`ADMIN_PASSWORD`. Note that the key of the key-value pair is the name of the
client with admin privileges, and the value is its password.
Also note that CSI RBD expects admin keyring and Ceph config file in `/etc/ceph`.
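A sketch of such a Secret (the client name `admin` and the value below are
placeholders) could look like:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
data:
  # key   = name of the Ceph client with admin privileges
  # value = its base64-encoded key
  admin: BASE64-ENCODED-PASSWORD
```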
@ -54,33 +70,40 @@ Also note that CSI RBD expects admin keyring and Ceph config file in `/etc/ceph`
Requires Kubernetes 1.11
Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged` flag must be set to true for both the API server and the kubelet). Moreover, as stated in the [mount propagation docs](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation), the Docker daemon of the cluster nodes must allow shared mounts.
Your Kubernetes cluster must allow privileged pods (i.e. `--allow-privileged`
flag must be set to true for both the API server and the kubelet). Moreover, as
stated in the [mount propagation
docs](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation),
the Docker daemon of the cluster nodes must allow shared mounts.
YAML manifests are located in `deploy/rbd/kubernetes`.
**Deploy RBACs for sidecar containers and node plugins:**
```bash
$ kubectl create -f csi-attacher-rbac.yaml
$ kubectl create -f csi-provisioner-rbac.yaml
$ kubectl create -f csi-nodeplugin-rbac.yaml
kubectl create -f csi-attacher-rbac.yaml
kubectl create -f csi-provisioner-rbac.yaml
kubectl create -f csi-nodeplugin-rbac.yaml
```
Those manifests deploy service accounts, cluster roles and cluster role bindings. These are shared for both RBD and CephFS CSI plugins, as they require the same permissions.
Those manifests deploy service accounts, cluster roles and cluster role
bindings. These are shared for both RBD and CephFS CSI plugins, as they require
the same permissions.
**Deploy CSI sidecar containers:**
```bash
$ kubectl create -f csi-rbdplugin-attacher.yaml
$ kubectl create -f csi-rbdplugin-provisioner.yaml
kubectl create -f csi-rbdplugin-attacher.yaml
kubectl create -f csi-rbdplugin-provisioner.yaml
```
Deploys stateful sets for external-attacher and external-provisioner sidecar containers for CSI RBD.
Deploys stateful sets for external-attacher and external-provisioner
sidecar containers for CSI RBD.
**Deploy RBD CSI driver:**
```bash
$ kubectl create -f csi-rbdplugin.yaml
kubectl create -f csi-rbdplugin.yaml
```
Deploys a daemon set with two containers: CSI driver-registrar and the CSI RBD driver.
@ -105,3 +128,19 @@ service/csi-rbdplugin-provisioner ClusterIP 10.104.2.130 <none> 123
You can try deploying a demo pod from `examples/rbd` to test the deployment further.
## Deployment with Helm
The same requirements from the Kubernetes section apply here, i.e. Kubernetes
version, privileged flag and shared mounts.
The Helm chart is located in `deploy/rbd/helm`.
**Deploy Helm Chart:**
```bash
helm install ./deploy/rbd/helm
```
The Helm chart deploys all of the required resources to use the CSI RBD driver.
After deploying the chart, you can verify the deployment using the instructions
above for verifying the deployment with Kubernetes.
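If you need to adjust the chart, you can override the defaults shown in
`values.yaml` with your own file and pass it to `helm install -f`; a small
sketch (values are examples only):

```yaml
# my-values.yaml
provisioner:
  replicaCount: 2
nodeplugin:
  plugin:
    image:
      tag: v1.0.0
```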

View File

@ -1,17 +1,116 @@
## How to test RBD and CephFS plugins with Kubernetes 1.11
# How to test RBD and CephFS plugins with Kubernetes 1.13
Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and `plugin-teardown.sh` helper scripts. You can use those to help you deploy/tear down RBACs, sidecar containers and the plugin in one go. By default, they look for the YAML manifests in `../../deploy/{rbd,cephfs}/kubernetes`. You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`.
Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and
`plugin-teardown.sh` helper scripts. You can use those to help you
deploy/tear down RBACs, sidecar containers and the plugin in one go.
By default, they look for the YAML manifests in
`../../deploy/{rbd,cephfs}/kubernetes`.
You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`.
Once the plugin is successfuly deployed, you'll need to customize `storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster setup. Please consult the documentation for info about available parameters.
Once the plugin is successfully deployed, you'll need to customize
`storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster
setup.
Please consult the documentation for info about available parameters.
After configuring the secrets, monitors, etc. you can deploy a
testing Pod mounting an RBD image / CephFS volume:
After configuring the secrets, monitors, etc. you can deploy a testing Pod mounting a RBD image / CephFS volume:
```bash
$ kubectl create -f secret.yaml
$ kubectl create -f storageclass.yaml
$ kubectl create -f pvc.yaml
$ kubectl create -f pod.yaml
kubectl create -f secret.yaml
kubectl create -f storageclass.yaml
kubectl create -f pvc.yaml
kubectl create -f pod.yaml
```
Other helper scripts:
* `logs.sh` prints the output of the plugin
* `exec-bash.sh` logs into the plugin's container and runs bash
## How to test RBD Snapshot feature
Before continuing, make sure you enabled the required
feature gate `VolumeSnapshotDataSource=true` in your Kubernetes cluster.
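For example, on a kubeadm-based cluster the gate could be enabled on the API
server like this (a sketch only; adapt it to however you manage your control
plane):

```yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  extraArgs:
    feature-gates: "VolumeSnapshotDataSource=true"
```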
In the `examples/rbd` directory you will find two files related to snapshots:
[snapshotclass.yaml](./rbd/snapshotclass.yaml) and
[snapshot.yaml](./rbd/snapshot.yaml).
Once you have created your RBD volume, you'll need to customize at least
`snapshotclass.yaml` and make sure the `monitors` and `pool` parameters match
your Ceph cluster setup.
If you followed the documentation to create the rbdplugin, you shouldn't
have to edit any other file.
After configuring everything you needed, deploy the snapshot class:
```bash
kubectl create -f snapshotclass.yaml
```
Verify that the snapshot class was created:
```console
$ kubectl get volumesnapshotclass
NAME AGE
csi-rbdplugin-snapclass 4s
```
Create a snapshot from the existing PVC:
```bash
kubectl create -f snapshot.yaml
```
To verify if your volume snapshot has successfully been created, run the following:
```console
$ kubectl get volumesnapshot
NAME AGE
rbd-pvc-snapshot 6s
```
To check the status of the snapshot, run the following:
```console
$ kubectl describe volumesnapshot rbd-pvc-snapshot
Name: rbd-pvc-snapshot
Namespace: default
Labels: <none>
Annotations: <none>
API Version: snapshot.storage.k8s.io/v1alpha1
Kind: VolumeSnapshot
Metadata:
Creation Timestamp: 2019-02-06T08:52:34Z
Finalizers:
snapshot.storage.kubernetes.io/volumesnapshot-protection
Generation: 5
Resource Version: 84239
Self Link: /apis/snapshot.storage.k8s.io/v1alpha1/namespaces/default/volumesnapshots/rbd-pvc-snapshot
UID: 8b9b5740-29ec-11e9-8e0f-b8ca3aad030b
Spec:
Snapshot Class Name: csi-rbdplugin-snapclass
Snapshot Content Name: snapcontent-8b9b5740-29ec-11e9-8e0f-b8ca3aad030b
Source:
API Group: <nil>
Kind: PersistentVolumeClaim
Name: rbd-pvc
Status:
Creation Time: 2019-02-06T08:52:34Z
Ready To Use: true
Restore Size: 1Gi
Events: <none>
```
To be sure everything is OK you can run `rbd snap ls [your-pvc-name]` inside
one of your Ceph pods.
To restore the snapshot to a new PVC, deploy
[pvc-restore.yaml](./rbd/pvc-restore.yaml) and a testing pod
[pod-restore.yaml](./rbd/pod-restore.yaml):
```bash
kubectl create -f pvc-restore.yaml
kubectl create -f pod-restore.yaml
```

View File

@ -1,3 +1,4 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:

View File

@ -4,7 +4,7 @@ CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)
function get_pod_status() {
echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}
while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
echo "Waiting for $POD_NAME (status $(get_pod_status))"
done
kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash
kubectl exec -it "${POD_NAME#*/}" -c "$CONTAINER_NAME" bash

View File

@ -4,7 +4,7 @@ CONTAINER_NAME=csi-cephfsplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)
function get_pod_status() {
echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}
while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
echo "Waiting for $POD_NAME (status $(get_pod_status))"
done
kubectl logs -f $POD_NAME -c $CONTAINER_NAME
kubectl logs -f "$POD_NAME" -c "$CONTAINER_NAME"

View File

@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1
objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin)
for obj in ${objects[@]}; do
for obj in "${objects[@]}"; do
kubectl create -f "./$obj.yaml"
done

View File

@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1
objects=(csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)
for obj in ${objects[@]}; do
for obj in "${objects[@]}"; do
kubectl delete -f "./$obj.yaml"
done

View File

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Pod
metadata:
@ -14,4 +15,3 @@ spec:
persistentVolumeClaim:
claimName: csi-cephfs-pvc
readOnly: false

View File

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:

View File

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Secret
metadata:

View File

@ -1,3 +1,4 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
@ -13,7 +14,8 @@ parameters:
# Requires admin credentials (adminID, adminKey).
# For provisionVolume: "false":
# It is assumed the volume already exists and the user is expected
# to provide path to that volume (rootPath) and user credentials (userID, userKey).
# to provide path to that volume (rootPath) and user credentials
# (userID, userKey).
provisionVolume: "true"
# Ceph pool into which the volume shall be created
@ -25,13 +27,16 @@ parameters:
# rootPath: /absolute/path
# The secrets have to contain user and/or Ceph admin credentials.
csiProvisionerSecretName: csi-cephfs-secret
csiProvisionerSecretNamespace: default
csiNodeStageSecretName: csi-cephfs-secret
csiNodeStageSecretNamespace: default
csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
csi.storage.k8s.io/provisioner-secret-namespace: default
csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
csi.storage.k8s.io/node-stage-secret-namespace: default
# (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
# If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
# or by setting the default mounter explicitly via --volumemounter command-line argument.
# (optional) The driver can use either ceph-fuse (fuse) or
  # ceph kernel client (kernel).
# If omitted, default volume mounter will be used - this is
# determined by probing for ceph-fuse
# or by setting the default mounter explicitly via
# --volumemounter command-line argument.
# mounter: kernel
reclaimPolicy: Delete

View File

@ -4,7 +4,7 @@ CONTAINER_NAME=csi-rbdplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)
function get_pod_status() {
echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}
while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
echo "Waiting for $POD_NAME (status $(get_pod_status))"
done
kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash
kubectl exec -it "${POD_NAME#*/}" -c "$CONTAINER_NAME" bash

View File

@ -4,7 +4,7 @@ CONTAINER_NAME=csi-rbdplugin
POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1)
function get_pod_status() {
echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}")
echo -n "$(kubectl get "$POD_NAME" -o jsonpath="{.status.phase}")"
}
while [[ "$(get_pod_status)" != "Running" ]]; do
@ -12,4 +12,4 @@ while [[ "$(get_pod_status)" != "Running" ]]; do
echo "Waiting for $POD_NAME (status $(get_pod_status))"
done
kubectl logs -f $POD_NAME -c $CONTAINER_NAME
kubectl logs -f "$POD_NAME" -c "$CONTAINER_NAME"

View File

@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1
objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin)
for obj in ${objects[@]}; do
for obj in "${objects[@]}"; do
kubectl create -f "./$obj.yaml"
done

View File

@ -10,6 +10,6 @@ cd "$deployment_base" || exit 1
objects=(csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac)
for obj in ${objects[@]}; do
for obj in "${objects[@]}"; do
kubectl delete -f "./$obj.yaml"
done

View File

@ -0,0 +1,17 @@
---
apiVersion: v1
kind: Pod
metadata:
name: csirbd-restore-demo-pod
spec:
containers:
- name: web-server
image: nginx
volumeMounts:
- name: mypvc
mountPath: /var/lib/www/html
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: rbd-pvc-restore
readOnly: false

View File

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Pod
metadata:
@ -14,4 +15,3 @@ spec:
persistentVolumeClaim:
claimName: rbd-pvc
readOnly: false

View File

@ -0,0 +1,16 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rbd-pvc-restore
spec:
storageClassName: csi-rbd
dataSource:
name: rbd-pvc-snapshot
kind: VolumeSnapshot
apiGroup: snapshot.storage.k8s.io
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi

View File

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:

View File

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Secret
metadata:
@ -10,4 +11,4 @@ data:
kubernetes: BASE64-ENCODED-PASSWORD
# if monValueFromSecret is set to "monitors", uncomment the
# following and set the mon there
#monitors: BASE64-ENCODED-Comma-Delimited-Mons
# monitors: BASE64-ENCODED-Comma-Delimited-Mons

View File

@ -0,0 +1,10 @@
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshot
metadata:
name: rbd-pvc-snapshot
spec:
snapshotClassName: csi-rbdplugin-snapclass
source:
name: rbd-pvc
kind: PersistentVolumeClaim

View File

@ -0,0 +1,11 @@
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshotClass
metadata:
name: csi-rbdplugin-snapclass
snapshotter: csi-rbdplugin
parameters:
pool: rbd
monitors: mon1:port,mon2:port,...
csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
csi.storage.k8s.io/snapshotter-secret-namespace: default

View File

@ -1,3 +1,4 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
@ -11,8 +12,7 @@ parameters:
# if "monitors" parameter is not set, driver to get monitors from same
# secret as admin/user credentials. "monValueFromSecret" provides the
# key in the secret whose value is the mons
#monValueFromSecret: "monitors"
# monValueFromSecret: "monitors"
# Ceph pool into which the RBD image shall be created
pool: rbd
@ -20,18 +20,19 @@ parameters:
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
# RBD image features. Available for imageFormat: "2"
# CSI RBD currently supports only `layering` feature.
imageFeatures: layering
# The secrets have to contain Ceph admin credentials.
csiProvisionerSecretName: csi-rbd-secret
csiProvisionerSecretNamespace: default
csiNodePublishSecretName: csi-rbd-secret
csiNodePublishSecretNamespace: default
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: default
csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
csi.storage.k8s.io/node-publish-secret-namespace: default
# Ceph users for operating RBD
adminid: admin
userid: kubernetes
# uncomment the following to use rbd-nbd as mounter on supported nodes
#mounter: rbd-nbd
# mounter: rbd-nbd
reclaimPolicy: Delete

View File

@ -18,36 +18,37 @@ package cephfs
import (
"fmt"
"io/ioutil"
"os"
"path"
"text/template"
"k8s.io/klog"
)
const cephConfig = `[global]
mon_host = {{.Monitors}}
var cephConfig = []byte(`[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
`
`)
const cephKeyring = `[client.{{.UserId}}]
const cephKeyring = `[client.{{.UserID}}]
key = {{.Key}}
`
const cephSecret = `{{.Key}}`
const cephSecret = `{{.Key}}` // #nosec
const (
cephConfigRoot = "/etc/ceph"
cephConfigFileNameFmt = "ceph.share.%s.conf"
cephConfigPath = "/etc/ceph/ceph.conf"
cephKeyringFileNameFmt = "ceph.share.%s.client.%s.keyring"
cephSecretFileNameFmt = "ceph.share.%s.client.%s.secret"
cephSecretFileNameFmt = "ceph.share.%s.client.%s.secret" // #nosec
)
var (
cephConfigTempl *template.Template
cephKeyringTempl *template.Template
cephSecretTempl *template.Template
)
@ -63,22 +64,24 @@ func init() {
},
}
cephConfigTempl = template.Must(template.New("config").Parse(cephConfig))
cephKeyringTempl = template.Must(template.New("keyring").Funcs(fm).Parse(cephKeyring))
cephSecretTempl = template.Must(template.New("secret").Parse(cephSecret))
}
type cephConfigWriter interface {
writeToFile() error
func createCephConfigRoot() error {
return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}
type cephConfigData struct {
Monitors string
VolumeID volumeID
func writeCephConfig() error {
if err := createCephConfigRoot(); err != nil {
return err
}
return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
}
func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, data interface{}) error {
if err := os.MkdirAll(cephConfigRoot, 0755); err != nil {
if err := createCephConfigRoot(); err != nil {
return err
}
@ -90,41 +93,37 @@ func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, dat
return err
}
defer f.Close()
defer func() {
if err := f.Close(); err != nil {
klog.Errorf("failed to close file %s with error %s", f.Name(), err)
}
}()
return t.Execute(f, data)
}
func (d *cephConfigData) writeToFile() error {
return writeCephTemplate(fmt.Sprintf(cephConfigFileNameFmt, d.VolumeID), 0640, cephConfigTempl, d)
}
type cephKeyringData struct {
UserId, Key string
UserID, Key string
VolumeID volumeID
}
func (d *cephKeyringData) writeToFile() error {
return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserId), 0600, cephKeyringTempl, d)
return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserID), 0600, cephKeyringTempl, d)
}
type cephSecretData struct {
UserId, Key string
UserID, Key string
VolumeID volumeID
}
func (d *cephSecretData) writeToFile() error {
return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserId), 0600, cephSecretTempl, d)
return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserID), 0600, cephSecretTempl, d)
}
func getCephSecretPath(volId volumeID, userId string) string {
return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volId, userId))
func getCephSecretPath(volID volumeID, userID string) string {
return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volID, userID))
}
func getCephKeyringPath(volId volumeID, userId string) string {
return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volId, userId))
}
func getCephConfPath(volId volumeID) string {
return path.Join(cephConfigRoot, fmt.Sprintf(cephConfigFileNameFmt, volId))
func getCephKeyringPath(volID volumeID, userID string) string {
return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volID, userID))
}

View File

@ -21,6 +21,8 @@ import (
"encoding/json"
"fmt"
"os"
"k8s.io/klog"
)
const (
@ -47,16 +49,17 @@ func (ent *cephEntity) toCredentials() *credentials {
}
}
func getCephUserName(volId volumeID) string {
return cephUserPrefix + string(volId)
func getCephUserName(volID volumeID) string {
return cephUserPrefix + string(volID)
}
func getCephUser(adminCr *credentials, volId volumeID) (*cephEntity, error) {
entityName := cephEntityClientPrefix + getCephUserName(volId)
func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
entityName := cephEntityClientPrefix + getCephUserName(volID)
var ents []cephEntity
args := [...]string{
"auth", "-f", "json", "-c", getCephConfPath(volId), "-n", cephEntityClientPrefix + adminCr.id,
"-m", volOptions.Monitors,
"auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
"get", entityName,
}
@ -69,7 +72,7 @@ func getCephUser(adminCr *credentials, volId volumeID) (*cephEntity, error) {
// Contains non-json data: "exported keyring for ENTITY\n\n"
offset := bytes.Index(out, []byte("[{"))
if json.NewDecoder(bytes.NewReader(out[offset:])).Decode(&ents); err != nil {
if err = json.NewDecoder(bytes.NewReader(out[offset:])).Decode(&ents); err != nil {
return nil, fmt.Errorf("failed to decode json: %v", err)
}
@ -80,43 +83,53 @@ func getCephUser(adminCr *credentials, volId volumeID) (*cephEntity, error) {
return &ents[0], nil
}
func createCephUser(volOptions *volumeOptions, adminCr *credentials, volId volumeID) (*cephEntity, error) {
func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
caps := cephEntityCaps{
Mds: fmt.Sprintf("allow rw path=%s", getVolumeRootPath_ceph(volId)),
Mds: fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)),
Mon: "allow r",
Osd: fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volId)),
Osd: fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)),
}
var ents []cephEntity
args := [...]string{
"auth", "-f", "json", "-c", getCephConfPath(volId), "-n", cephEntityClientPrefix + adminCr.id,
"get-or-create", cephEntityClientPrefix + getCephUserName(volId),
"-m", volOptions.Monitors,
"auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
"get-or-create", cephEntityClientPrefix + getCephUserName(volID),
"mds", caps.Mds,
"mon", caps.Mon,
"osd", caps.Osd,
}
if err := execCommandJson(&ents, "ceph", args[:]...); err != nil {
if err := execCommandJSON(&ents, args[:]...); err != nil {
return nil, fmt.Errorf("error creating ceph user: %v", err)
}
return &ents[0], nil
}
func deleteCephUser(adminCr *credentials, volId volumeID) error {
userId := getCephUserName(volId)
func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error {
userID := getCephUserName(volID)
args := [...]string{
"-c", getCephConfPath(volId), "-n", cephEntityClientPrefix + adminCr.id,
"auth", "rm", cephEntityClientPrefix + userId,
"-m", volOptions.Monitors,
"-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
"auth", "rm", cephEntityClientPrefix + userID,
}
if err := execCommandAndValidate("ceph", args[:]...); err != nil {
var err error
if err = execCommandAndValidate("ceph", args[:]...); err != nil {
return err
}
os.Remove(getCephKeyringPath(volId, userId))
os.Remove(getCephSecretPath(volId, userId))
keyringPath := getCephKeyringPath(volID, adminCr.id)
if err = os.Remove(keyringPath); err != nil {
klog.Errorf("failed to remove keyring file %s with error %s", keyringPath, err)
}
secretPath := getCephSecretPath(volID, adminCr.id)
if err = os.Remove(secretPath); err != nil {
klog.Errorf("failed to remove secret file %s with error %s", secretPath, err)
}
return nil
}

View File

@ -1,128 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"sync"
"github.com/golang/glog"
)
const (
controllerCacheRoot = PluginFolder + "/controller/plugin-cache"
)
type controllerCacheEntry struct {
VolOptions volumeOptions
VolumeID volumeID
}
type controllerCacheMap map[volumeID]*controllerCacheEntry
var (
ctrCache = make(controllerCacheMap)
ctrCacheMtx sync.Mutex
)
// Load all .json files from controllerCacheRoot into ctrCache
// Called from driver.go's Run()
func loadControllerCache() error {
cacheDir, err := ioutil.ReadDir(controllerCacheRoot)
if err != nil {
return fmt.Errorf("cannot read controller cache from %s: %v", controllerCacheRoot, err)
}
ctrCacheMtx.Lock()
defer ctrCacheMtx.Unlock()
for _, fi := range cacheDir {
if !strings.HasSuffix(fi.Name(), ".json") || !fi.Mode().IsRegular() {
continue
}
f, err := os.Open(path.Join(controllerCacheRoot, fi.Name()))
if err != nil {
glog.Errorf("cephfs: cloudn't read '%s' from controller cache: %v", fi.Name(), err)
continue
}
d := json.NewDecoder(f)
ent := &controllerCacheEntry{}
if err = d.Decode(ent); err != nil {
glog.Errorf("cephfs: failed to parse '%s': %v", fi.Name(), err)
} else {
ctrCache[ent.VolumeID] = ent
}
f.Close()
}
return nil
}
func getControllerCacheEntryPath(volId volumeID) string {
return path.Join(controllerCacheRoot, string(volId)+".json")
}
func (m controllerCacheMap) insert(ent *controllerCacheEntry) error {
filePath := getControllerCacheEntryPath(ent.VolumeID)
ctrCacheMtx.Lock()
defer ctrCacheMtx.Unlock()
f, err := os.Create(filePath)
if err != nil {
return fmt.Errorf("couldn't create cache entry file '%s': %v", filePath, err)
}
defer f.Close()
enc := json.NewEncoder(f)
if err = enc.Encode(ent); err != nil {
return fmt.Errorf("failed to encode cache entry for volume %s: %v", ent.VolumeID, err)
}
m[ent.VolumeID] = ent
return nil
}
func (m controllerCacheMap) pop(volId volumeID) (*controllerCacheEntry, error) {
ctrCacheMtx.Lock()
defer ctrCacheMtx.Unlock()
ent, ok := m[volId]
if !ok {
return nil, fmt.Errorf("cache entry for volume %s does not exist", volId)
}
filePath := getControllerCacheEntryPath(volId)
if err := os.Remove(filePath); err != nil {
return nil, fmt.Errorf("failed to remove cache entry file '%s': %v", filePath, err)
}
delete(m, volId)
return ent, nil
}

View File

@ -17,143 +17,154 @@ limitations under the License.
package cephfs
import (
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util"
)
type controllerServer struct {
// ControllerServer struct of CEPH CSI driver with supported methods of CSI
// controller server spec.
type ControllerServer struct {
*csicommon.DefaultControllerServer
MetadataStore util.CachePersister
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
type controllerCacheEntry struct {
VolOptions volumeOptions
VolumeID volumeID
}
// CreateVolume creates the volume in backend and store the volume metadata
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateCreateVolumeRequest(req); err != nil {
glog.Errorf("CreateVolumeRequest validation failed: %v", err)
klog.Errorf("CreateVolumeRequest validation failed: %v", err)
return nil, err
}
// Configuration
volOptions, err := newVolumeOptions(req.GetParameters())
secret := req.GetSecrets()
volOptions, err := newVolumeOptions(req.GetParameters(), secret)
if err != nil {
glog.Errorf("validation of volume options failed: %v", err)
klog.Errorf("validation of volume options failed: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
volId := makeVolumeID(req.GetName())
conf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volId}
if err = conf.writeToFile(); err != nil {
glog.Errorf("failed to write ceph config file to %s: %v", getCephConfPath(volId), err)
return nil, status.Error(codes.Internal, err.Error())
}
volID := makeVolumeID(req.GetName())
// Create a volume in case the user didn't provide one
if volOptions.ProvisionVolume {
// Admin credentials are required
cr, err := getAdminCredentials(req.GetSecrets())
cr, err := getAdminCredentials(secret)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if err = storeCephCredentials(volId, cr); err != nil {
glog.Errorf("failed to store admin credentials for '%s': %v", cr.id, err)
if err = storeCephCredentials(volID, cr); err != nil {
klog.Errorf("failed to store admin credentials for '%s': %v", cr.id, err)
return nil, status.Error(codes.Internal, err.Error())
}
if err = createVolume(volOptions, cr, volId, req.GetCapacityRange().GetRequiredBytes()); err != nil {
glog.Errorf("failed to create volume %s: %v", req.GetName(), err)
if err = createVolume(volOptions, cr, volID, req.GetCapacityRange().GetRequiredBytes()); err != nil {
klog.Errorf("failed to create volume %s: %v", req.GetName(), err)
return nil, status.Error(codes.Internal, err.Error())
}
if _, err = createCephUser(volOptions, cr, volId); err != nil {
glog.Errorf("failed to create ceph user for volume %s: %v", req.GetName(), err)
if _, err = createCephUser(volOptions, cr, volID); err != nil {
klog.Errorf("failed to create ceph user for volume %s: %v", req.GetName(), err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.Infof("cephfs: successfully created volume %s", volId)
klog.Infof("cephfs: successfully created volume %s", volID)
} else {
glog.Infof("cephfs: volume %s is provisioned statically", volId)
klog.Infof("cephfs: volume %s is provisioned statically", volID)
}
if err = ctrCache.insert(&controllerCacheEntry{VolOptions: *volOptions, VolumeID: volId}); err != nil {
glog.Errorf("failed to store a cache entry for volume %s: %v", volId, err)
ce := &controllerCacheEntry{VolOptions: *volOptions, VolumeID: volID}
if err := cs.MetadataStore.Create(string(volID), ce); err != nil {
klog.Errorf("failed to store a cache entry for volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: string(volId),
VolumeId: string(volID),
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: req.GetParameters(),
},
}, nil
}
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.validateDeleteVolumeRequest(req); err != nil {
glog.Errorf("DeleteVolumeRequest validation failed: %v", err)
// DeleteVolume deletes the volume in backend and removes the volume metadata
// from store
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.validateDeleteVolumeRequest(); err != nil {
klog.Errorf("DeleteVolumeRequest validation failed: %v", err)
return nil, err
}
var (
volId = volumeID(req.GetVolumeId())
volID = volumeID(req.GetVolumeId())
secrets = req.GetSecrets()
err error
)
// Load volume info from cache
ent, err := ctrCache.pop(volId)
if err != nil {
glog.Error(err)
ce := &controllerCacheEntry{}
if err = cs.MetadataStore.Get(string(volID), ce); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !ent.VolOptions.ProvisionVolume {
if !ce.VolOptions.ProvisionVolume {
// DeleteVolume() is forbidden for statically provisioned volumes!
glog.Warningf("volume %s is provisioned statically, aborting delete", volId)
klog.Warningf("volume %s is provisioned statically, aborting delete", volID)
return &csi.DeleteVolumeResponse{}, nil
}
defer func() {
if err != nil {
// Reinsert cache entry for retry
if insErr := ctrCache.insert(ent); insErr != nil {
glog.Errorf("failed to reinsert volume cache entry in rollback procedure for volume %s: %v", volId, err)
// mons may have changed since create volume,
// retrieve the latest mons and override old mons
if mon, secretsErr := getMonValFromSecret(secrets); secretsErr == nil && len(mon) > 0 {
klog.Infof("overriding monitors [%q] with [%q] for volume %s", ce.VolOptions.Monitors, mon, volID)
ce.VolOptions.Monitors = mon
}
}
}()
// Deleting a volume requires admin credentials
cr, err := getAdminCredentials(req.GetSecrets())
cr, err := getAdminCredentials(secrets)
if err != nil {
glog.Errorf("failed to retrieve admin credentials: %v", err)
klog.Errorf("failed to retrieve admin credentials: %v", err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if err = purgeVolume(volId, cr, &ent.VolOptions); err != nil {
glog.Errorf("failed to delete volume %s: %v", volId, err)
if err = purgeVolume(volID, cr, &ce.VolOptions); err != nil {
klog.Errorf("failed to delete volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
if err = deleteCephUser(cr, volId); err != nil {
glog.Errorf("failed to delete ceph user for volume %s: %v", volId, err)
if err = deleteCephUser(&ce.VolOptions, cr, volID); err != nil {
klog.Errorf("failed to delete ceph user for volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.Infof("cephfs: successfully deleted volume %s", volId)
if err = cs.MetadataStore.Delete(string(volID)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.Infof("cephfs: successfully deleted volume %s", volID)
return &csi.DeleteVolumeResponse{}, nil
}
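// Illustrative sketch (not part of this changeset): CreateVolume and
// DeleteVolume above only rely on the Create, Get and Delete calls of
// util.CachePersister. The real interface lives in pkg/util and is not shown
// in this diff (it also offers ForAll, used by the rbd controller further
// below), so the method set here is an assumption based on those calls. A
// minimal in-memory stand-in for unit tests could look like this:

package cachetest // hypothetical package for this sketch

import (
	"encoding/json"
	"fmt"
	"sync"
)

// MemStore keeps JSON-serialized cache entries keyed by identifier.
type MemStore struct {
	mu   sync.Mutex
	data map[string][]byte
}

func NewMemStore() *MemStore {
	return &MemStore{data: map[string][]byte{}}
}

// Create serializes data and stores it under identifier.
func (m *MemStore) Create(identifier string, data interface{}) error {
	b, err := json.Marshal(data)
	if err != nil {
		return err
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.data[identifier] = b
	return nil
}

// Get deserializes the entry stored under identifier into data.
func (m *MemStore) Get(identifier string, data interface{}) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	b, ok := m.data[identifier]
	if !ok {
		return fmt.Errorf("no cache entry for %q", identifier)
	}
	return json.Unmarshal(b, data)
}

// Delete removes the entry; deleting a missing entry is not an error.
func (m *MemStore) Delete(identifier string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.data, identifier)
	return nil
}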
func (cs *controllerServer) ValidateVolumeCapabilities(
// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(
ctx context.Context,
req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// Cephfs doesn't support Block volume

View File

@ -19,10 +19,11 @@ package cephfs
import "fmt"
const (
credUserId = "userID"
credUserID = "userID"
credUserKey = "userKey"
credAdminId = "adminID"
credAdminID = "adminID"
credAdminKey = "adminKey"
credMonitors = "monitors"
)
type credentials struct {
@ -48,9 +49,16 @@ func getCredentials(idField, keyField string, secrets map[string]string) (*crede
}
func getUserCredentials(secrets map[string]string) (*credentials, error) {
return getCredentials(credUserId, credUserKey, secrets)
return getCredentials(credUserID, credUserKey, secrets)
}
func getAdminCredentials(secrets map[string]string) (*credentials, error) {
return getCredentials(credAdminId, credAdminKey, secrets)
return getCredentials(credAdminID, credAdminKey, secrets)
}
func getMonValFromSecret(secrets map[string]string) (string, error) {
if mons, ok := secrets[credMonitors]; ok {
return mons, nil
}
return "", fmt.Errorf("missing %q", credMonitors)
}
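// Illustrative sketch (not part of this changeset): the helpers above read the
// CSI secrets map using the "adminID"/"adminKey", "userID"/"userKey" and
// optional "monitors" keys defined at the top of this file. An in-package test
// with made-up values shows the expected shape of that map:

package cephfs

import "testing"

func TestCredentialsFromSecrets(t *testing.T) {
	secrets := map[string]string{
		"adminID":  "admin",
		"adminKey": "AQB-example-admin-key==",
		"userID":   "csi-user",
		"userKey":  "AQB-example-user-key==",
		"monitors": "mon1:6789,mon2:6789,mon3:6789",
	}

	if _, err := getAdminCredentials(secrets); err != nil {
		t.Fatalf("admin credentials: %v", err)
	}
	if _, err := getUserCredentials(secrets); err != nil {
		t.Fatalf("user credentials: %v", err)
	}
	if mon, err := getMonValFromSecret(secrets); err != nil || mon == "" {
		t.Fatalf("monitors: %q, %v", mon, err)
	}
}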

View File

@ -17,78 +17,76 @@ limitations under the License.
package cephfs
import (
"os"
"github.com/golang/glog"
"k8s.io/klog"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util"
)
const (
PluginFolder = "/var/lib/kubelet/plugins_registry/csi-cephfsplugin"
Version = "1.0.0"
// PluginFolder defines the location of ceph plugin
PluginFolder = "/var/lib/kubelet/plugins/csi-cephfsplugin"
// version of ceph driver
version = "1.0.0"
)
type cephfsDriver struct {
driver *csicommon.CSIDriver
// Driver contains the default identity, node and controller server structs
type Driver struct {
cd *csicommon.CSIDriver
is *identityServer
ns *nodeServer
cs *controllerServer
caps []*csi.VolumeCapability_AccessMode
cscaps []*csi.ControllerServiceCapability
is *IdentityServer
ns *NodeServer
cs *ControllerServer
}
var (
driver *cephfsDriver
// DefaultVolumeMounter for mounting volumes
DefaultVolumeMounter string
)
func NewCephFSDriver() *cephfsDriver {
return &cephfsDriver{}
// NewDriver returns new ceph driver
func NewDriver() *Driver {
return &Driver{}
}
func NewIdentityServer(d *csicommon.CSIDriver) *identityServer {
return &identityServer{
// NewIdentityServer initializes an identity server for the ceph CSI driver
func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
return &IdentityServer{
DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
}
}
func NewControllerServer(d *csicommon.CSIDriver) *controllerServer {
return &controllerServer{
// NewControllerServer initializes a controller server for the ceph CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer {
return &ControllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
MetadataStore: cachePersister,
}
}
func NewNodeServer(d *csicommon.CSIDriver) *nodeServer {
return &nodeServer{
// NewNodeServer initializes a node server for the ceph CSI driver.
func NewNodeServer(d *csicommon.CSIDriver) *NodeServer {
return &NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
}
}
func (fs *cephfsDriver) Run(driverName, nodeId, endpoint, volumeMounter string) {
glog.Infof("Driver: %v version: %v", driverName, Version)
// Run starts a non-blocking gRPC controller, node and identity server for
// the ceph CSI driver, which can serve multiple parallel requests
func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cachePersister util.CachePersister) {
klog.Infof("Driver: %v version: %v", driverName, version)
// Configuration
if err := os.MkdirAll(controllerCacheRoot, 0755); err != nil {
glog.Fatalf("cephfs: failed to create %s: %v", controllerCacheRoot, err)
return
}
if err := loadControllerCache(); err != nil {
glog.Errorf("cephfs: failed to read volume cache: %v", err)
}
if err := loadAvailableMounters(); err != nil {
glog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
klog.Fatalf("cephfs: failed to load ceph mounters: %v", err)
}
if volumeMounter != "" {
if err := validateMounter(volumeMounter); err != nil {
glog.Fatalln(err)
klog.Fatalln(err)
} else {
DefaultVolumeMounter = volumeMounter
}
@ -99,28 +97,33 @@ func (fs *cephfsDriver) Run(driverName, nodeId, endpoint, volumeMounter string)
DefaultVolumeMounter = availableMounters[0]
}
glog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)
klog.Infof("cephfs: setting default volume mounter to %s", DefaultVolumeMounter)
if err := writeCephConfig(); err != nil {
klog.Fatalf("failed to write ceph configuration file: %v", err)
}
// Initialize default library driver
fs.driver = csicommon.NewCSIDriver(driverName, Version, nodeId)
if fs.driver == nil {
glog.Fatalln("Failed to initialize CSI driver")
fs.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
if fs.cd == nil {
klog.Fatalln("failed to initialize CSI driver")
}
fs.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
fs.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
})
fs.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
fs.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
})
// Create gRPC servers
fs.is = NewIdentityServer(fs.driver)
fs.ns = NewNodeServer(fs.driver)
fs.cs = NewControllerServer(fs.driver)
fs.is = NewIdentityServer(fs.cd)
fs.ns = NewNodeServer(fs.cd)
fs.cs = NewControllerServer(fs.cd, cachePersister)
server := csicommon.NewNonBlockingGRPCServer()
server.Start(endpoint, fs.is, fs.cs, fs.ns)
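// Illustrative sketch (not part of this changeset): starting the driver from a
// plugin binary. Run's signature matches the code above; the flag names,
// defaults and the pkg/cephfs import path are assumptions, and the
// CachePersister is injected because its constructor is not part of this diff.

package cephfsplugin // hypothetical binary package

import (
	"flag"

	"github.com/ceph/ceph-csi/pkg/cephfs"
	"github.com/ceph/ceph-csi/pkg/util"
)

var (
	endpoint   = flag.String("endpoint", "unix:///csi/csi.sock", "CSI endpoint")
	driverName = flag.String("drivername", "cephfs.csi.ceph.com", "name of the driver")
	nodeID     = flag.String("nodeid", "", "node ID")
	mounter    = flag.String("volumemounter", "", "default mounter (fuse or kernel), empty means autodetect")
)

// runCephFS wires the parsed flags into Driver.Run, which brings up the
// identity, node and controller gRPC services on the given endpoint.
func runCephFS(cp util.CachePersister) {
	flag.Parse()
	driver := cephfs.NewDriver()
	driver.Run(*driverName, *nodeID, *endpoint, *mounter, cp)
}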

View File

@ -23,11 +23,14 @@ import (
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type identityServer struct {
// IdentityServer struct of ceph CSI driver with supported methods of CSI
// identity server spec.
type IdentityServer struct {
*csicommon.DefaultIdentityServer
}
func (is *identityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
// GetPluginCapabilities returns available capabilities of the ceph driver
func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{

View File

@ -21,22 +21,24 @@ import (
"fmt"
"os"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type nodeServer struct {
// NodeServer struct of ceph CSI driver with supported methods of CSI
// node server spec.
type NodeServer struct {
*csicommon.DefaultNodeServer
}
func getCredentialsForVolume(volOptions *volumeOptions, volId volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
var (
userCr = &credentials{}
err error
cr *credentials
secrets = req.GetSecrets()
)
if volOptions.ProvisionVolume {
@ -44,40 +46,43 @@ func getCredentialsForVolume(volOptions *volumeOptions, volId volumeID, req *csi
// First, store admin credentials - those are needed for retrieving the user credentials
adminCr, err := getAdminCredentials(req.GetSecrets())
adminCr, err := getAdminCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
}
if err = storeCephCredentials(volId, adminCr); err != nil {
if err = storeCephCredentials(volID, adminCr); err != nil {
return nil, fmt.Errorf("failed to store ceph admin credentials: %v", err)
}
// Then get the ceph user
entity, err := getCephUser(adminCr, volId)
entity, err := getCephUser(volOptions, adminCr, volID)
if err != nil {
return nil, fmt.Errorf("failed to get ceph user: %v", err)
}
userCr = entity.toCredentials()
cr = entity.toCredentials()
} else {
// The volume is pre-made, credentials are in node stage secrets
userCr, err = getUserCredentials(req.GetSecrets())
userCr, err := getUserCredentials(req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
}
cr = userCr
}
if err = storeCephCredentials(volId, userCr); err != nil {
if err := storeCephCredentials(volID, cr); err != nil {
return nil, fmt.Errorf("failed to store ceph user credentials: %v", err)
}
return userCr, nil
return cr, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
if err := validateNodeStageVolumeRequest(req); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
@ -85,27 +90,21 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
// Configuration
stagingTargetPath := req.GetStagingTargetPath()
volId := volumeID(req.GetVolumeId())
volID := volumeID(req.GetVolumeId())
volOptions, err := newVolumeOptions(req.GetVolumeContext())
volOptions, err := newVolumeOptions(req.GetVolumeContext(), req.GetSecrets())
if err != nil {
glog.Errorf("error reading volume options for volume %s: %v", volId, err)
klog.Errorf("error reading volume options for volume %s: %v", volID, err)
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if volOptions.ProvisionVolume {
// Dynamically provisioned volumes don't have their root path set, do it here
volOptions.RootPath = getVolumeRootPath_ceph(volId)
volOptions.RootPath = getVolumeRootPathCeph(volID)
}
if err = createMountPoint(stagingTargetPath); err != nil {
glog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volId, err)
return nil, status.Error(codes.Internal, err.Error())
}
cephConf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volId}
if err = cephConf.writeToFile(); err != nil {
glog.Errorf("failed to write ceph config file to %s for volume %s: %v", getCephConfPath(volId), volId, err)
klog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
@ -114,41 +113,53 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
isMnt, err := isMountPoint(stagingTargetPath)
if err != nil {
glog.Errorf("stat failed: %v", err)
klog.Errorf("stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
glog.Infof("cephfs: volume %s is already mounted to %s, skipping", volId, stagingTargetPath)
klog.Infof("cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
// It's not, mount now
cr, err := getCredentialsForVolume(volOptions, volId, req)
if err != nil {
glog.Errorf("failed to get ceph credentials for volume %s: %v", volId, err)
return nil, status.Error(codes.Internal, err.Error())
if err = ns.mount(volOptions, req); err != nil {
return nil, err
}
m, err := newMounter(volOptions)
if err != nil {
glog.Errorf("failed to create mounter for volume %s: %v", volId, err)
}
glog.V(4).Infof("cephfs: mounting volume %s with %s", volId, m.name())
if err = m.mount(stagingTargetPath, cr, volOptions, volId); err != nil {
glog.Errorf("failed to mount volume %s: %v", volId, err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.Infof("cephfs: successfully mounted volume %s to %s", volId, stagingTargetPath)
klog.Infof("cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
cr, err := getCredentialsForVolume(volOptions, volID, req)
if err != nil {
klog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
m, err := newMounter(volOptions)
if err != nil {
klog.Errorf("failed to create mounter for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof("cephfs: mounting volume %s with %s", volID, m.name())
if err = m.mount(stagingTargetPath, cr, volOptions, volID); err != nil {
klog.Errorf("failed to mount volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
return nil
}
// NodePublishVolume bind-mounts the volume from the staging path to the
// target path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
if err := validateNodePublishVolumeRequest(req); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
@ -156,10 +167,10 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
// Configuration
targetPath := req.GetTargetPath()
volId := req.GetVolumeId()
volID := req.GetVolumeId()
if err := createMountPoint(targetPath); err != nil {
glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
klog.Errorf("failed to create mount point at %s: %v", targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
@ -168,66 +179,75 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
isMnt, err := isMountPoint(targetPath)
if err != nil {
glog.Errorf("stat failed: %v", err)
klog.Errorf("stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
glog.Infof("cephfs: volume %s is already bind-mounted to %s", volId, targetPath)
klog.Infof("cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
// It's not, mount now
if err = bindMount(req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly()); err != nil {
glog.Errorf("failed to bind-mount volume %s: %v", volId, err)
klog.Errorf("failed to bind-mount volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
}
glog.Infof("cephfs: successfully bind-mounted volume %s to %s", volId, targetPath)
klog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
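// Illustrative sketch (not part of this hunk): bindMount is defined elsewhere
// in this package and its body is not shown here. On Linux the usual approach,
// and roughly what such a helper does, is a bind mount followed by an optional
// read-only remount (execCommandAndValidate is the package helper shown
// further below; assume "fmt" is imported):

func bindMountSketch(from, to string, readOnly bool) error {
	if err := execCommandAndValidate("mount", "--bind", from, to); err != nil {
		return fmt.Errorf("failed to bind-mount %s to %s: %v", from, to, err)
	}
	if readOnly {
		// A bind mount cannot be created read-only in one step; remount it.
		if err := execCommandAndValidate("mount", "-o", "remount,ro,bind", to); err != nil {
			return fmt.Errorf("failed to remount %s read-only: %v", to, err)
		}
	}
	return nil
}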
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
if err := validateNodeUnpublishVolumeRequest(req); err != nil {
// NodeUnpublishVolume unmounts the volume from the target path
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
var err error
if err = validateNodeUnpublishVolumeRequest(req); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
targetPath := req.GetTargetPath()
// Unmount the bind-mount
if err := unmountVolume(targetPath); err != nil {
if err = unmountVolume(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
os.Remove(targetPath)
if err = os.Remove(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.Infof("cephfs: successfully unbinded volume %s from %s", req.GetVolumeId(), targetPath)
klog.Infof("cephfs: successfully unbinded volume %s from %s", req.GetVolumeId(), targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
if err := validateNodeUnstageVolumeRequest(req); err != nil {
// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
var err error
if err = validateNodeUnstageVolumeRequest(req); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
stagingTargetPath := req.GetStagingTargetPath()
// Unmount the volume
if err := unmountVolume(stagingTargetPath); err != nil {
if err = unmountVolume(stagingTargetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
os.Remove(stagingTargetPath)
if err = os.Remove(stagingTargetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.Infof("cephfs: successfully umounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
klog.Infof("cephfs: successfully umounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
// NodeGetCapabilities returns the supported capabilities of the node server
func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{
{

View File

@ -19,12 +19,13 @@ package cephfs
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"os/exec"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
"github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/kubernetes/pkg/util/mount"
@ -37,9 +38,9 @@ func makeVolumeID(volName string) volumeID {
}
func execCommand(command string, args ...string) ([]byte, error) {
glog.V(4).Infof("cephfs: EXEC %s %s", command, args)
klog.V(4).Infof("cephfs: EXEC %s %s", command, args)
cmd := exec.Command(command, args...)
cmd := exec.Command(command, args...) // #nosec
return cmd.CombinedOutput()
}
@ -52,7 +53,8 @@ func execCommandAndValidate(program string, args ...string) error {
return nil
}
func execCommandJson(v interface{}, program string, args ...string) error {
func execCommandJSON(v interface{}, args ...string) error {
program := "ceph"
out, err := execCommand(program, args...)
if err != nil {
@ -74,11 +76,11 @@ func isMountPoint(p string) (bool, error) {
return !notMnt, nil
}
func storeCephCredentials(volId volumeID, cr *credentials) error {
func storeCephCredentials(volID volumeID, cr *credentials) error {
keyringData := cephKeyringData{
UserId: cr.id,
UserID: cr.id,
Key: cr.key,
VolumeID: volId,
VolumeID: volID,
}
if err := keyringData.writeToFile(); err != nil {
@ -86,23 +88,20 @@ func storeCephCredentials(volId volumeID, cr *credentials) error {
}
secret := cephSecretData{
UserId: cr.id,
UserID: cr.id,
Key: cr.key,
VolumeID: volId,
VolumeID: volID,
}
if err := secret.writeToFile(); err != nil {
err := secret.writeToFile()
return err
}
return nil
}
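// Illustrative sketch (not part of this hunk): cephKeyringData and
// cephSecretData with their writeToFile methods are defined elsewhere in this
// package and not shown in this diff. The files they produce follow the
// standard ceph formats: a keyring (passed to ceph-fuse via --keyring) and a
// secret file holding only the raw key (passed to mount.ceph via secretfile=).
// Roughly:

func cephKeyringContentSketch(userID, key string) string {
	// e.g. "[client.csi-user]\nkey = AQB-example-key==\n"
	return fmt.Sprintf("[client.%s]\nkey = %s\n", userID, key)
}

func cephSecretContentSketch(key string) string {
	// the secret file carries nothing but the key itself
	return key
}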
//
// Controller service request validation
//
func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return fmt.Errorf("invalid CreateVolumeRequest: %v", err)
}
@ -111,14 +110,21 @@ func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeReq
return status.Error(codes.InvalidArgument, "Volume Name cannot be empty")
}
if req.GetVolumeCapabilities() == nil {
reqCaps := req.GetVolumeCapabilities()
if reqCaps == nil {
return status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
}
for _, cap := range reqCaps {
if cap.GetBlock() != nil {
return status.Error(codes.Unimplemented, "block volume not supported")
}
}
return nil
}
func (cs *controllerServer) validateDeleteVolumeRequest(req *csi.DeleteVolumeRequest) error {
func (cs *ControllerServer) validateDeleteVolumeRequest() error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return fmt.Errorf("invalid DeleteVolumeRequest: %v", err)
}
@ -132,19 +138,19 @@ func (cs *controllerServer) validateDeleteVolumeRequest(req *csi.DeleteVolumeReq
func validateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
if req.GetVolumeCapability() == nil {
return fmt.Errorf("volume capability missing in request")
return errors.New("volume capability missing in request")
}
if req.GetVolumeId() == "" {
return fmt.Errorf("volume ID missing in request")
return errors.New("volume ID missing in request")
}
if req.GetStagingTargetPath() == "" {
return fmt.Errorf("staging target path missing in request")
return errors.New("staging target path missing in request")
}
if req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {
return fmt.Errorf("stage secrets cannot be nil or empty")
return errors.New("stage secrets cannot be nil or empty")
}
return nil
@ -152,11 +158,11 @@ func validateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
func validateNodeUnstageVolumeRequest(req *csi.NodeUnstageVolumeRequest) error {
if req.GetVolumeId() == "" {
return fmt.Errorf("volume ID missing in request")
return errors.New("volume ID missing in request")
}
if req.GetStagingTargetPath() == "" {
return fmt.Errorf("staging target path missing in request")
return errors.New("staging target path missing in request")
}
return nil
@ -164,15 +170,15 @@ func validateNodeUnstageVolumeRequest(req *csi.NodeUnstageVolumeRequest) error {
func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {
if req.GetVolumeCapability() == nil {
return fmt.Errorf("volume capability missing in request")
return errors.New("volume capability missing in request")
}
if req.GetVolumeId() == "" {
return fmt.Errorf("volume ID missing in request")
return errors.New("volume ID missing in request")
}
if req.GetTargetPath() == "" {
return fmt.Errorf("varget path missing in request")
return errors.New("varget path missing in request")
}
return nil
@ -180,11 +186,11 @@ func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {
func validateNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) error {
if req.GetVolumeId() == "" {
return fmt.Errorf("volume ID missing in request")
return errors.New("volume ID missing in request")
}
if req.GetTargetPath() == "" {
return fmt.Errorf("target path missing in request")
return errors.New("target path missing in request")
}
return nil

View File

@ -20,6 +20,8 @@ import (
"fmt"
"os"
"path"
"k8s.io/klog"
)
const (
@ -29,28 +31,28 @@ const (
namespacePrefix = "ns-"
)
func getCephRootPath_local(volId volumeID) string {
return cephRootPrefix + string(volId)
func getCephRootPathLocal(volID volumeID) string {
return cephRootPrefix + string(volID)
}
func getCephRootVolumePath_local(volId volumeID) string {
return path.Join(getCephRootPath_local(volId), cephVolumesRoot, string(volId))
func getCephRootVolumePathLocal(volID volumeID) string {
return path.Join(getCephRootPathLocal(volID), cephVolumesRoot, string(volID))
}
func getVolumeRootPath_ceph(volId volumeID) string {
return path.Join("/", cephVolumesRoot, string(volId))
func getVolumeRootPathCeph(volID volumeID) string {
return path.Join("/", cephVolumesRoot, string(volID))
}
func getVolumeNamespace(volId volumeID) string {
return namespacePrefix + string(volId)
func getVolumeNamespace(volID volumeID) string {
return namespacePrefix + string(volID)
}
func setVolumeAttribute(root, attrName, attrValue string) error {
return execCommandAndValidate("setfattr", "-n", attrName, "-v", attrValue, root)
}
func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeID, bytesQuota int64) error {
cephRoot := getCephRootPath_local(volId)
func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error {
cephRoot := getCephRootPathLocal(volID)
if err := createMountPoint(cephRoot); err != nil {
return err
@ -65,17 +67,14 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI
return fmt.Errorf("failed to create mounter: %v", err)
}
if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil {
if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil {
return fmt.Errorf("error mounting ceph root: %v", err)
}
defer func() {
unmountVolume(cephRoot)
os.Remove(cephRoot)
}()
defer unmountAndRemove(cephRoot)
volOptions.RootPath = getVolumeRootPath_ceph(volId)
localVolRoot := getCephRootVolumePath_local(volId)
volOptions.RootPath = getVolumeRootPathCeph(volID)
localVolRoot := getCephRootVolumePathLocal(volID)
if err := createMountPoint(localVolRoot); err != nil {
return err
@ -91,17 +90,17 @@ func createVolume(volOptions *volumeOptions, adminCr *credentials, volId volumeI
return fmt.Errorf("%v\ncephfs: Does pool '%s' exist?", err, volOptions.Pool)
}
if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volId)); err != nil {
if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil {
return err
}
return nil
}
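// Illustrative sketch (not part of this hunk): the lines elided from
// createVolume above set the remaining directory attributes through
// setVolumeAttribute (shown earlier). The xattr names below are the standard
// CephFS ones for quotas and layouts; treat the exact calls as an assumption,
// since they are cut from this hunk:

func setVolumeLimitsSketch(localVolRoot, pool string, volID volumeID, bytesQuota int64) error {
	if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
		return err
	}
	// Failure here is what produces the "Does pool '%s' exist?" error above.
	if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool", pool); err != nil {
		return err
	}
	return setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID))
}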
func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions) error {
func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions) error {
var (
cephRoot = getCephRootPath_local(volId)
volRoot = getCephRootVolumePath_local(volId)
cephRoot = getCephRootPathLocal(volID)
volRoot = getCephRootVolumePathLocal(volID)
volRootDeleting = volRoot + "-deleting"
)
@ -118,22 +117,30 @@ func purgeVolume(volId volumeID, adminCr *credentials, volOptions *volumeOptions
return fmt.Errorf("failed to create mounter: %v", err)
}
if err = m.mount(cephRoot, adminCr, volOptions, volId); err != nil {
if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil {
return fmt.Errorf("error mounting ceph root: %v", err)
}
defer func() {
unmountVolume(volRoot)
os.Remove(volRoot)
}()
defer unmountAndRemove(cephRoot)
if err := os.Rename(volRoot, volRootDeleting); err != nil {
return fmt.Errorf("coudln't mark volume %s for deletion: %v", volId, err)
return fmt.Errorf("coudln't mark volume %s for deletion: %v", volID, err)
}
if err := os.RemoveAll(volRootDeleting); err != nil {
return fmt.Errorf("failed to delete volume %s: %v", volId, err)
return fmt.Errorf("failed to delete volume %s: %v", volID, err)
}
return nil
}
func unmountAndRemove(mountPoint string) {
var err error
if err = unmountVolume(mountPoint); err != nil {
klog.Errorf("failed to unmount %s with error %s", mountPoint, err)
}
if err = os.Remove(mountPoint); err != nil {
klog.Errorf("failed to remove %s with error %s", mountPoint, err)
}
}

View File

@ -18,14 +18,15 @@ package cephfs
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
)
const (
volumeMounter_fuse = "fuse"
volumeMounter_kernel = "kernel"
volumeMounterFuse = "fuse"
volumeMounterKernel = "kernel"
)
var (
@ -35,26 +36,28 @@ var (
// Load available ceph mounters installed on system into availableMounters
// Called from driver.go's Run()
func loadAvailableMounters() error {
// #nosec
fuseMounterProbe := exec.Command("ceph-fuse", "--version")
// #nosec
kernelMounterProbe := exec.Command("mount.ceph")
if fuseMounterProbe.Run() == nil {
availableMounters = append(availableMounters, volumeMounter_fuse)
availableMounters = append(availableMounters, volumeMounterFuse)
}
if kernelMounterProbe.Run() == nil {
availableMounters = append(availableMounters, volumeMounter_kernel)
availableMounters = append(availableMounters, volumeMounterKernel)
}
if len(availableMounters) == 0 {
return fmt.Errorf("no ceph mounters found on system")
return errors.New("no ceph mounters found on system")
}
return nil
}
type volumeMounter interface {
mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error
mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error
name() string
}
@ -87,9 +90,9 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
// Create the mounter
switch chosenMounter {
case volumeMounter_fuse:
case volumeMounterFuse:
return &fuseMounter{}, nil
case volumeMounter_kernel:
case volumeMounterKernel:
return &kernelMounter{}, nil
}
@ -98,12 +101,13 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
type fuseMounter struct{}
func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
args := [...]string{
mountPoint,
"-c", getCephConfPath(volId),
"-m", volOptions.Monitors,
"-c", cephConfigPath,
"-n", cephEntityClientPrefix + cr.id,
"--keyring", getCephKeyringPath(volId, cr.id),
"--keyring", getCephKeyringPath(volID, cr.id),
"-r", volOptions.RootPath,
"-o", "nonempty",
}
@ -120,19 +124,19 @@ func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, vo
return nil
}
func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
if err := createMountPoint(mountPoint); err != nil {
return err
}
return mountFuse(mountPoint, cr, volOptions, volId)
return mountFuse(mountPoint, cr, volOptions, volID)
}
func (m *fuseMounter) name() string { return "Ceph FUSE driver" }
type kernelMounter struct{}
func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
if err := execCommandAndValidate("modprobe", "ceph"); err != nil {
return err
}
@ -142,16 +146,16 @@ func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions,
fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
mountPoint,
"-o",
fmt.Sprintf("name=%s,secretfile=%s", cr.id, getCephSecretPath(volId, cr.id)),
fmt.Sprintf("name=%s,secretfile=%s", cr.id, getCephSecretPath(volID, cr.id)),
)
}
func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volId volumeID) error {
func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
if err := createMountPoint(mountPoint); err != nil {
return err
}
return mountKernel(mountPoint, cr, volOptions, volId)
return mountKernel(mountPoint, cr, volOptions, volID)
}
func (m *kernelMounter) name() string { return "Ceph kernel client" }
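// Illustrative sketch (not part of this hunk): the two mounters above reduce
// to command lines along these lines (placeholders in angle brackets; the
// "mount -t ceph" prefix of the kernel path is cut from this hunk, so treat it
// as an assumption):
//
//   ceph-fuse <mountpoint> -m <monitors> -c <ceph.conf> \
//       -n client.<id> --keyring <keyring> -r <rootpath> -o nonempty
//
//   mount -t ceph <monitors>:<rootpath> <mountpoint> \
//       -o name=<id>,secretfile=<secretfile>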

View File

@ -17,7 +17,6 @@ limitations under the License.
package cephfs
import (
"errors"
"fmt"
"strconv"
)
@ -29,11 +28,13 @@ type volumeOptions struct {
Mounter string `json:"mounter"`
ProvisionVolume bool `json:"provisionVolume"`
MonValueFromSecret string `json:"monValueFromSecret"`
}
func validateNonEmptyField(field, fieldName string) error {
if field == "" {
return fmt.Errorf("Parameter '%s' cannot be empty", fieldName)
return fmt.Errorf("parameter '%s' cannot be empty", fieldName)
}
return nil
@ -41,8 +42,10 @@ func validateNonEmptyField(field, fieldName string) error {
func (o *volumeOptions) validate() error {
if err := validateNonEmptyField(o.Monitors, "monitors"); err != nil {
if err = validateNonEmptyField(o.MonValueFromSecret, "monValueFromSecret"); err != nil {
return err
}
}
if err := validateNonEmptyField(o.RootPath, "rootPath"); err != nil {
if !o.ProvisionVolume {
@ -50,7 +53,7 @@ func (o *volumeOptions) validate() error {
}
} else {
if o.ProvisionVolume {
return fmt.Errorf("Non-empty field rootPath is in conflict with provisionVolume=true")
return fmt.Errorf("non-empty field rootPath is in conflict with provisionVolume=true")
}
}
@ -70,56 +73,49 @@ func (o *volumeOptions) validate() error {
}
func extractOption(dest *string, optionLabel string, options map[string]string) error {
if opt, ok := options[optionLabel]; !ok {
return errors.New("Missing required field " + optionLabel)
} else {
opt, ok := options[optionLabel]
if !ok {
return fmt.Errorf("missing required field %s", optionLabel)
}
*dest = opt
return nil
}
}
func validateMounter(m string) error {
switch m {
case volumeMounter_fuse:
case volumeMounter_kernel:
case volumeMounterFuse:
case volumeMounterKernel:
default:
return fmt.Errorf("Unknown mounter '%s'. Valid options are 'fuse' and 'kernel'", m)
return fmt.Errorf("unknown mounter '%s'. Valid options are 'fuse' and 'kernel'", m)
}
return nil
}
func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
func newVolumeOptions(volOptions, secret map[string]string) (*volumeOptions, error) {
var (
opts volumeOptions
provisionVolumeBool string
err error
)
// extract mon from secret first
if err = extractOption(&opts.MonValueFromSecret, "monValueFromSecret", volOptions); err == nil {
mon := ""
if mon, err = getMonValFromSecret(secret); err == nil && len(mon) > 0 {
opts.Monitors = mon
}
}
if len(opts.Monitors) == 0 {
// if not set in secret, get it from parameter
if err = extractOption(&opts.Monitors, "monitors", volOptions); err != nil {
return nil, err
}
if err = extractOption(&provisionVolumeBool, "provisionVolume", volOptions); err != nil {
return nil, err
}
if opts.ProvisionVolume, err = strconv.ParseBool(provisionVolumeBool); err != nil {
return nil, fmt.Errorf("Failed to parse provisionVolume: %v", err)
}
if opts.ProvisionVolume {
if err = extractOption(&opts.Pool, "pool", volOptions); err != nil {
return nil, err
}
} else {
if err = extractOption(&opts.RootPath, "rootPath", volOptions); err != nil {
return nil, err
return nil, fmt.Errorf("either monitors or monValueFromSecret should be set")
}
}
// This field is optional, don't check for its presence
extractOption(&opts.Mounter, "mounter", volOptions)
if err = extractNewVolOpt(&opts, volOptions); err != nil {
return nil, err
}
if err = opts.validate(); err != nil {
return nil, err
@ -127,3 +123,33 @@ func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
return &opts, nil
}
func extractNewVolOpt(opts *volumeOptions, volOpt map[string]string) error {
var (
provisionVolumeBool string
err error
)
if err = extractOption(&provisionVolumeBool, "provisionVolume", volOpt); err != nil {
return err
}
if opts.ProvisionVolume, err = strconv.ParseBool(provisionVolumeBool); err != nil {
return fmt.Errorf("failed to parse provisionVolume: %v", err)
}
if opts.ProvisionVolume {
if err = extractOption(&opts.Pool, "pool", volOpt); err != nil {
return err
}
} else {
if err = extractOption(&opts.RootPath, "rootPath", volOpt); err != nil {
return err
}
}
// This field is optional, don't check for its presence
// nolint
// (skip errcheck and gosec as this is optional)
extractOption(&opts.Mounter, "mounter", volOpt)
return nil
}
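// Illustrative sketch (not part of this changeset): newVolumeOptions consumes
// the StorageClass parameters plus the provisioner/node secret. With the keys
// handled above, a dynamically provisioned volume needs roughly the following
// input (all values made up):

func exampleVolumeOptions() (*volumeOptions, error) {
	params := map[string]string{
		"provisionVolume": "true",
		"pool":            "cephfs_data",
		"mounter":         "fuse", // optional; falls back to the detected default
	}
	// Monitors may come from the secret ("monitors" key) or, failing that,
	// from a "monitors" parameter; a statically provisioned volume would set
	// provisionVolume=false and "rootPath" instead of "pool".
	secret := map[string]string{
		"monitors": "mon1:6789,mon2:6789,mon3:6789",
	}
	return newVolumeOptions(params, secret)
}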

View File

@ -20,65 +20,75 @@ import (
"fmt"
"os"
"os/exec"
"path"
"syscall"
"time"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
)
const (
oneGB = 1073741824
)
type controllerServer struct {
// ControllerServer struct of rbd CSI driver with supported methods of CSI
// controller server spec.
type ControllerServer struct {
*csicommon.DefaultControllerServer
MetadataStore util.CachePersister
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
var (
rbdVolumes = map[string]*rbdVolume{}
rbdSnapshots = map[string]*rbdSnapshot{}
)
// LoadExDataFromMetadataStore loads the rbd volume and snapshot
// info from metadata store
func (cs *ControllerServer) LoadExDataFromMetadataStore() error {
vol := &rbdVolume{}
// nolint
cs.MetadataStore.ForAll("csi-rbd-vol-", vol, func(identifier string) error {
rbdVolumes[identifier] = vol
return nil
})
snap := &rbdSnapshot{}
// nolint
cs.MetadataStore.ForAll("csi-rbd-(.*)-snap-", snap, func(identifier string) error {
rbdSnapshots[identifier] = snap
return nil
})
klog.Infof("Loaded %d volumes and %d snapshots from metadata store", len(rbdVolumes), len(rbdSnapshots))
return nil
}
func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
klog.V(3).Infof("invalid create volume req: %v", protosanitizer.StripSecrets(req))
return err
}
// Check sanity of request Name, Volume Capabilities
if len(req.Name) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty")
return status.Error(codes.InvalidArgument, "Volume Name cannot be empty")
}
if req.VolumeCapabilities == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
}
volumeNameMutex.LockKey(req.GetName())
defer volumeNameMutex.UnlockKey(req.GetName())
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getRBDVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of exisiting volume is the same as in new
// request
if exVol.VolSize >= int64(req.GetCapacityRange().GetRequiredBytes()) {
// exisiting volume is compatible with new request and should be reused.
// TODO (sbezverk) Do I need to make sure that RBD volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
VolumeContext: req.GetParameters(),
},
}, nil
}
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Volume with the same name: %s but with different size already exist", req.GetName()))
return status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
}
return nil
}
func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) {
// TODO (sbezverk) Last check for not exceeding total storage capacity
rbdVol, err := getRBDVolumeOptions(req.GetParameters())
@ -93,93 +103,163 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
volName = rbdVol.Pool + "-dynamic-pvc-" + uniqueID
}
rbdVol.VolName = volName
volumeID := "csi-rbd-" + uniqueID
volumeID := "csi-rbd-vol-" + uniqueID
rbdVol.VolID = volumeID
// Volume Size - Default is 1 GiB
volSizeBytes := int64(oneGB)
if req.GetCapacityRange() != nil {
volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes())
volSizeBytes = req.GetCapacityRange().GetRequiredBytes()
}
rbdVol.VolSize = volSizeBytes
volSizeGB := int(volSizeBytes / 1024 / 1024 / 1024)
// Check if there is already RBD image with requested name
found, _, _ := rbdStatus(rbdVol, rbdVol.UserId, req.GetSecrets())
if !found {
// if VolumeContentSource is not nil, this request is for snapshot
if req.VolumeContentSource != nil {
snapshot := req.VolumeContentSource.GetSnapshot()
if snapshot == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Snapshot cannot be empty")
}
return rbdVol, nil
}
snapshotID := snapshot.GetSnapshotId()
if len(snapshotID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume Snapshot ID cannot be empty")
}
// CreateVolume creates the volume in the backend and stores the volume metadata
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
rbdSnap := &rbdSnapshot{}
if err := loadSnapInfo(snapshotID, path.Join(PluginFolder, "controller-snap"), rbdSnap); err != nil {
if err := cs.validateVolumeReq(req); err != nil {
return nil, err
}
volumeNameMutex.LockKey(req.GetName())
defer func() {
if err := volumeNameMutex.UnlockKey(req.GetName()); err != nil {
klog.Warningf("failed to unlock mutex volume:%s %v", req.GetName(), err)
}
}()
err = restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminId, req.GetSecrets())
if err != nil {
return nil, err
}
glog.V(4).Infof("create volume %s from snapshot %s", volName, rbdSnap.SnapName)
} else {
if err := createRBDImage(rbdVol, volSizeGB, rbdVol.AdminId, req.GetSecrets()); err != nil {
if err != nil {
glog.Warningf("failed to create volume: %v", err)
return nil, err
}
}
glog.V(4).Infof("create volume %s", volName)
}
}
// Storing volInfo into a persistent file.
if err := persistVolInfo(volumeID, path.Join(PluginFolder, "controller"), rbdVol); err != nil {
glog.Warningf("rbd: failed to store volInfo with error: %v", err)
}
rbdVolumes[volumeID] = rbdVol
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getRBDVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of existing volume is the same as in new
// request
if exVol.VolSize >= req.GetCapacityRange().GetRequiredBytes() {
// existing volume is compatible with new request and should be reused.
// TODO (sbezverk) Do I need to make sure that RBD volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: int64(volSizeBytes),
VolumeId: exVol.VolID,
CapacityBytes: exVol.VolSize,
VolumeContext: req.GetParameters(),
},
}, nil
}
return nil, status.Errorf(codes.AlreadyExists, "Volume with the same name: %s but with different size already exist", req.GetName())
}
rbdVol, err := parseVolCreateRequest(req)
if err != nil {
return nil, err
}
volSizeGB := int(rbdVol.VolSize / 1024 / 1024 / 1024)
// Check if there is already RBD image with requested name
err = cs.checkRBDStatus(rbdVol, req, volSizeGB)
if err != nil {
return nil, err
}
if createErr := cs.MetadataStore.Create(rbdVol.VolID, rbdVol); createErr != nil {
klog.Warningf("failed to store volume metadata with error: %v", err)
if err = deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil {
klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)
return nil, err
}
return nil, createErr
}
rbdVolumes[rbdVol.VolID] = rbdVol
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: rbdVol.VolID,
CapacityBytes: rbdVol.VolSize,
VolumeContext: req.GetParameters(),
},
}, nil
}
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeGB int) error {
var err error
// Check if there is already RBD image with requested name
found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets()) // #nosec
if !found {
// if VolumeContentSource is not nil, this request is for snapshot
if req.VolumeContentSource != nil {
if err = cs.checkSnapshot(req, rbdVol); err != nil {
return err
}
} else {
err = createRBDImage(rbdVol, volSizeGB, rbdVol.AdminID, req.GetSecrets())
if err != nil {
klog.Warningf("failed to create volume: %v", err)
return err
}
klog.V(4).Infof("create volume %s", rbdVol.VolName)
}
}
return nil
}
func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *rbdVolume) error {
snapshot := req.VolumeContentSource.GetSnapshot()
if snapshot == nil {
return status.Error(codes.InvalidArgument, "Volume Snapshot cannot be empty")
}
snapshotID := snapshot.GetSnapshotId()
if len(snapshotID) == 0 {
return status.Error(codes.InvalidArgument, "Volume Snapshot ID cannot be empty")
}
rbdSnap := &rbdSnapshot{}
if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil {
return err
}
err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminID, req.GetSecrets())
if err != nil {
return err
}
klog.V(4).Infof("create volume %s from snapshot %s", req.GetName(), rbdSnap.SnapName)
return nil
}
// DeleteVolume deletes the volume in backend and removes the volume metadata
// from store
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.Warningf("invalid delete volume req: %v", req)
klog.Warningf("invalid delete volume req: %v", protosanitizer.StripSecrets(req))
return nil, err
}
// For now the image get unconditionally deleted, but here retention policy can be checked
volumeID := req.GetVolumeId()
volumeIDMutex.LockKey(volumeID)
defer volumeIDMutex.UnlockKey(volumeID)
defer func() {
if err := volumeIDMutex.UnlockKey(volumeID); err != nil {
klog.Warningf("failed to unlock mutex volume:%s %v", volumeID, err)
}
}()
rbdVol := &rbdVolume{}
if err := loadVolInfo(volumeID, path.Join(PluginFolder, "controller"), rbdVol); err != nil {
if err := cs.MetadataStore.Get(volumeID, rbdVol); err != nil {
if os.IsNotExist(errors.Cause(err)) {
// Must have been deleted already. This is not an error (idempotency!).
return &csi.DeleteVolumeResponse{}, nil
}
return nil, err
}
volName := rbdVol.VolName
// Deleting rbd image
glog.V(4).Infof("deleting volume %s", volName)
if err := deleteRBDImage(rbdVol, rbdVol.AdminId, req.GetSecrets()); err != nil {
klog.V(4).Infof("deleting volume %s", volName)
if err := deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil {
// TODO: can we detect "already deleted" situations here and proceed?
glog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err)
klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err)
return nil, err
}
// Removing persistent storage file for the unmapped volume
if err := deleteVolInfo(volumeID, path.Join(PluginFolder, "controller")); err != nil {
if err := cs.MetadataStore.Delete(volumeID); err != nil {
return nil, err
}
@ -187,7 +267,9 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
return &csi.DeleteVolumeResponse{}, nil
}
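// Illustrative sketch (not part of this changeset): the os.IsNotExist check in
// DeleteVolume above relies on the metadata store wrapping the underlying file
// error with github.com/pkg/errors (an assumption about the store), so that
// errors.Cause can unwrap it again. A made-up error shows the round-trip:

func isAlreadyDeletedSketch() bool {
	_, statErr := os.Stat("/var/lib/kubelet/plugins/hypothetical-missing-entry.json")
	wrapped := errors.Wrap(statErr, "failed to read cache entry")

	// errors.Cause digs back down to the *os.PathError, so os.IsNotExist can
	// recognize it and the volume is treated as already gone (idempotency).
	return os.IsNotExist(errors.Cause(wrapped))
}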
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
for _, cap := range req.VolumeCapabilities {
if cap.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
@ -200,30 +282,30 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req
}, nil
}
func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
// ControllerUnpublishVolume returns success response
func (cs *ControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
return &csi.ControllerUnpublishVolumeResponse{}, nil
}
func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
// ControllerPublishVolume returns success response
func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
return &csi.ControllerPublishVolumeResponse{}, nil
}
func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
glog.Warningf("invalid create snapshot req: %v", req)
// CreateSnapshot creates the snapshot in backend and stores metadata
// in store
func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
if err := cs.validateSnapshotReq(req); err != nil {
return nil, err
}
// Check sanity of request Snapshot Name, Source Volume Id
if len(req.Name) == 0 {
return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty")
}
if len(req.SourceVolumeId) == 0 {
return nil, status.Error(codes.InvalidArgument, "Source Volume ID cannot be empty")
}
snapshotNameMutex.LockKey(req.GetName())
defer snapshotNameMutex.UnlockKey(req.GetName())
defer func() {
if err := snapshotNameMutex.UnlockKey(req.GetName()); err != nil {
klog.Warningf("failed to unlock mutex snapshot:%s %v", req.GetName(), err)
}
}()
// Need to check for already existing snapshot name, and if found
// check for the requested source volume id and already allocated source volume id
@ -241,7 +323,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
},
}, nil
}
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Snapshot with the same name: %s but with different source volume id already exist", req.GetName()))
return nil, status.Errorf(codes.AlreadyExists, "Snapshot with the same name: %s but with different source volume id already exist", req.GetName())
}
rbdSnap, err := getRBDSnapshotOptions(req.GetParameters())
@ -254,10 +336,10 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
uniqueID := uuid.NewUUID().String()
rbdVolume, err := getRBDVolumeByID(req.GetSourceVolumeId())
if err != nil {
return nil, status.Error(codes.NotFound, fmt.Sprintf("Source Volume ID %s cannot found", req.GetSourceVolumeId()))
return nil, status.Errorf(codes.NotFound, "Source Volume ID %s cannot found", req.GetSourceVolumeId())
}
if !hasSnapshotFeature(rbdVolume.ImageFeatures) {
return nil, fmt.Errorf("Volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId())
return nil, fmt.Errorf("volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId())
}
rbdSnap.VolName = rbdVolume.VolName
@ -267,58 +349,18 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
rbdSnap.SourceVolumeID = req.GetSourceVolumeId()
rbdSnap.SizeBytes = rbdVolume.VolSize
err = createSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets())
err = cs.doSnapshot(rbdSnap, req.GetSecrets())
// if we already have the snapshot, return the snapshot
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
if status.ExitStatus() == int(syscall.EEXIST) {
glog.Warningf("Snapshot with the same name: %s, we return this.", req.GetName())
} else {
glog.Warningf("failed to create snapshot: %v", err)
return nil, err
}
} else {
glog.Warningf("failed to create snapshot: %v", err)
rbdSnap.CreatedAt = ptypes.TimestampNow().GetSeconds()
if err = cs.storeSnapMetadata(rbdSnap, req.GetSecrets()); err != nil {
return nil, err
}
} else {
glog.Warningf("failed to create snapshot: %v", err)
return nil, err
}
} else {
glog.V(4).Infof("create snapshot %s", snapName)
err = protectSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets())
if err != nil {
err = deleteSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets())
if err != nil {
return nil, fmt.Errorf("snapshot is created but failed to protect and delete snapshot: %v", err)
}
return nil, fmt.Errorf("Snapshot is created but failed to protect snapshot")
}
}
rbdSnap.CreatedAt = time.Now().UnixNano()
// Storing snapInfo into a persistent file.
if err := persistSnapInfo(snapshotID, path.Join(PluginFolder, "controller-snap"), rbdSnap); err != nil {
glog.Warningf("rbd: failed to store snapInfo with error: %v", err)
// Unprotect snapshot
err := unprotectSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Unknown, fmt.Sprintf("This Snapshot should be removed but failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err))
}
// Deleting snapshot
glog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName)
if err := deleteSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()); err != nil {
return nil, status.Error(codes.Unknown, fmt.Sprintf("This Snapshot should be removed but failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err))
}
return nil, err
}
rbdSnapshots[snapshotID] = rbdSnap
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
@ -333,9 +375,80 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
}, nil
}
func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
func (cs *ControllerServer) storeSnapMetadata(rbdSnap *rbdSnapshot, secret map[string]string) error {
errCreate := cs.MetadataStore.Create(rbdSnap.SnapID, rbdSnap)
if errCreate != nil {
klog.Warningf("rbd: failed to store snapInfo with error: %v", errCreate)
// Unprotect snapshot
err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if err != nil {
return status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err)
}
// Deleting snapshot
klog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName)
if err = deleteSnapshot(rbdSnap, rbdSnap.AdminID, secret); err != nil {
return status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err)
}
}
return errCreate
}
func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest) error {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
glog.Warningf("invalid delete snapshot req: %v", req)
klog.Warningf("invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
return err
}
// Check sanity of request Snapshot Name, Source Volume Id
if len(req.Name) == 0 {
return status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty")
}
if len(req.SourceVolumeId) == 0 {
return status.Error(codes.InvalidArgument, "Source Volume ID cannot be empty")
}
return nil
}
func (cs *ControllerServer) doSnapshot(rbdSnap *rbdSnapshot, secret map[string]string) error {
err := createSnapshot(rbdSnap, rbdSnap.AdminID, secret)
// if we already have the snapshot, return the snapshot
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
if status.ExitStatus() == int(syscall.EEXIST) {
klog.Warningf("Snapshot with the same name: %s, we return this.", rbdSnap.SnapName)
} else {
klog.Warningf("failed to create snapshot: %v", err)
return err
}
} else {
klog.Warningf("failed to create snapshot: %v", err)
return err
}
} else {
klog.Warningf("failed to create snapshot: %v", err)
return err
}
} else {
klog.V(4).Infof("create snapshot %s", rbdSnap.SnapName)
err = protectSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if err != nil {
err = deleteSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if err != nil {
return fmt.Errorf("snapshot is created but failed to protect and delete snapshot: %v", err)
}
return fmt.Errorf("snapshot is created but failed to protect snapshot")
}
}
return nil
}
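The nested type assertions in doSnapshot are how the driver tells an already-existing snapshot apart from a real failure: the rbd snap create CLI exits with EEXIST when the snapshot is present, and that exit status is only reachable through exec.ExitError and syscall.WaitStatus. A minimal standalone sketch of the same check follows; the shell command is only a stand-in for any binary that exits with status 17.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// alreadyExists reports whether err carries a process exit status equal to EEXIST.
func alreadyExists(err error) bool {
	exitErr, ok := err.(*exec.ExitError)
	if !ok {
		return false
	}
	ws, ok := exitErr.Sys().(syscall.WaitStatus)
	return ok && ws.ExitStatus() == int(syscall.EEXIST)
}

func main() {
	// Illustrative only: any command exiting with status 17 (EEXIST on Linux) triggers the branch.
	err := exec.Command("sh", "-c", "exit 17").Run()
	fmt.Println(alreadyExists(err)) // true
}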
// DeleteSnapshot deletes the snapshot in backend and removes the
// snapshot metadata from store
func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
klog.Warningf("invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
return nil, err
}
@ -344,27 +457,31 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty")
}
snapshotIDMutex.LockKey(snapshotID)
defer snapshotIDMutex.UnlockKey(snapshotID)
defer func() {
if err := snapshotIDMutex.UnlockKey(snapshotID); err != nil {
klog.Warningf("failed to unlock mutex snapshot:%s %v", snapshotID, err)
}
}()
rbdSnap := &rbdSnapshot{}
if err := loadSnapInfo(snapshotID, path.Join(PluginFolder, "controller-snap"), rbdSnap); err != nil {
if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil {
return nil, err
}
// Unprotect snapshot
err := unprotectSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets())
err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets())
if err != nil {
return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err))
return nil, status.Errorf(codes.FailedPrecondition, "failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err)
}
// Deleting snapshot
glog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName)
if err := deleteSnapshot(rbdSnap, rbdSnap.AdminId, req.GetSecrets()); err != nil {
return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err))
klog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName)
if err := deleteSnapshot(rbdSnap, rbdSnap.AdminID, req.GetSecrets()); err != nil {
return nil, status.Errorf(codes.FailedPrecondition, "failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err)
}
// Removing persistent storage file for the unmapped snapshot
if err := deleteSnapInfo(snapshotID, path.Join(PluginFolder, "controller-snap")); err != nil {
if err := cs.MetadataStore.Delete(snapshotID); err != nil {
return nil, err
}
@ -373,13 +490,14 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
return &csi.DeleteSnapshotResponse{}, nil
}
func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
// ListSnapshots lists the snapshots in the store
func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS); err != nil {
glog.Warningf("invalid list snapshot req: %v", req)
klog.Warningf("invalid list snapshot req: %v", req)
return nil, err
}
sourceVolumeId := req.GetSourceVolumeId()
sourceVolumeID := req.GetSourceVolumeId()
// TODO (sngchlko) list with token
// TODO (#94) protect concurrent access to global data structures
@ -388,8 +506,8 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
if snapshotID := req.GetSnapshotId(); len(snapshotID) != 0 {
if rbdSnap, ok := rbdSnapshots[snapshotID]; ok {
// if source volume ID also set, check source volume id on the cache.
if len(sourceVolumeId) != 0 && rbdSnap.SourceVolumeID != sourceVolumeId {
return nil, status.Error(codes.Unknown, fmt.Sprintf("Requested Source Volume ID %s is different from %s", sourceVolumeId, rbdSnap.SourceVolumeID))
if len(sourceVolumeID) != 0 && rbdSnap.SourceVolumeID != sourceVolumeID {
return nil, status.Errorf(codes.Unknown, "Requested Source Volume ID %s is different from %s", sourceVolumeID, rbdSnap.SourceVolumeID)
}
return &csi.ListSnapshotsResponse{
Entries: []*csi.ListSnapshotsResponse_Entry{
@ -406,15 +524,15 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
},
},
}, nil
} else {
return nil, status.Error(codes.NotFound, fmt.Sprintf("Snapshot ID %s cannot found", snapshotID))
}
return nil, status.Errorf(codes.NotFound, "Snapshot ID %s cannot found", snapshotID)
}
entries := []*csi.ListSnapshotsResponse_Entry{}
for _, rbdSnap := range rbdSnapshots {
// if source volume ID also set, check source volume id on the cache.
if len(sourceVolumeId) != 0 && rbdSnap.SourceVolumeID != sourceVolumeId {
if len(sourceVolumeID) != 0 && rbdSnap.SourceVolumeID != sourceVolumeID {
continue
}
entries = append(entries, &csi.ListSnapshotsResponse_Entry{


@ -23,11 +23,14 @@ import (
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type identityServer struct {
// IdentityServer struct of rbd CSI driver with supported methods of CSI
// identity server spec.
type IdentityServer struct {
*csicommon.DefaultIdentityServer
}
func (is *identityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
// GetPluginCapabilities returns available capabilities of the rbd driver
func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{


@ -19,10 +19,12 @@ package rbd
import (
"fmt"
"os"
"os/exec"
"regexp"
"strings"
"github.com/golang/glog"
"golang.org/x/net/context"
"k8s.io/klog"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
@ -33,33 +35,48 @@ import (
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)
type nodeServer struct {
// NodeServer struct of ceph rbd driver with supported methods of CSI
// node server spec
type NodeServer struct {
*csicommon.DefaultNodeServer
mounter mount.Interface
}
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
// TODO remove both stage and unstage methods
// once https://github.com/kubernetes-csi/drivers/pull/145 is merged
// NodeStageVolume returns unimplemented response
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// NodeUnstageVolume returns unimplemented response
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// NodePublishVolume mounts the volume mounted to the device path to the target
// path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
targetPath := req.GetTargetPath()
if !strings.HasSuffix(targetPath, "/mount") {
return nil, fmt.Errorf("rnd: malformed the value of target path: %s", targetPath)
}
s := strings.Split(strings.TrimSuffix(targetPath, "/mount"), "/")
volName := s[len(s)-1]
targetPathMutex.LockKey(targetPath)
defer targetPathMutex.UnlockKey(targetPath)
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
defer func() {
if err := targetPathMutex.UnlockKey(targetPath); err != nil {
klog.Warningf("failed to unlock mutex targetpath:%s %v", targetPath, err)
}
}()
volName, err := ns.getVolumeName(req)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
} else {
return nil, status.Error(codes.Internal, err.Error())
return nil, err
}
isBlock := req.GetVolumeCapability().GetBlock() != nil
// Check if that target path exists properly
notMnt, err := ns.createTargetPath(targetPath, isBlock)
if err != nil {
return nil, err
}
if !notMnt {
@ -71,43 +88,125 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
}
volOptions.VolName = volName
// Mapping RBD image
devicePath, err := attachRBDImage(volOptions, volOptions.UserId, req.GetSecrets())
devicePath, err := attachRBDImage(volOptions, volOptions.UserID, req.GetSecrets())
if err != nil {
return nil, err
}
glog.V(4).Infof("rbd image: %s/%s was successfully mapped at %s\n", req.GetVolumeId(), volOptions.Pool, devicePath)
fsType := req.GetVolumeCapability().GetMount().GetFsType()
klog.V(4).Infof("rbd image: %s/%s was successfully mapped at %s\n", req.GetVolumeId(), volOptions.Pool, devicePath)
// Publish Path
err = ns.mountVolume(req, devicePath)
if err != nil {
return nil, err
}
return &csi.NodePublishVolumeResponse{}, nil
}
func (ns *NodeServer) getVolumeName(req *csi.NodePublishVolumeRequest) (string, error) {
var volName string
isBlock := req.GetVolumeCapability().GetBlock() != nil
targetPath := req.GetTargetPath()
if isBlock {
// Get volName from targetPath
s := strings.Split(targetPath, "/")
volName = s[len(s)-1]
} else {
// Get volName from targetPath
if !strings.HasSuffix(targetPath, "/mount") {
return "", fmt.Errorf("rbd: malformed the value of target path: %s", targetPath)
}
s := strings.Split(strings.TrimSuffix(targetPath, "/mount"), "/")
volName = s[len(s)-1]
}
return volName, nil
}
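The derivation above is purely lexical: for a filesystem volume the kubelet target path ends in the volume name followed by /mount, while for a raw block volume the last path element is already the volume name. A small standalone illustration of both cases; the paths are made-up examples, not values this driver guarantees.
package main

import (
	"fmt"
	"strings"
)

func lastElement(p string) string {
	s := strings.Split(p, "/")
	return s[len(s)-1]
}

func main() {
	// Filesystem volume: strip the trailing "/mount" before taking the last element.
	fsPath := "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~csi/pvc-1234/mount"
	fmt.Println(lastElement(strings.TrimSuffix(fsPath, "/mount"))) // pvc-1234

	// Raw block volume: the last element is the volume name itself.
	blockPath := "/var/lib/kubelet/pods/uid/volumeDevices/publish/pvc-1234"
	fmt.Println(lastElement(blockPath)) // pvc-1234
}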
func (ns *NodeServer) mountVolume(req *csi.NodePublishVolumeRequest, devicePath string) error {
// Publish Path
fsType := req.GetVolumeCapability().GetMount().GetFsType()
readOnly := req.GetReadonly()
attrib := req.GetVolumeContext()
mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()
isBlock := req.GetVolumeCapability().GetBlock() != nil
targetPath := req.GetTargetPath()
glog.V(4).Infof("target %v\nfstype %v\ndevice %v\nreadonly %v\nattributes %v\n mountflags %v\n",
targetPath, fsType, devicePath, readOnly, attrib, mountFlags)
klog.V(4).Infof("target %v\nisBlock %v\nfstype %v\ndevice %v\nreadonly %v\nattributes %v\n mountflags %v\n",
targetPath, isBlock, fsType, devicePath, readOnly, attrib, mountFlags)
diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: mount.NewOsExec()}
if isBlock {
options := []string{"bind"}
if err := diskMounter.Mount(devicePath, targetPath, fsType, options); err != nil {
return err
}
} else {
options := []string{}
if readOnly {
options = append(options, "ro")
}
diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: mount.NewOsExec()}
if err := diskMounter.FormatAndMount(devicePath, targetPath, fsType, options); err != nil {
return nil, err
return err
}
return &csi.NodePublishVolumeResponse{}, nil
}
return nil
}
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
func (ns *NodeServer) createTargetPath(targetPath string, isBlock bool) (bool, error) {
// Check if that target path exists properly
notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
if err != nil {
if os.IsNotExist(err) {
if isBlock {
// create an empty file
// #nosec
targetPathFile, e := os.OpenFile(targetPath, os.O_CREATE|os.O_RDWR, 0750)
if e != nil {
klog.V(4).Infof("Failed to create targetPath:%s with error: %v", targetPath, err)
return notMnt, status.Error(codes.Internal, e.Error())
}
if err = targetPathFile.Close(); err != nil {
klog.V(4).Infof("Failed to close targetPath:%s with error: %v", targetPath, err)
return notMnt, status.Error(codes.Internal, err.Error())
}
} else {
// Create a directory
if err = os.MkdirAll(targetPath, 0750); err != nil {
return notMnt, status.Error(codes.Internal, err.Error())
}
}
notMnt = true
} else {
return false, status.Error(codes.Internal, err.Error())
}
}
return notMnt, err
}
// NodeUnpublishVolume unmounts the volume from the target path
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
targetPath := req.GetTargetPath()
targetPathMutex.LockKey(targetPath)
defer targetPathMutex.UnlockKey(targetPath)
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
defer func() {
if err := targetPathMutex.UnlockKey(targetPath); err != nil {
klog.Warningf("failed to unlock mutex targetpath:%s %v", targetPath, err)
}
}()
notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
if os.IsNotExist(err) {
// targetPath has already been deleted
klog.V(4).Infof("targetPath: %s has already been deleted", targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
return nil, status.Error(codes.NotFound, err.Error())
}
if notMnt {
// TODO should consider deleting path instead of returning error,
// once all codes become ready for csi 1.0.
return nil, status.Error(codes.NotFound, "Volume not mounted")
}
@ -116,38 +215,81 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return nil, status.Error(codes.Internal, err.Error())
}
// Unmounting the image
err = ns.mounter.Unmount(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
cnt--
if cnt != 0 {
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// Unmapping rbd device
if err := detachRBDDevice(devicePath); err != nil {
glog.V(3).Infof("failed to unmap rbd device: %s with error: %v", devicePath, err)
if err = ns.unmount(targetPath, devicePath, cnt); err != nil {
return nil, err
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (
*csi.NodeStageVolumeResponse, error) {
func (ns *NodeServer) unmount(targetPath, devicePath string, cnt int) error {
var err error
// Bind mounted device needs to be resolved by using resolveBindMountedBlockDevice
if devicePath == "devtmpfs" {
devicePath, err = resolveBindMountedBlockDevice(targetPath)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof("NodeUnpublishVolume: devicePath: %s, (original)cnt: %d\n", devicePath, cnt)
// cnt for GetDeviceNameFromMount is broken for bind-mounted devices:
// it counts the total number of mounted "devtmpfs" entries instead of counting this device.
// So, forcibly setting cnt to 1 here.
// TODO : fix this properly
cnt = 1
}
return nil, status.Error(codes.Unimplemented, "")
klog.V(4).Infof("NodeUnpublishVolume: targetPath: %s, devicePath: %s\n", targetPath, devicePath)
// Unmounting the image
err = ns.mounter.Unmount(targetPath)
if err != nil {
klog.V(3).Infof("failed to unmount targetPath: %s with error: %v", targetPath, err)
return status.Error(codes.Internal, err.Error())
}
cnt--
if cnt != 0 {
// TODO should this be fixed not to succeed, so that the driver can retry unmounting?
return nil
}
// Unmapping rbd device
if err = detachRBDDevice(devicePath); err != nil {
klog.V(3).Infof("failed to unmap rbd device: %s with error: %v", devicePath, err)
return err
}
// Remove targetPath
if err = os.RemoveAll(targetPath); err != nil {
klog.V(3).Infof("failed to remove targetPath: %s with error: %v", targetPath, err)
}
return err
}
func resolveBindMountedBlockDevice(mountPath string) (string, error) {
// #nosec
cmd := exec.Command("findmnt", "-n", "-o", "SOURCE", "--first-only", "--target", mountPath)
out, err := cmd.CombinedOutput()
if err != nil {
klog.V(2).Infof("Failed findmnt command for path %s: %s %v", mountPath, out, err)
return "", err
}
return parseFindMntResolveSource(string(out))
}
func (ns *nodeServer) NodeUnstageVolume(
ctx context.Context,
req *csi.NodeUnstageVolumeRequest) (
*csi.NodeUnstageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
// parse output of "findmnt -o SOURCE --first-only --target" and return just the SOURCE
func parseFindMntResolveSource(out string) (string, error) {
// cut trailing newline
out = strings.TrimSuffix(out, "\n")
// Check if out is a mounted device
reMnt := regexp.MustCompile("^(/[^/]+(?:/[^/]*)*)$")
if match := reMnt.FindStringSubmatch(out); match != nil {
return match[1], nil
}
// Check if out is a block device
// nolint
reBlk := regexp.MustCompile("^devtmpfs\\[(/[^/]+(?:/[^/]*)*)\\]$")
if match := reBlk.FindStringSubmatch(out); match != nil {
return fmt.Sprintf("/dev%s", match[1]), nil
}
return "", fmt.Errorf("parseFindMntResolveSource: %s doesn't match to any expected findMnt output", out)
}
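The two regular expressions cover the two shapes findmnt prints for the bound source: either a plain device path, or a devtmpfs[...] form where only the bracketed part is the device relative to /dev. A quick sketch of both cases, assuming it sits in the same rbd package as the parser; the sample strings are illustrative, and real findmnt output depends on the host.
// a minimal sketch, assuming it lives in package rbd next to parseFindMntResolveSource
func demoParseFindMnt() {
	// A plain mounted device path is returned unchanged.
	dev, _ := parseFindMntResolveSource("/dev/rbd0\n")
	fmt.Println(dev) // /dev/rbd0

	// The devtmpfs[<path>] form is rewritten to a path under /dev.
	dev, _ = parseFindMntResolveSource("devtmpfs[/nbd0]\n")
	fmt.Println(dev) // /dev/nbd0

	// Anything else is rejected with an error.
	_, err := parseFindMntResolveSource("tmpfs")
	fmt.Println(err != nil) // true
}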


@ -17,14 +17,9 @@ limitations under the License.
package rbd
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -35,132 +30,46 @@ import (
// PluginFolder defines the location of rbdplugin
const (
PluginFolder = "/var/lib/kubelet/plugins_registry/csi-rbdplugin"
rbdDefaultAdminId = "admin"
rbdDefaultUserId = rbdDefaultAdminId
PluginFolder = "/var/lib/kubelet/plugins/csi-rbdplugin"
rbdDefaultAdminID = "admin"
rbdDefaultUserID = rbdDefaultAdminID
)
type rbd struct {
driver *csicommon.CSIDriver
// Driver contains the default identity, node and controller structs
type Driver struct {
cd *csicommon.CSIDriver
ids *identityServer
ns *nodeServer
cs *controllerServer
cap []*csi.VolumeCapability_AccessMode
cscap []*csi.ControllerServiceCapability
ids *IdentityServer
ns *NodeServer
cs *ControllerServer
}
var (
rbdDriver *rbd
version = "1.0.0"
)
var rbdVolumes map[string]*rbdVolume
var rbdSnapshots map[string]*rbdSnapshot
// Init checks for the persistent volume file and loads all found volumes
// into a memory structure
func init() {
rbdVolumes = map[string]*rbdVolume{}
rbdSnapshots = map[string]*rbdSnapshot{}
if _, err := os.Stat(path.Join(PluginFolder, "controller")); os.IsNotExist(err) {
glog.Infof("rbd: folder %s not found. Creating... \n", path.Join(PluginFolder, "controller"))
if err := os.Mkdir(path.Join(PluginFolder, "controller"), 0755); err != nil {
glog.Fatalf("Failed to create a controller's volumes folder with error: %v\n", err)
}
} else {
// Since "controller" folder exists, it means the rbdplugin has already been running, it means
// there might be some volumes left, they must be re-inserted into rbdVolumes map
loadExVolumes()
}
if _, err := os.Stat(path.Join(PluginFolder, "controller-snap")); os.IsNotExist(err) {
glog.Infof("rbd: folder %s not found. Creating... \n", path.Join(PluginFolder, "controller-snap"))
if err := os.Mkdir(path.Join(PluginFolder, "controller-snap"), 0755); err != nil {
glog.Fatalf("Failed to create a controller's snapshots folder with error: %v\n", err)
}
} else {
// Since "controller-snap" folder exists, it means the rbdplugin has already been running, it means
// there might be some snapshots left, they must be re-inserted into rbdSnapshots map
loadExSnapshots()
}
// NewDriver returns a new rbd driver
func NewDriver() *Driver {
return &Driver{}
}
// loadExSnapshots checks for any *.json files in the PluginFolder/controller-snap folder
// and loads them into the rbdSnapshots map
func loadExSnapshots() {
rbdSnap := rbdSnapshot{}
files, err := ioutil.ReadDir(path.Join(PluginFolder, "controller-snap"))
if err != nil {
glog.Infof("rbd: failed to read controller's snapshots folder: %s error:%v", path.Join(PluginFolder, "controller-snap"), err)
return
}
for _, f := range files {
if !strings.HasSuffix(f.Name(), ".json") {
continue
}
fp, err := os.Open(path.Join(PluginFolder, "controller-snap", f.Name()))
if err != nil {
glog.Infof("rbd: open file: %s err %v", f.Name(), err)
continue
}
decoder := json.NewDecoder(fp)
if err = decoder.Decode(&rbdSnap); err != nil {
glog.Infof("rbd: decode file: %s err: %v", f.Name(), err)
fp.Close()
continue
}
rbdSnapshots[rbdSnap.SnapID] = &rbdSnap
}
glog.Infof("rbd: Loaded %d snapshots from %s", len(rbdSnapshots), path.Join(PluginFolder, "controller-snap"))
}
// loadExVolumes checks for any *.json files in the PluginFolder/controller folder
// and loads them into the rbdVolumes map
func loadExVolumes() {
rbdVol := rbdVolume{}
files, err := ioutil.ReadDir(path.Join(PluginFolder, "controller"))
if err != nil {
glog.Infof("rbd: failed to read controller's volumes folder: %s error:%v", path.Join(PluginFolder, "controller"), err)
return
}
for _, f := range files {
if !strings.HasSuffix(f.Name(), ".json") {
continue
}
fp, err := os.Open(path.Join(PluginFolder, "controller", f.Name()))
if err != nil {
glog.Infof("rbd: open file: %s err %v", f.Name(), err)
continue
}
decoder := json.NewDecoder(fp)
if err = decoder.Decode(&rbdVol); err != nil {
glog.Infof("rbd: decode file: %s err: %v", f.Name(), err)
fp.Close()
continue
}
rbdVolumes[rbdVol.VolID] = &rbdVol
}
glog.Infof("rbd: Loaded %d volumes from %s", len(rbdVolumes), path.Join(PluginFolder, "controller"))
}
func GetRBDDriver() *rbd {
return &rbd{}
}
func NewIdentityServer(d *csicommon.CSIDriver) *identityServer {
return &identityServer{
// NewIdentityServer initializes an identity server for the rbd CSI driver
func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
return &IdentityServer{
DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
}
}
func NewControllerServer(d *csicommon.CSIDriver) *controllerServer {
return &controllerServer{
// NewControllerServer initializes a controller server for the rbd CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer {
return &ControllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
MetadataStore: cachePersister,
}
}
func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*nodeServer, error) {
// NewNodeServer initializes a node server for the rbd CSI driver.
func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, error) {
mounter := mount.New("")
if containerized {
ne, err := nsenter.NewNsenter(nsenter.DefaultHostRootFsPath, exec.New())
@ -169,37 +78,46 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*nodeServer, err
}
mounter = mount.NewNsenterMounter("", ne)
}
return &nodeServer{
return &NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
mounter: mounter,
}, nil
}
func (rbd *rbd) Run(driverName, nodeID, endpoint string, containerized bool) {
// Run starts a non-blocking gRPC controller, node and identity server for the
// rbd CSI driver, which can serve multiple parallel requests
func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, cachePersister util.CachePersister) {
var err error
glog.Infof("Driver: %v version: %v", driverName, version)
klog.Infof("Driver: %v version: %v", driverName, version)
// Initialize default library driver
rbd.driver = csicommon.NewCSIDriver(driverName, version, nodeID)
if rbd.driver == nil {
glog.Fatalln("Failed to initialize CSI Driver.")
r.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
if r.cd == nil {
klog.Fatalln("Failed to initialize CSI Driver.")
}
rbd.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
})
rbd.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})
r.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})
// Create GRPC servers
rbd.ids = NewIdentityServer(rbd.driver)
rbd.ns, err = NewNodeServer(rbd.driver, containerized)
r.ids = NewIdentityServer(r.cd)
r.ns, err = NewNodeServer(r.cd, containerized)
if err != nil {
glog.Fatalf("failed to start node server, err %v\n", err)
klog.Fatalf("failed to start node server, err %v\n", err)
}
rbd.cs = NewControllerServer(rbd.driver)
r.cs = NewControllerServer(r.cd, cachePersister)
if err = r.cs.LoadExDataFromMetadataStore(); err != nil {
klog.Fatalf("failed to load metadata from store, err %v\n", err)
}
s := csicommon.NewNonBlockingGRPCServer()
s.Start(endpoint, rbd.ids, rbd.cs, rbd.ns)
s.Start(endpoint, r.ids, r.cs, r.ns)
s.Wait()
}
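For orientation, a driver binary is expected to wire these pieces together roughly as below: build a cache persister, construct the driver with NewDriver and hand both to Run. The driver name, node ID and endpoint here are illustrative placeholders (a real binary reads them from flags), and the pkg/rbd import path is assumed from the repository layout.
package main

import (
	"k8s.io/klog"

	"github.com/ceph/ceph-csi/pkg/rbd"
	"github.com/ceph/ceph-csi/pkg/util"
)

func main() {
	// Placeholder values; a real binary would read these from command-line flags.
	driverName := "rbd.csi.ceph.com"
	nodeID := "node-1"
	endpoint := "unix:///csi/csi.sock"

	cp, err := util.NewCachePersister("k8s_configmap", driverName)
	if err != nil {
		klog.Fatalf("failed to create cache persister: %v", err)
	}

	driver := rbd.NewDriver()
	// Run starts the identity, controller and node gRPC services and then waits.
	driver.Run(driverName, nodeID, endpoint, false /* containerized */, cp)
}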


@ -25,12 +25,15 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
const (
envHostRootFS = "HOST_ROOTFS"
rbdTonbd = "rbd-nbd"
rbd = "rbd"
nbd = "nbd"
)
var (
@ -46,47 +49,37 @@ func init() {
hasNBD = checkRbdNbdTools()
}
func getDevFromImageAndPool(pool, image string) (string, bool) {
device, found := getRbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
device, found = getNbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
return "", false
}
// Search /sys/bus for rbd device that matches given pool and image.
func getRbdDevFromImageAndPool(pool string, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sys_path := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sys_path); err == nil {
sysPath := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sysPath); err == nil {
for _, f := range dirs {
// Pool and name format:
// see rbd_pool_show() and rbd_name_show() at
// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
name := f.Name()
// First match pool, then match name.
poolFile := path.Join(sys_path, name, "pool")
poolFile := path.Join(sysPath, name, "pool")
// #nosec
poolBytes, err := ioutil.ReadFile(poolFile)
if err != nil {
glog.V(4).Infof("error reading %s: %v", poolFile, err)
klog.V(4).Infof("error reading %s: %v", poolFile, err)
continue
}
if strings.TrimSpace(string(poolBytes)) != pool {
glog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes))
klog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes))
continue
}
imgFile := path.Join(sys_path, name, "name")
imgFile := path.Join(sysPath, name, "name")
// #nosec
imgBytes, err := ioutil.ReadFile(imgFile)
if err != nil {
glog.V(4).Infof("error reading %s: %v", imgFile, err)
klog.V(4).Infof("error reading %s: %v", imgFile, err)
continue
}
if strings.TrimSpace(string(imgBytes)) != image {
glog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes))
klog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes))
continue
}
// Found a match, check if device exists.
@ -109,7 +102,7 @@ func getMaxNbds() (int, error) {
return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err)
}
glog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath)
klog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath)
maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath)
if err != nil {
@ -121,7 +114,7 @@ func getMaxNbds() (int, error) {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err)
}
glog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds)
klog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds)
return maxNbds, nil
}
@ -138,27 +131,40 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
maxNbds, maxNbdsErr := getMaxNbds()
if maxNbdsErr != nil {
glog.V(4).Infof("error reading nbds_max %v", maxNbdsErr)
klog.V(4).Infof("error reading nbds_max %v", maxNbdsErr)
return "", false
}
for i := 0; i < maxNbds; i++ {
nbdPath := basePath + strconv.Itoa(i)
devicePath, err := getnbdDevicePath(nbdPath, imgPath, i)
if err != nil {
continue
}
return devicePath, true
}
return "", false
}
func getnbdDevicePath(nbdPath, imgPath string, count int) (string, error) {
_, err := os.Lstat(nbdPath)
if err != nil {
glog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err)
continue
klog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err)
return "", err
}
// #nosec
pidBytes, err := ioutil.ReadFile(path.Join(nbdPath, "pid"))
if err != nil {
glog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err)
continue
klog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err)
return "", err
}
cmdlineFileName := path.Join(hostRootFS, "/proc", strings.TrimSpace(string(pidBytes)), "cmdline")
// #nosec
rawCmdline, err := ioutil.ReadFile(cmdlineFileName)
if err != nil {
glog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err)
continue
klog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err)
return "", err
}
cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool {
return r == '\u0000'
@ -166,23 +172,22 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
// Check if this process is mapping a rbd device.
// Only accepted pattern of cmdline is from execRbdMap:
// rbd-nbd map pool/image ...
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" {
glog.V(4).Infof("nbd device %s is not used by rbd", nbdPath)
continue
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != rbdTonbd || cmdlineArgs[1] != "map" {
klog.V(4).Infof("nbd device %s is not used by rbd", nbdPath)
return "", err
}
if cmdlineArgs[2] != imgPath {
glog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s",
klog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s",
nbdPath, imgPath, cmdlineArgs[2])
continue
return "", err
}
devicePath := path.Join("/dev", "nbd"+strconv.Itoa(i))
devicePath := path.Join("/dev", "nbd"+strconv.Itoa(count))
if _, err := os.Lstat(devicePath); err != nil {
glog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err)
continue
klog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err)
return "", err
}
return devicePath, true
}
return "", false
return devicePath, nil
}
// Stat a path, if it doesn't exist, retry maxRetries times.
@ -208,41 +213,44 @@ func waitForPath(pool, image string, maxRetries int, useNbdDriver bool) (string,
func checkRbdNbdTools() bool {
_, err := execCommand("modprobe", []string{"nbd"})
if err != nil {
glog.V(3).Infof("rbd-nbd: nbd modprobe failed with error %v", err)
klog.V(3).Infof("rbd-nbd: nbd modprobe failed with error %v", err)
return false
}
if _, err := execCommand("rbd-nbd", []string{"--version"}); err != nil {
glog.V(3).Infof("rbd-nbd: running rbd-nbd --version failed with error %v", err)
if _, err := execCommand(rbdTonbd, []string{"--version"}); err != nil {
klog.V(3).Infof("rbd-nbd: running rbd-nbd --version failed with error %v", err)
return false
}
glog.V(3).Infof("rbd-nbd tools were found.")
klog.V(3).Infof("rbd-nbd tools were found.")
return true
}
func attachRBDImage(volOptions *rbdVolume, userId string, credentials map[string]string) (string, error) {
func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string]string) (string, error) {
var err error
var output []byte
image := volOptions.VolName
imagePath := fmt.Sprintf("%s/%s", volOptions.Pool, image)
useNBD := false
cmdName := "rbd"
moduleName := "rbd"
if volOptions.Mounter == "rbd-nbd" && hasNBD {
moduleName := rbd
if volOptions.Mounter == rbdTonbd && hasNBD {
useNBD = true
cmdName = "rbd-nbd"
moduleName = "nbd"
moduleName = nbd
}
devicePath, found := waitForPath(volOptions.Pool, image, 1, useNBD)
if !found {
attachdetachMutex.LockKey(string(imagePath))
defer attachdetachMutex.UnlockKey(string(imagePath))
attachdetachMutex.LockKey(imagePath)
defer func() {
if err = attachdetachMutex.UnlockKey(imagePath); err != nil {
klog.Warningf("failed to unlock mutex imagepath:%s %v", imagePath, err)
}
}()
_, err = execCommand("modprobe", []string{moduleName})
if err != nil {
glog.Warningf("rbd: failed to load rbd kernel module:%v", err)
klog.Warningf("rbd: failed to load rbd kernel module:%v", err)
return "", err
}
backoff := wait.Backoff{
@ -250,8 +258,58 @@ func attachRBDImage(volOptions *rbdVolume, userId string, credentials map[string
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
err = waitForrbdImage(backoff, volOptions, userID, credentials)
if err != nil {
return "", err
}
devicePath, err = createPath(volOptions, userID, credentials)
}
return devicePath, err
}
func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (string, error) {
image := volOpt.VolName
imagePath := fmt.Sprintf("%s/%s", volOpt.Pool, image)
mon, err := getMon(volOpt, creds)
if err != nil {
return "", err
}
klog.V(5).Infof("rbd: map mon %s", mon)
key, err := getRBDKey(userID, creds)
if err != nil {
return "", err
}
useNBD := false
cmdName := rbd
if volOpt.Mounter == rbdTonbd && hasNBD {
useNBD = true
cmdName = rbdTonbd
}
output, err := execCommand(cmdName, []string{
"map", imagePath, "--id", userID, "-m", mon, "--key=" + key})
if err != nil {
klog.Warningf("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
devicePath, found := waitForPath(volOpt.Pool, image, 10, useNBD)
if !found {
return "", fmt.Errorf("could not map image %s, Timeout after 10s", imagePath)
}
return devicePath, nil
}
func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string, credentials map[string]string) error {
image := volOptions.VolName
imagePath := fmt.Sprintf("%s/%s", volOptions.Pool, image)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := rbdStatus(volOptions, userId, credentials)
used, rbdOutput, err := rbdStatus(volOptions, userID, credentials)
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
@ -259,47 +317,21 @@ func attachRBDImage(volOptions *rbdVolume, userId string, credentials map[string
})
// return error if rbd image has not become available for the specified timeout
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("rbd image %s is still being used", imagePath)
return fmt.Errorf("rbd image %s is still being used", imagePath)
}
// return error if any other errors were encountered while waiting for the image to become available
if err != nil {
return "", err
}
mon, err := getMon(volOptions, credentials)
if err != nil {
return "", err
}
glog.V(5).Infof("rbd: map mon %s", mon)
key, err := getRBDKey(userId, credentials)
if err != nil {
return "", err
}
output, err = execCommand(cmdName, []string{
"map", imagePath, "--id", userId, "-m", mon, "--key=" + key})
if err != nil {
glog.Warningf("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
devicePath, found = waitForPath(volOptions.Pool, image, 10, useNBD)
if !found {
return "", fmt.Errorf("Could not map image %s, Timeout after 10s", imagePath)
}
}
return devicePath, nil
// return error if any other errors were encountered while waiting for the image to become available
return err
}
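The watcher wait is a standard wait.ExponentialBackoff loop: the condition function returns (true, nil) to stop, (false, nil) to retry, and wait.ErrWaitTimeout falls out once the steps are exhausted. A standalone sketch with placeholder timings follows; the driver derives its own window from the rbdImageWatcher* constants, which per the comment in rbd_util.go add up to roughly 30 seconds.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Placeholder timings, not the driver's rbdImageWatcher* values.
	backoff := wait.Backoff{Duration: 1 * time.Second, Factor: 2.0, Steps: 5}

	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// Stand-in for rbdStatus: pretend the watcher disappears on the third try.
		return attempts >= 3, nil
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("rbd image is still being used")
		return
	}
	fmt.Printf("image became available after %d attempts, err=%v\n", attempts, err)
}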
func detachRBDDevice(devicePath string) error {
var err error
var output []byte
glog.V(3).Infof("rbd: unmap device %s", devicePath)
klog.V(3).Infof("rbd: unmap device %s", devicePath)
cmdName := "rbd"
cmdName := rbd
if strings.HasPrefix(devicePath, "/dev/nbd") {
cmdName = "rbd-nbd"
cmdName = rbdTonbd
}
output, err = execCommand(cmdName, []string{"unmap", devicePath})


@ -17,27 +17,20 @@ limitations under the License.
package rbd
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/keymutex"
)
const (
imageWatcherStr = "watcher="
rbdImageFormat1 = "1"
rbdImageFormat2 = "2"
imageSizeStr = "size "
sizeDivStr = " MB in"
kubeLockMagic = "kubelet_lock_magic_"
// The following three values are used for 30 seconds timeout
// while waiting for RBD Watcher to expire.
rbdImageWatcherInitDelay = 1 * time.Second
@ -55,8 +48,8 @@ type rbdVolume struct {
ImageFormat string `json:"imageFormat"`
ImageFeatures string `json:"imageFeatures"`
VolSize int64 `json:"volSize"`
AdminId string `json:"adminId"`
UserId string `json:"userId"`
AdminID string `json:"adminId"`
UserID string `json:"userId"`
Mounter string `json:"mounter"`
}
@ -70,8 +63,8 @@ type rbdSnapshot struct {
Pool string `json:"pool"`
CreatedAt int64 `json:"createdAt"`
SizeBytes int64 `json:"sizeBytes"`
AdminId string `json:"adminId"`
UserId string `json:"userId"`
AdminID string `json:"adminId"`
UserID string `json:"userId"`
}
var (
@ -105,19 +98,20 @@ func getMon(pOpts *rbdVolume, credentials map[string]string) (string, error) {
// if mons are set in secret, retrieve them
if len(pOpts.MonValueFromSecret) == 0 {
// yet another sanity check
return "", fmt.Errorf("either monitors or monValueFromSecret must be set")
return "", errors.New("either monitors or monValueFromSecret must be set")
}
if val, ok := credentials[pOpts.MonValueFromSecret]; !ok {
val, ok := credentials[pOpts.MonValueFromSecret]
if !ok {
return "", fmt.Errorf("mon data %s is not set in secret", pOpts.MonValueFromSecret)
} else {
mon = val
}
mon = val
}
return mon, nil
}
// CreateImage creates a new ceph image with provision and volume options.
func createRBDImage(pOpts *rbdVolume, volSz int, adminId string, credentials map[string]string) error {
func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map[string]string) error {
var output []byte
mon, err := getMon(pOpts, credentials)
@ -128,16 +122,16 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminId string, credentials map
image := pOpts.VolName
volSzGB := fmt.Sprintf("%dG", volSz)
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
if pOpts.ImageFormat == rbdImageFormat2 {
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool, adminId, key)
klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool)
} else {
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool, adminId, key)
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool)
}
args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", adminId, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat}
args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat}
if pOpts.ImageFormat == rbdImageFormat2 {
args = append(args, "--image-feature", pOpts.ImageFeatures)
}
@ -151,15 +145,15 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminId string, credentials map
}
// rbdStatus checks if there is watcher on the image.
// It returns true if there is a watcher on the image, otherwise returns false.
func rbdStatus(pOpts *rbdVolume, userId string, credentials map[string]string) (bool, string, error) {
// It returns true if there is a watcher on the image, otherwise returns false.
func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) (bool, string, error) {
var output string
var cmd []byte
image := pOpts.VolName
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
key, err := getRBDKey(userId, credentials)
key, err := getRBDKey(userID, credentials)
if err != nil {
return false, "", err
}
@ -169,14 +163,14 @@ func rbdStatus(pOpts *rbdVolume, userId string, credentials map[string]string) (
return false, "", err
}
glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, userId, key)
args := []string{"status", image, "--pool", pOpts.Pool, "-m", mon, "--id", userId, "--key=" + key}
klog.V(4).Infof("rbd: status %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"status", image, "--pool", pOpts.Pool, "-m", mon, "--id", userID, "--key=" + key}
cmd, err = execCommand("rbd", args)
output = string(cmd)
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
glog.Errorf("rbd cmd not found")
klog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
@ -188,27 +182,26 @@ func rbdStatus(pOpts *rbdVolume, userId string, credentials map[string]string) (
}
if strings.Contains(output, imageWatcherStr) {
glog.V(4).Infof("rbd: watchers on %s: %s", image, output)
klog.V(4).Infof("rbd: watchers on %s: %s", image, output)
return true, output, nil
} else {
glog.Warningf("rbd: no watchers on %s", image)
return false, output, nil
}
klog.Warningf("rbd: no watchers on %s", image)
return false, output, nil
}
// DeleteImage deletes a ceph image with provision and volume options.
func deleteRBDImage(pOpts *rbdVolume, adminId string, credentials map[string]string) error {
func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]string) error {
var output []byte
image := pOpts.VolName
found, _, err := rbdStatus(pOpts, adminId, credentials)
found, _, err := rbdStatus(pOpts, adminID, credentials)
if err != nil {
return err
}
if found {
glog.Info("rbd is still being used ", image)
klog.Info("rbd is still being used ", image)
return fmt.Errorf("rbd %s is still being used", image)
}
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
@ -217,17 +210,18 @@ func deleteRBDImage(pOpts *rbdVolume, adminId string, credentials map[string]str
return err
}
glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key)
args := []string{"rm", image, "--pool", pOpts.Pool, "--id", adminId, "-m", mon, "--key=" + key}
klog.V(4).Infof("rbd: rm %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"rm", image, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args)
if err == nil {
return nil
}
glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
return err
}
func execCommand(command string, args []string) ([]byte, error) {
// #nosec
cmd := exec.Command(command, args...)
return cmd.CombinedOutput()
}
@ -237,13 +231,13 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
rbdVol := &rbdVolume{}
rbdVol.Pool, ok = volOptions["pool"]
if !ok {
return nil, fmt.Errorf("Missing required parameter pool")
return nil, fmt.Errorf("missing required parameter pool")
}
rbdVol.Monitors, ok = volOptions["monitors"]
if !ok {
// if mons are not set in options, check if they are set in secret
if rbdVol.MonValueFromSecret, ok = volOptions["monValueFromSecret"]; !ok {
return nil, fmt.Errorf("Either monitors or monValueFromSecret must be set")
return nil, fmt.Errorf("either monitors or monValueFromSecret must be set")
}
}
rbdVol.ImageFormat, ok = volOptions["imageFormat"]
@ -253,8 +247,8 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
if rbdVol.ImageFormat == rbdImageFormat2 {
// if no image features are provided, it results in an empty string,
// which disables all RBD image format 2 features, as expected
imageFeatures, ok := volOptions["imageFeatures"]
if ok {
imageFeatures, found := volOptions["imageFeatures"]
if found {
arr := strings.Split(imageFeatures, ",")
for _, f := range arr {
if !supportedFeatures.Has(f) {
@ -265,42 +259,46 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
}
}
rbdVol.AdminId, ok = volOptions["adminid"]
getCredsFromVol(rbdVol, volOptions)
return rbdVol, nil
}
func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) {
var ok bool
rbdVol.AdminID, ok = volOptions["adminid"]
if !ok {
rbdVol.AdminId = rbdDefaultAdminId
rbdVol.AdminID = rbdDefaultAdminID
}
rbdVol.UserId, ok = volOptions["userid"]
rbdVol.UserID, ok = volOptions["userid"]
if !ok {
rbdVol.UserId = rbdDefaultUserId
rbdVol.UserID = rbdDefaultUserID
}
rbdVol.Mounter, ok = volOptions["mounter"]
if !ok {
rbdVol.Mounter = rbdDefaultMounter
}
return rbdVol, nil
}
func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) {
var ok bool
rbdSnap := &rbdSnapshot{}
rbdSnap.Pool, ok = snapOptions["pool"]
if !ok {
return nil, fmt.Errorf("Missing required parameter pool")
return nil, fmt.Errorf("missing required parameter pool")
}
rbdSnap.Monitors, ok = snapOptions["monitors"]
if !ok {
// if mons are not set in options, check if they are set in secret
if rbdSnap.MonValueFromSecret, ok = snapOptions["monValueFromSecret"]; !ok {
return nil, fmt.Errorf("Either monitors or monValueFromSecret must be set")
return nil, fmt.Errorf("either monitors or monValueFromSecret must be set")
}
}
rbdSnap.AdminId, ok = snapOptions["adminid"]
rbdSnap.AdminID, ok = snapOptions["adminid"]
if !ok {
rbdSnap.AdminId = rbdDefaultAdminId
rbdSnap.AdminID = rbdDefaultAdminID
}
rbdSnap.UserId, ok = snapOptions["userid"]
rbdSnap.UserID, ok = snapOptions["userid"]
if !ok {
rbdSnap.UserId = rbdDefaultUserId
rbdSnap.UserID = rbdDefaultUserID
}
return rbdSnap, nil
@ -316,95 +314,6 @@ func hasSnapshotFeature(imageFeatures string) bool {
return false
}
func persistVolInfo(image string, persistentStoragePath string, volInfo *rbdVolume) error {
file := path.Join(persistentStoragePath, image+".json")
fp, err := os.Create(file)
if err != nil {
glog.Errorf("rbd: failed to create persistent storage file %s with error: %v\n", file, err)
return errors.Wrapf(err, "rbd: create error for %s", file)
}
defer fp.Close()
encoder := json.NewEncoder(fp)
if err = encoder.Encode(volInfo); err != nil {
glog.Errorf("rbd: failed to encode volInfo: %+v for file: %s with error: %v\n", volInfo, file, err)
return errors.Wrap(err, "rbd: encode error")
}
glog.Infof("rbd: successfully saved volInfo: %+v into file: %s\n", volInfo, file)
return nil
}
func loadVolInfo(image string, persistentStoragePath string, volInfo *rbdVolume) error {
file := path.Join(persistentStoragePath, image+".json")
fp, err := os.Open(file)
if err != nil {
return errors.Wrapf(err, "rbd: open error for %s", file)
}
defer fp.Close()
decoder := json.NewDecoder(fp)
if err = decoder.Decode(volInfo); err != nil {
return errors.Wrap(err, "rbd: decode error")
}
return nil
}
func deleteVolInfo(image string, persistentStoragePath string) error {
file := path.Join(persistentStoragePath, image+".json")
glog.Infof("rbd: Deleting file for Volume: %s at: %s resulting path: %+v\n", image, persistentStoragePath, file)
err := os.Remove(file)
if err != nil {
if err != os.ErrNotExist {
return errors.Wrapf(err, "rbd: error removing file %s", file)
}
}
return nil
}
func persistSnapInfo(snapshot string, persistentStoragePath string, snapInfo *rbdSnapshot) error {
file := path.Join(persistentStoragePath, snapshot+".json")
fp, err := os.Create(file)
if err != nil {
glog.Errorf("rbd: failed to create persistent storage file %s with error: %v\n", file, err)
return errors.Wrapf(err, "rbd: create error for %s", file)
}
defer fp.Close()
encoder := json.NewEncoder(fp)
if err = encoder.Encode(snapInfo); err != nil {
glog.Errorf("rbd: failed to encode snapInfo: %+v for file: %s with error: %v\n", snapInfo, file, err)
return errors.Wrap(err, "rbd: encode error")
}
glog.Infof("rbd: successfully saved snapInfo: %+v into file: %s\n", snapInfo, file)
return nil
}
func loadSnapInfo(snapshot string, persistentStoragePath string, snapInfo *rbdSnapshot) error {
file := path.Join(persistentStoragePath, snapshot+".json")
fp, err := os.Open(file)
if err != nil {
return errors.Wrapf(err, "rbd: open error for %s", file)
}
defer fp.Close()
decoder := json.NewDecoder(fp)
if err = decoder.Decode(snapInfo); err != nil {
return errors.Wrap(err, "rbd: decode error")
}
return nil
}
func deleteSnapInfo(snapshot string, persistentStoragePath string) error {
file := path.Join(persistentStoragePath, snapshot+".json")
glog.Infof("rbd: Deleting file for Snapshot: %s at: %s resulting path: %+v\n", snapshot, persistentStoragePath, file)
err := os.Remove(file)
if err != nil {
if err != os.ErrNotExist {
return errors.Wrapf(err, "rbd: error removing file %s", file)
}
}
return nil
}
func getRBDVolumeByID(volumeID string) (*rbdVolume, error) {
if rbdVol, ok := rbdVolumes[volumeID]; ok {
return rbdVol, nil
@ -436,24 +345,24 @@ func getSnapMon(pOpts *rbdSnapshot, credentials map[string]string) (string, erro
// if mons are set in secret, retrieve them
if len(pOpts.MonValueFromSecret) == 0 {
// yet another sanity check
return "", fmt.Errorf("either monitors or monValueFromSecret must be set")
return "", errors.New("either monitors or monValueFromSecret must be set")
}
if val, ok := credentials[pOpts.MonValueFromSecret]; !ok {
val, ok := credentials[pOpts.MonValueFromSecret]
if !ok {
return "", fmt.Errorf("mon data %s is not set in secret", pOpts.MonValueFromSecret)
} else {
mon = val
}
mon = val
}
return mon, nil
}
func protectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error {
func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte
image := pOpts.VolName
snapID := pOpts.SnapID
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
@ -462,8 +371,8 @@ func protectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]
return err
}
glog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key)
args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key}
klog.V(4).Infof("rbd: snap protect %s using mon %s, pool %s ", image, mon, pOpts.Pool)
args := []string{"snap", "protect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args)
@ -474,7 +383,7 @@ func protectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]
return nil
}
func createSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error {
func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte
mon, err := getSnapMon(pOpts, credentials)
@ -485,12 +394,12 @@ func createSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s
image := pOpts.VolName
snapID := pOpts.SnapID
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
glog.V(4).Infof("rbd: snap create %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key)
args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key}
klog.V(4).Infof("rbd: snap create %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"snap", "create", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args)
@ -501,7 +410,7 @@ func createSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s
return nil
}
func unprotectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error {
func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte
mon, err := getSnapMon(pOpts, credentials)
@ -512,12 +421,12 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[strin
image := pOpts.VolName
snapID := pOpts.SnapID
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
glog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key)
args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key}
klog.V(4).Infof("rbd: snap unprotect %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"snap", "unprotect", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args)
@ -528,7 +437,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[strin
return nil
}
func deleteSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]string) error {
func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte
mon, err := getSnapMon(pOpts, credentials)
@ -539,12 +448,12 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s
image := pOpts.VolName
snapID := pOpts.SnapID
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
glog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.Pool, adminId, key)
args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminId, "-m", mon, "--key=" + key}
klog.V(4).Infof("rbd: snap rm %s using mon %s, pool %s", image, mon, pOpts.Pool)
args := []string{"snap", "rm", "--pool", pOpts.Pool, "--snap", snapID, image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args)
@ -555,7 +464,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminId string, credentials map[string]s
return nil
}
func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminId string, credentials map[string]string) error {
func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte
mon, err := getMon(pVolOpts, credentials)
@ -566,12 +475,12 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminId string
image := pVolOpts.VolName
snapID := pSnapOpts.SnapID
key, err := getRBDKey(adminId, credentials)
key, err := getRBDKey(adminID, credentials)
if err != nil {
return err
}
glog.V(4).Infof("rbd: clone %s using mon %s, pool %s id %s key %s", image, mon, pVolOpts.Pool, adminId, key)
args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.VolName + "@" + snapID, pVolOpts.Pool + "/" + image, "--id", adminId, "-m", mon, "--key=" + key}
klog.V(4).Infof("rbd: clone %s using mon %s, pool %s", image, mon, pVolOpts.Pool)
args := []string{"clone", pSnapOpts.Pool + "/" + pSnapOpts.VolName + "@" + snapID, pVolOpts.Pool + "/" + image, "--id", adminID, "-m", mon, "--key=" + key}
output, err = execCommand("rbd", args)


@ -0,0 +1,56 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"k8s.io/klog"
)
const (
// PluginFolder defines the location of plugins
PluginFolder = "/var/lib/kubelet/plugins"
)
// ForAllFunc is a callback that ForAll invokes for every matched identifier
type ForAllFunc func(identifier string) error
// CachePersister is the interface a metadata cache store must implement
type CachePersister interface {
Create(identifier string, data interface{}) error
Get(identifier string, data interface{}) error
ForAll(pattern string, destObj interface{}, f ForAllFunc) error
Delete(identifier string) error
}
// NewCachePersister returns a CachePersister for the requested metadata store
func NewCachePersister(metadataStore, driverName string) (CachePersister, error) {
if metadataStore == "k8s_configmap" {
klog.Infof("cache-perister: using kubernetes configmap as metadata cache persister")
k8scm := &K8sCMCache{}
k8scm.Client = NewK8sClient()
k8scm.Namespace = GetK8sNamespace()
return k8scm, nil
} else if metadataStore == "node" {
klog.Infof("cache-persister: using node as metadata cache persister")
nc := &NodeCache{}
nc.BasePath = PluginFolder + "/" + driverName
return nc, nil
}
return nil, errors.New("cache-persister: couldn't parse metadatastorage flag")
}
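Both backends satisfy the same CachePersister interface, so callers stay agnostic of where the metadata lands. A hedged usage sketch with the node store follows; the driver name, identifier and record type are placeholders, and it assumes the plugin directory under PluginFolder already exists on the node.
package main

import (
	"k8s.io/klog"

	"github.com/ceph/ceph-csi/pkg/util"
)

// snapRecord is a stand-in for the driver's own metadata structs (e.g. rbdSnapshot).
type snapRecord struct {
	Pool      string `json:"pool"`
	SizeBytes int64  `json:"sizeBytes"`
}

func main() {
	cp, err := util.NewCachePersister("node", "rbd.csi.ceph.com")
	if err != nil {
		klog.Fatalf("failed to create cache persister: %v", err)
	}

	rec := snapRecord{Pool: "rbd", SizeBytes: 1 << 30}
	if err := cp.Create("csi-snap-example", rec); err != nil {
		klog.Fatalf("create: %v", err)
	}

	restored := snapRecord{}
	if err := cp.Get("csi-snap-example", &restored); err != nil {
		klog.Fatalf("get: %v", err)
	}
	klog.Infof("restored %+v", restored)

	if err := cp.Delete("csi-snap-example"); err != nil {
		klog.Fatalf("delete: %v", err)
	}
}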

pkg/util/k8scmcache.go Normal file

@ -0,0 +1,179 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"os"
"regexp"
"github.com/pkg/errors"
"k8s.io/klog"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// K8sCMCache stores metadata as Kubernetes ConfigMaps
type K8sCMCache struct {
Client *k8s.Clientset
Namespace string
}
const (
defaultNamespace = "default"
cmLabel = "csi-metadata"
cmDataKey = "content"
csiMetadataLabelAttr = "com.ceph.ceph-csi/metadata"
)
// GetK8sNamespace returns the pod namespace. If the pod namespace is empty,
// it returns the default namespace.
func GetK8sNamespace() string {
namespace := os.Getenv("POD_NAMESPACE")
if namespace == "" {
return defaultNamespace
}
return namespace
}
// NewK8sClient creates a kubernetes client
func NewK8sClient() *k8s.Clientset {
var cfg *rest.Config
var err error
cPath := os.Getenv("KUBERNETES_CONFIG_PATH")
if cPath != "" {
cfg, err = clientcmd.BuildConfigFromFlags("", cPath)
if err != nil {
klog.Errorf("Failed to get cluster config with error: %v\n", err)
os.Exit(1)
}
} else {
cfg, err = rest.InClusterConfig()
if err != nil {
klog.Errorf("Failed to get cluster config with error: %v\n", err)
os.Exit(1)
}
}
client, err := k8s.NewForConfig(cfg)
if err != nil {
klog.Errorf("Failed to create client with error: %v\n", err)
os.Exit(1)
}
return client
}
func (k8scm *K8sCMCache) getMetadataCM(resourceID string) (*v1.ConfigMap, error) {
cm, err := k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Get(resourceID, metav1.GetOptions{})
if err != nil {
return nil, err
}
return cm, nil
}
// ForAll lists the metadata in configmaps and filters out entries based on the pattern
func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) error {
listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", csiMetadataLabelAttr, cmLabel)}
cms, err := k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).List(listOpts)
if err != nil {
return errors.Wrap(err, "k8s-cm-cache: failed to list metadata configmaps")
}
for _, cm := range cms.Items {
data := cm.Data[cmDataKey]
match, err := regexp.MatchString(pattern, cm.ObjectMeta.Name)
if err != nil {
continue
}
if !match {
continue
}
if err = json.Unmarshal([]byte(data), destObj); err != nil {
return errors.Wrap(err, "k8s-cm-cache: unmarshal error")
}
if err = f(cm.ObjectMeta.Name); err != nil {
return err
}
}
return nil
}
// Create stores the metadata in configmaps with identifier name
func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error {
cm, err := k8scm.getMetadataCM(identifier)
if cm != nil && err == nil {
klog.V(4).Infof("k8s-cm-cache: configmap already exists, skipping configmap creation")
return nil
}
dataJSON, err := json.Marshal(data)
if err != nil {
return errors.Wrap(err, "k8s-cm-cache: marshal error")
}
cm = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: identifier,
Namespace: k8scm.Namespace,
Labels: map[string]string{
csiMetadataLabelAttr: cmLabel,
},
},
Data: map[string]string{},
}
cm.Data[cmDataKey] = string(dataJSON)
_, err = k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Create(cm)
if err != nil {
if apierrs.IsAlreadyExists(err) {
klog.V(4).Infof("k8s-cm-cache: configmap already exists")
return nil
}
return errors.Wrapf(err, "k8s-cm-cache: couldn't persist %s metadata as configmap", identifier)
}
klog.V(4).Infof("k8s-cm-cache: configmap %s successfully created\n", identifier)
return nil
}
// Get retrieves the metadata in configmaps with identifier name
func (k8scm *K8sCMCache) Get(identifier string, data interface{}) error {
cm, err := k8scm.getMetadataCM(identifier)
if err != nil {
return err
}
err = json.Unmarshal([]byte(cm.Data[cmDataKey]), data)
if err != nil {
return errors.Wrap(err, "k8s-cm-cache: unmarshal error")
}
return nil
}
// Delete deletes the metadata in configmaps with identifier name
func (k8scm *K8sCMCache) Delete(identifier string) error {
err := k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Delete(identifier, nil)
if err != nil {
return errors.Wrapf(err, "k8s-cm-cache: couldn't delete metadata configmap %s", identifier)
}
klog.V(4).Infof("k8s-cm-cache: successfully deleted metadata configmap %s", identifier)
return nil
}
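
A hypothetical sketch of driving the ConfigMap-backed cache directly (the snapInfo type and the ^csi-snap- pattern are illustrative; the drivers normally reach this store through the CachePersister interface):

package main

import (
    "log"

    "github.com/ceph/ceph-csi/pkg/util"
)

// snapInfo is an illustrative payload type.
type snapInfo struct {
    VolID string `json:"volID"`
}

func main() {
    // Direct construction; NewCachePersister performs the same wiring when
    // the metadata store is "k8s_configmap".
    cache := &util.K8sCMCache{
        Client:    util.NewK8sClient(),
        Namespace: util.GetK8sNamespace(),
    }
    dest := &snapInfo{}
    // Visit every labelled metadata ConfigMap whose name matches the pattern;
    // dest is re-populated before each callback invocation.
    err := cache.ForAll("^csi-snap-", dest, func(id string) error {
        log.Printf("cached snapshot %s -> volume %s", id, dest.VolID)
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}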

45
pkg/util/log.go Normal file

@ -0,0 +1,45 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"flag"
"os"
"k8s.io/klog"
)
// InitLogging initializes klog alongside glog
// XXX: This is just a temporary solution till all deps move to klog
func InitLogging() {
if err := flag.Set("logtostderr", "true"); err != nil {
klog.Errorf("failed to set logtostderr flag: %v", err)
os.Exit(1)
}
flag.Parse()
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
// Sync klog flags with glog
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
if f2 := klogFlags.Lookup(f1.Name); f2 != nil {
f2.Value.Set(f1.Value.String()) // nolint: errcheck, gosec
}
})
}
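
A minimal sketch of how a driver's main might call this helper (the endpoint flag only mirrors the existing driver flags and is illustrative):

package main

import (
    "flag"

    "github.com/ceph/ceph-csi/pkg/util"
    "k8s.io/klog"
)

var endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")

func main() {
    // InitLogging parses the shared flag set, so driver flags such as
    // -endpoint must be declared before it is called; -v and -logtostderr
    // are then mirrored into klog.
    util.InitLogging()
    klog.V(1).Infof("starting driver at %s", *endpoint)
}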

161
pkg/util/nodecache.go Normal file

@ -0,0 +1,161 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
"k8s.io/klog"
)
// NodeCache stores metadata on the node's local filesystem
type NodeCache struct {
BasePath string
}
var cacheDir = "controller"
var errDec = errors.New("file not found")
// EnsureCacheDirectory creates cache directory if not present
func (nc *NodeCache) EnsureCacheDirectory(cacheDir string) error {
fullPath := path.Join(nc.BasePath, cacheDir)
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
// #nosec
if err := os.Mkdir(fullPath, 0755); err != nil {
return errors.Wrapf(err, "node-cache: failed to create %s folder with error: %v", fullPath, err)
}
}
return nil
}
// ForAll lists the metadata in the node cache and filters out entries based on the pattern
func (nc *NodeCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) error {
err := nc.EnsureCacheDirectory(cacheDir)
if err != nil {
return errors.Wrap(err, "node-cache: couldn't ensure cache directory exists")
}
files, err := ioutil.ReadDir(path.Join(nc.BasePath, cacheDir))
if err != nil {
return errors.Wrapf(err, "node-cache: failed to read %s folder", nc.BasePath)
}
cachePath := path.Join(nc.BasePath, cacheDir)
for _, file := range files {
err = decodeObj(cachePath, pattern, file, destObj)
if err == errDec {
continue
}
if err != nil {
return err
}
if err = f(strings.TrimSuffix(file.Name(), filepath.Ext(file.Name()))); err != nil {
return err
}
}
return nil
}
func decodeObj(filepath, pattern string, file os.FileInfo, destObj interface{}) error {
match, err := regexp.MatchString(pattern, file.Name())
if err != nil || !match {
return errDec
}
if !strings.HasSuffix(file.Name(), ".json") {
return errDec
}
// #nosec
fp, err := os.Open(path.Join(filepath, file.Name()))
if err != nil {
klog.Infof("node-cache: open file: %s err %v", file.Name(), err)
return errDec
}
decoder := json.NewDecoder(fp)
if err = decoder.Decode(destObj); err != nil {
if err = fp.Close(); err != nil {
return errors.Wrapf(err, "failed to close file %s", file.Name())
}
return errors.Wrapf(err, "node-cache: couldn't decode file %s", file.Name())
}
return nil
}
// Create creates the metadata file in cache directory with identifier name
func (nc *NodeCache) Create(identifier string, data interface{}) error {
file := path.Join(nc.BasePath, cacheDir, identifier+".json")
fp, err := os.Create(file)
if err != nil {
return errors.Wrapf(err, "node-cache: failed to create metadata storage file %s\n", file)
}
defer func() {
if err = fp.Close(); err != nil {
klog.Warningf("failed to close file:%s %v", fp.Name(), err)
}
}()
encoder := json.NewEncoder(fp)
if err = encoder.Encode(data); err != nil {
return errors.Wrapf(err, "node-cache: failed to encode metadata for file: %s\n", file)
}
klog.V(4).Infof("node-cache: successfully saved metadata into file: %s\n", file)
return nil
}
// Get retrieves the metadata from cache directory with identifier name
func (nc *NodeCache) Get(identifier string, data interface{}) error {
file := path.Join(nc.BasePath, cacheDir, identifier+".json")
// #nosec
fp, err := os.Open(file)
if err != nil {
return errors.Wrapf(err, "node-cache: open error for %s", file)
}
defer func() {
if err = fp.Close(); err != nil {
klog.Warningf("failed to close file:%s %v", fp.Name(), err)
}
}()
decoder := json.NewDecoder(fp)
if err = decoder.Decode(data); err != nil {
return errors.Wrap(err, "rbd: decode error")
}
return nil
}
// Delete deletes the metadata file from cache directory with identifier name
func (nc *NodeCache) Delete(identifier string) error {
file := path.Join(nc.BasePath, cacheDir, identifier+".json")
err := os.Remove(file)
if err != nil {
if !os.IsNotExist(err) {
return errors.Wrapf(err, "node-cache: error removing file %s", file)
}
}
klog.V(4).Infof("node-cache: successfully deleted metadata storage file at: %+v\n", file)
return nil
}
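
A short, hypothetical sketch of the on-disk layout this cache uses (the BasePath value is illustrative; NewCachePersister normally derives it from PluginFolder and the driver name, and BasePath itself must already exist):

package main

import (
    "log"

    "github.com/ceph/ceph-csi/pkg/util"
)

type volInfo struct {
    Pool string `json:"pool"`
}

func main() {
    nc := &util.NodeCache{BasePath: "/var/lib/kubelet/plugins/csi-rbdplugin"}
    // Create, Get and Delete all operate on BasePath/controller/<id>.json,
    // so the "controller" sub-directory has to exist first.
    if err := nc.EnsureCacheDirectory("controller"); err != nil {
        log.Fatal(err)
    }
    if err := nc.Create("csi-vol-0001", &volInfo{Pool: "rbd"}); err != nil {
        log.Fatal(err)
    }
    if err := nc.Delete("csi-vol-0001"); err != nil {
        log.Fatal(err)
    }
}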


@ -1,68 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"path"
"github.com/ceph/ceph-csi/pkg/rbd"
"github.com/golang/glog"
)
func init() {
flag.Set("logtostderr", "true")
}
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "csi-rbdplugin", "name of the driver")
nodeID = flag.String("nodeid", "", "node id")
containerized = flag.Bool("containerized", true, "whether run as containerized")
)
func main() {
flag.Parse()
if err := createPersistentStorage(path.Join(rbd.PluginFolder, "controller")); err != nil {
glog.Errorf("failed to create persistent storage for controller %v", err)
os.Exit(1)
}
if err := createPersistentStorage(path.Join(rbd.PluginFolder, "node")); err != nil {
glog.Errorf("failed to create persistent storage for node %v", err)
os.Exit(1)
}
handle()
os.Exit(0)
}
func handle() {
driver := rbd.GetRBDDriver()
driver.Run(*driverName, *nodeID, *endpoint, *containerized)
}
func createPersistentStorage(persistentStoragePath string) error {
if _, err := os.Stat(persistentStoragePath); os.IsNotExist(err) {
if err := os.MkdirAll(persistentStoragePath, os.FileMode(0755)); err != nil {
return err
}
} else {
}
return nil
}

12
scripts/lint-go.sh Executable file

@ -0,0 +1,12 @@
#!/bin/bash
set -o pipefail
if [[ -x "$(command -v gometalinter)" ]]; then
gometalinter -j "${GO_METALINTER_THREADS:-1}" \
--sort path --sort line --sort column --deadline=10m \
--enable=misspell --enable=staticcheck \
--vendor "${@-./...}"
else
echo "WARNING: gometalinter not found, skipping lint tests" >&2
fi

49
scripts/lint-text.sh Executable file

@ -0,0 +1,49 @@
#! /bin/bash
# vim: set ts=4 sw=4 et :
# Usage: lint-text.sh [--require-all]
# --require-all Fail instead of warn if a checker is not found
set -e
# Run checks from root of the repo
scriptdir="$(dirname "$(realpath "$0")")"
cd "$scriptdir/.."
# run_check <file_regex> <checker_exe> [optional args to checker...]
function run_check() {
regex="$1"
shift
exe="$1"
shift
if [ -x "$(command -v "$exe")" ]; then
find . -path ./vendor -prune -o -regextype egrep -iregex "$regex" -print0 |
xargs -0rt -n1 "$exe" "$@"
elif [ "$all_required" -eq 0 ]; then
echo "Warning: $exe not found... skipping some tests."
else
echo "FAILED: All checks required, but $exe not found!"
exit 1
fi
}
all_required=0
if [ "x$1" == "x--require-all" ]; then
all_required=1
fi
# markdownlint: https://github.com/markdownlint/markdownlint
# https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md
# Install via: gem install mdl
run_check '.*\.md' mdl --style scripts/mdl-style.rb
# Install via: dnf install shellcheck
run_check '.*\.(ba)?sh' shellcheck
run_check '.*\.(ba)?sh' bash -n
# Install via: pip install yamllint
# disable yamllint check for helm charts
run_check '.*\.ya?ml' yamllint -s -d "{extends: default, rules: {line-length: {allow-non-breakable-inline-mappings: true}},ignore: deploy/rbd/helm/templates/*.yaml}"
echo "ALL OK."

9
scripts/mdl-style.rb Normal file

@ -0,0 +1,9 @@
all
# Refer to the URL below for more information about the markdown rules.
#https://github.com/markdownlint/markdownlint/blob/master/docs/RULES.md
rule 'MD013', :code_blocks => false, :tables => false
exclude_rule 'MD040' # Fenced code blocks should have a language specified
exclude_rule 'MD041' # First line in file should be a top level header

42
scripts/test-go.sh Executable file

@ -0,0 +1,42 @@
#!/bin/bash
GOPACKAGES="$(go list ./... | grep -v vendor | grep -v e2e)"
COVERFILE="${GO_COVER_DIR}profile.cov"
# no special options, exec to go test w/ all pkgs
if [[ ${TEST_EXITFIRST} != "yes" && -z ${TEST_COVERAGE} ]]; then
# shellcheck disable=SC2086
exec go test ${GOPACKAGES}
fi
# our options are set so we need to handle each go package one
# at a time
if [[ ${TEST_COVERAGE} ]]; then
GOTESTOPTS="-covermode=count -coverprofile=cover.out"
echo "mode: count" > "${COVERFILE}"
fi
failed=0
for gopackage in ${GOPACKAGES}; do
echo "--- testing: ${gopackage} ---"
# shellcheck disable=SC2086
go test ${GOTESTOPTS} "${gopackage}" || ((failed+=1))
if [[ -f cover.out ]]; then
# Append to coverfile
grep -v "^mode: count" cover.out >> "${COVERFILE}"
fi
if [[ ${TEST_COVERAGE} = "stdout" && -f cover.out ]]; then
go tool cover -func=cover.out
fi
if [[ ${TEST_COVERAGE} = "html" && -f cover.out ]]; then
mkdir -p coverage
fn="coverage/${gopackage////-}.html"
echo " * generating coverage html: ${fn}"
go tool cover -html=cover.out -o "${fn}"
fi
rm -f cover.out
if [[ ${failed} -ne 0 && ${TEST_EXITFIRST} = "yes" ]]; then
exit ${failed}
fi
done
exit ${failed}


@ -1,4 +0,0 @@
*.tmp
.DS_Store
.build
*.swp


@ -1,38 +0,0 @@
# Setting "sudo" to false forces Travis-CI to use its
# container-based build infrastructure, which has shorter
# queue times.
sudo: false
# Use the newer Travis-CI build templates based on the
# Debian Linux distribution "Trusty" release.
dist: trusty
# Selecting C as the language keeps the container to a
# minimum footprint.
language: c
jobs:
include:
# The test stage validates that the protobuf file was updated
# correctly prior to being committed.
- stage: test
script: make
# The lang stages validate the specification using
# language-specific bindings.
# Lang stage: Cxx
- stage: lang
script: make -C lib/cxx
# Lang stage: Go
- stage: lang
language: go
go: 1.10.4
go_import_path: github.com/container-storage-interface/spec
install:
- make -C lib/go protoc
- make -C lib/go protoc-gen-go
script:
- make -C lib/go

Binary file not shown.


@ -1,55 +0,0 @@
# How to Contribute
CSI is under [Apache 2.0](LICENSE) and accepts contributions via GitHub pull requests.
Contributions require signing an individual or Corporate CLA available [here](https://github.com/container-storage-interface/spec/blob/master/CCLA.pdf) which should be signed and mailed to the [mailing list]( https://groups.google.com/forum/#!topic/container-storage-interface-community/).
This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted.
## Markdown style
To keep consistency throughout the Markdown files in the CSI spec, all files should be formatted one sentence per line.
This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length.
For example, this paragraph will span three lines in the Markdown source.
## Code style
This also applies to the code snippets in the markdown files.
* Please wrap the code at 72 characters.
## Comments
This also applies to the code snippets in the markdown files.
* End each sentence within a comment with a punctuation mark (please note that we generally prefer periods); this applies to incomplete sentences as well.
* For trailing comments, leave one space between the end of the code and the beginning of the comment.
## Git commit
The "system of record" for the specification is the `spec.md` file and all hand-edits of the specification should happen there.
**DO NOT** manually edit the generated protobufs or generated language bindings.
Once changes to `spec.md` are complete, please run `make` to update generated files.
**IMPORTANT:** Prior to committing code please run `make` to ensure that your specification changes have landed in all generated files.
### Commit Style
Each commit should represent a single logical (atomic) change: this makes your changes easier to review.
* Try to avoid unrelated cleanups (e.g., typo fixes or style nits) in the same commit that makes functional changes.
While typo fixes are great, including them in the same commit as functional changes makes the commit history harder to read.
* Developers often make incremental commits to save their progress when working on a change, and then “rewrite history” (e.g., using `git rebase -i`) to create a clean set of commits once the change is ready to be reviewed.
Simple house-keeping for clean git history.
Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit).
* Separate the subject from body with a blank line.
* Limit the subject line to 50 characters.
* Capitalize the subject line.
* Do not end the subject line with a period.
* Use the imperative mood in the subject line.
* Wrap the body at 72 characters.
* Use the body to explain what and why vs. how.
* If there was important/useful/essential conversation or information, copy or include a reference.
* When possible, one keyword to scope the change in the subject (i.e. "README: ...", "tool: ...").


@ -1,51 +0,0 @@
all: build
CSI_SPEC := spec.md
CSI_PROTO := csi.proto
# This is the target for building the temporary CSI protobuf file.
#
# The temporary file is not versioned, and thus will always be
# built on Travis-CI.
$(CSI_PROTO).tmp: $(CSI_SPEC) Makefile
echo "// Code generated by make; DO NOT EDIT." > "$@"
cat $< | sed -n -e '/```protobuf$$/,/^```$$/ p' | sed '/^```/d' >> "$@"
# This is the target for building the CSI protobuf file.
#
# This target depends on its temp file, which is not versioned.
# Therefore when built on Travis-CI the temp file will always
# be built and trigger this target. On Travis-CI the temp file
# is compared with the real file, and if they differ the build
# will fail.
#
# Locally the temp file is simply copied over the real file.
$(CSI_PROTO): $(CSI_PROTO).tmp
ifeq (true,$(TRAVIS))
diff "$@" "$?"
else
diff "$@" "$?" > /dev/null 2>&1 || cp -f "$?" "$@"
endif
build: check
# If this is not running on Travis-CI then for sake of convenience
# go ahead and update the language bindings as well.
ifneq (true,$(TRAVIS))
build:
$(MAKE) -C lib/go
$(MAKE) -C lib/cxx
endif
clean:
$(MAKE) -C lib/go $@
clobber: clean
$(MAKE) -C lib/go $@
rm -f $(CSI_PROTO) $(CSI_PROTO).tmp
# check generated files for violation of standards
check: $(CSI_PROTO)
awk '{ if (length > 72) print NR, $$0 }' $? | diff - /dev/null
.PHONY: clean clobber check


@ -1,10 +0,0 @@
approvers:
- saad-ali # Representing Kubernetes
- thockin # Representing Kubernetes
- jieyu # Representing Mesos
- jdef # Representing Mesos
- anusha-ragunathan # Representing Docker
- ddebroy # Representing Docker
- julian-hj # Representing Cloud Foundry
- paulcwarren # Representing Cloud Foundry
reviewers:

Some files were not shown because too many files have changed in this diff.