Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-11-10 00:10:20 +00:00
Commit 724eed5f94
14  .mergify.yml  Normal file
@@ -0,0 +1,14 @@
---
pull_request_rules:
  - name: automatic merge
    conditions:
      - label!=DNM
      - '#approved-reviews-by>=1'
      - 'status-success=continuous-integration/travis-ci/pr'
    actions:
      merge:
        method: rebase
        rebase_fallback: merge
        strict: smart
      dismiss_reviews: {}
      delete_head_branch: {}
13  .travis.yml
@@ -11,14 +11,15 @@ addons:
language: go
branches:
  only:
    - csi-v0.3
    - master
    - csi-v1.0
    - csi-v1.0 # remove this once csi-v1.0 becomes master

go: 1.11.x

env:
  global:
    - GO_METALINTER_VERSION="v3.0.0"
    - GOLANGCI_VERSION="v1.15.0"
    - TEST_COVERAGE=stdout
    - GO_METALINTER_THREADS=1
    - GO_COVER_DIR=_output
@@ -30,10 +31,10 @@ jobs:
    - gem install mdl
    - pip install --user --upgrade pip
    - pip install --user yamllint
    # install gometalinter
    - curl -L "https://raw.githubusercontent.com/alecthomas/gometalinter/"${GO_METALINTER_VERSION}"/scripts/install.sh" | bash -s -- -b $GOPATH/bin "${GO_METALINTER_VERSION}"
    # install golangci-lint
    - curl -sf "https://install.goreleaser.com/github.com/golangci/golangci-lint.sh" | bash -s -- -b $GOPATH/bin "${GOLANGCI_VERSION}"
script:
  - scripts/lint-text.sh --require-all
  - scripts/lint-go.sh
37  Gopkg.lock  generated
@@ -20,14 +20,6 @@
  revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
  name = "github.com/golang/glog"
  packages = ["."]
  pruneopts = "NUT"
  revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"

[[projects]]
  digest = "1:bff0ce7c8e3d6357fa5a8549bbe4bdb620bddc13c11ae569aa7248ea92e2139f"
  name = "github.com/golang/protobuf"
@@ -116,14 +108,6 @@
  revision = "5853414e1d4771302e0df10d1870c444c2135799"
  version = "v0.2.0"

[[projects]]
  branch = "master"
  digest = "1:0bde3fb932a1aa4e12bc43ef91157fcda27dd0fc5d9f309647544ceaec075f48"
  name = "github.com/kubernetes-csi/drivers"
  packages = ["pkg/csi-common"]
  pruneopts = "NUT"
  revision = "05e1ea84df03b90296869812fa42f4244bd5ab53"

[[projects]]
  digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
  name = "github.com/modern-go/concurrent"
@@ -376,11 +360,11 @@
    "storage/v1beta1",
  ]
  pruneopts = "NUT"
  revision = "67edc246be36579e46a89e29a2f165d47e012109"
  version = "kubernetes-1.13.2"
  revision = "74b699b93c15473932b89e3d1818ba8282f3b5ab"
  version = "kubernetes-1.13.3"

[[projects]]
  digest = "1:a2da0cbc8dfda27eeffa54b53195e607497c6cac737d17f45a667963aeae5f02"
  digest = "1:09dee8b7c6cb2fc9c6bee525de3b95199a82a8647a189e153d072a1dfce17de7"
  name = "k8s.io/apimachinery"
  packages = [
    "pkg/api/errors",
@@ -421,8 +405,8 @@
    "third_party/forked/golang/reflect",
  ]
  pruneopts = "NUT"
  revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
  version = "kubernetes-1.13.2"
  revision = "572dfc7bdfcb4531361a17d27b92851f59acf0dc"
  version = "kubernetes-1.13.3"

[[projects]]
  digest = "1:638623327cb201b425a328d0bddb3379b05eb05ef4cab589380f0be07ac1dc17"
@@ -485,8 +469,8 @@
    "util/integer",
  ]
  pruneopts = "NUT"
  revision = "6bf63545bd0257ed9e701ad95307ffa51b4407c0"
  version = "kubernetes-1.13.2"
  revision = "6e4752048fde21176ab35eb54ec1117359830d8a"
  version = "kubernetes-1.13.3"

[[projects]]
  digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b"
@@ -507,8 +491,8 @@
    "pkg/util/nsenter",
  ]
  pruneopts = "NUT"
  revision = "cff46ab41ff0bb44d8584413b598ad8360ec1def"
  version = "v1.13.2"
  revision = "721bfa751924da8d1680787490c54b9179b1fed0"
  version = "v1.13.3"

[[projects]]
  branch = "master"
@@ -533,10 +517,11 @@
    "github.com/container-storage-interface/spec/lib/go/csi",
    "github.com/golang/protobuf/ptypes",
    "github.com/golang/protobuf/ptypes/timestamp",
    "github.com/kubernetes-csi/drivers/pkg/csi-common",
    "github.com/kubernetes-csi/csi-lib-utils/protosanitizer",
    "github.com/pborman/uuid",
    "github.com/pkg/errors",
    "golang.org/x/net/context",
    "google.golang.org/grpc",
    "google.golang.org/grpc/codes",
    "google.golang.org/grpc/status",
    "k8s.io/api/core/v1",
12  Gopkg.toml
@@ -2,10 +2,6 @@
  name = "github.com/container-storage-interface/spec"
  version = "~1.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/kubernetes-csi/drivers"

[[override]]
  revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d"
  name = "github.com/docker/distribution"
@@ -15,15 +11,15 @@
  version = "1.10.0"

[[constraint]]
  version = "kubernetes-1.13.2"
  version = "kubernetes-1.13.3"
  name = "k8s.io/apimachinery"

[[constraint]]
  name = "k8s.io/kubernetes"
  version = "v1.13.2"
  version = "v1.13.3"

[[override]]
  version = "kubernetes-1.13.2"
  version = "kubernetes-1.13.3"
  name = "k8s.io/api"

[[override]]
@@ -32,7 +28,7 @@

[[constraint]]
  name = "k8s.io/client-go"
  version = "kubernetes-1.13.2"
  version = "kubernetes-1.13.3"

[prune]
  go-tests = true
32  Makefile
@@ -14,12 +14,17 @@

.PHONY: all rbdplugin cephfsplugin

CONTAINER_CMD?=docker

RBD_IMAGE_NAME=$(if $(ENV_RBD_IMAGE_NAME),$(ENV_RBD_IMAGE_NAME),quay.io/cephcsi/rbdplugin)
RBD_IMAGE_VERSION=$(if $(ENV_RBD_IMAGE_VERSION),$(ENV_RBD_IMAGE_VERSION),v1.0.0)

CEPHFS_IMAGE_NAME=$(if $(ENV_CEPHFS_IMAGE_NAME),$(ENV_CEPHFS_IMAGE_NAME),quay.io/cephcsi/cephfsplugin)
CEPHFS_IMAGE_VERSION=$(if $(ENV_CEPHFS_IMAGE_VERSION),$(ENV_CEPHFS_IMAGE_VERSION),v1.0.0)

CSI_IMAGE_NAME?=quay.io/cephcsi/cephcsi
CSI_IMAGE_VERSION?=v1.0.0

$(info rbd image settings: $(RBD_IMAGE_NAME) version $(RBD_IMAGE_VERSION))
$(info cephfs image settings: $(CEPHFS_IMAGE_NAME) version $(CEPHFS_IMAGE_VERSION))

@@ -34,27 +39,28 @@ static-check:
	./scripts/lint-go.sh
	./scripts/lint-text.sh

rbdplugin:
.PHONY: cephcsi
cephcsi:
	if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./cmd/rbd
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephcsi ./cmd/

image-rbdplugin: rbdplugin
	cp _output/rbdplugin deploy/rbd/docker
	docker build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker
image-cephcsi: cephcsi
	cp deploy/cephcsi/image/Dockerfile _output
	$(CONTAINER_CMD) build -t $(CSI_IMAGE_NAME):$(CSI_IMAGE_VERSION) _output

cephfsplugin:
	if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cephfsplugin ./cmd/cephfs
image-rbdplugin: cephcsi
	cp _output/cephcsi deploy/rbd/docker/rbdplugin
	$(CONTAINER_CMD) build -t $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION) deploy/rbd/docker

image-cephfsplugin: cephfsplugin
	cp _output/cephfsplugin deploy/cephfs/docker
	docker build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker
image-cephfsplugin: cephcsi
	cp _output/cephcsi deploy/cephfs/docker/cephfsplugin
	$(CONTAINER_CMD) build -t $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION) deploy/cephfs/docker

push-image-rbdplugin: image-rbdplugin
	docker push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION)
	$(CONTAINER_CMD) push $(RBD_IMAGE_NAME):$(RBD_IMAGE_VERSION)

push-image-cephfsplugin: image-cephfsplugin
	docker push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION)
	$(CONTAINER_CMD) push $(CEPHFS_IMAGE_NAME):$(CEPHFS_IMAGE_VERSION)

clean:
	go clean -r -x
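For orientation, the reworked targets can be exercised as below — a minimal sketch assuming a working Go/`dep` toolchain; the `podman` substitution simply exercises the new `CONTAINER_CMD` knob from the Makefile above:

```bash
# Build the unified binary into _output/cephcsi
make cephcsi

# Build the combined image from deploy/cephcsi/image/Dockerfile
make image-cephcsi

# Build and push the rbd-flavoured image with podman instead of docker
make CONTAINER_CMD=podman image-rbdplugin push-image-rbdplugin

# Override the image tag through the environment-backed variables
ENV_RBD_IMAGE_VERSION=v1.0.1 make image-rbdplugin
```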
133  cmd/cephcsi.go  Normal file
@@ -0,0 +1,133 @@
/*
Copyright 2019 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"flag"
	"os"
	"path"
	"strings"

	"github.com/ceph/ceph-csi/pkg/cephfs"
	"github.com/ceph/ceph-csi/pkg/rbd"
	"github.com/ceph/ceph-csi/pkg/util"
	"k8s.io/klog"
)

const (
	rbdType    = "rbd"
	cephfsType = "cephfs"

	rbdDefaultName    = "rbd.csi.ceph.com"
	cephfsDefaultName = "cephfs.csi.ceph.com"
)

var (
	// common flags
	vtype           = flag.String("type", "", "driver type [rbd|cephfs]")
	endpoint        = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
	driverName      = flag.String("drivername", "", "name of the driver")
	nodeID          = flag.String("nodeid", "", "node id")
	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")

	// rbd related flags
	containerized = flag.Bool("containerized", true, "whether run as containerized")
	configRoot    = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph"+
		" cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets")

	// cephfs related flags
	volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
	mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir")
)

func init() {
	klog.InitFlags(nil)
	if err := flag.Set("logtostderr", "true"); err != nil {
		klog.Exitf("failed to set logtostderr flag: %v", err)
	}
	flag.Parse()
}

func getType() string {
	if vtype == nil || len(*vtype) == 0 {
		a0 := path.Base(os.Args[0])
		if strings.Contains(a0, rbdType) {
			return rbdType
		}
		if strings.Contains(a0, cephfsType) {
			return cephfsType
		}
		return ""
	}
	return *vtype
}

func getDriverName() string {
	// was explicitly passed a driver name
	if driverName != nil && len(*driverName) != 0 {
		return *driverName
	}
	// select driver name based on volume type
	switch getType() {
	case rbdType:
		return rbdDefaultName
	case cephfsType:
		return cephfsDefaultName
	default:
		return ""
	}
}

func main() {
	driverType := getType()
	if len(driverType) == 0 {
		klog.Fatalln("driver type not specified")
	}

	dname := getDriverName()
	err := util.ValidateDriverName(dname)
	if err != nil {
		klog.Fatalln(err) // calls exit
	}

	switch driverType {
	case rbdType:
		rbd.PluginFolder = rbd.PluginFolder + dname
		cp, err := util.CreatePersistanceStorage(
			rbd.PluginFolder, *metadataStorage, dname)
		if err != nil {
			os.Exit(1)
		}
		driver := rbd.NewDriver()
		driver.Run(dname, *nodeID, *endpoint, *configRoot, *containerized, cp)

	case cephfsType:
		cephfs.PluginFolder = cephfs.PluginFolder + dname
		cp, err := util.CreatePersistanceStorage(
			cephfs.PluginFolder, *metadataStorage, dname)
		if err != nil {
			os.Exit(1)
		}
		driver := cephfs.NewDriver()
		driver.Run(dname, *nodeID, *endpoint, *volumeMounter, *mountCacheDir, cp)

	default:
		klog.Fatalln("invalid volume type", *vtype) // calls exit
	}

	os.Exit(0)
}
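The unified entrypoint picks a driver either from `-type` or, failing that, from the invoked binary name (see the `cephcsi-rbd`/`cephcsi-cephfs` symlinks in the image Dockerfile further down). A sketch of both invocation styles; the socket path and node id are placeholders:

```bash
# Explicit selection via -type
cephcsi --type=rbd --endpoint=unix:///csi/csi.sock --nodeid=node-1 \
    --metadatastorage=k8s_configmap

# Implicit selection: getType() falls back to the binary name
cephcsi-cephfs --endpoint=unix:///csi/csi.sock --nodeid=node-1 \
    --volumemounter=fuse --metadatastorage=k8s_configmap
```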
cmd/cephfs/main.go
@@ -19,7 +19,6 @@ package main
import (
	"flag"
	"os"
	"path"

	"github.com/ceph/ceph-csi/pkg/cephfs"
	"github.com/ceph/ceph-csi/pkg/util"
@@ -28,37 +27,37 @@ import (

var (
	endpoint        = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
	driverName      = flag.String("drivername", "csi-cephfsplugin", "name of the driver")
	driverName      = flag.String("drivername", "cephfs.csi.ceph.com", "name of the driver")
	nodeID          = flag.String("nodeid", "", "node id")
	volumeMounter   = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
	mountCacheDir   = flag.String("mountcachedir", "", "mount info cache save dir")
)

func init() {
	klog.InitFlags(nil)
	if err := flag.Set("logtostderr", "true"); err != nil {
		klog.Exitf("failed to set logtostderr flag: %v", err)
	}
	flag.Parse()
}

func main() {
	util.InitLogging()

	if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "controller")); err != nil {
		klog.Errorf("failed to create persistent storage for controller: %v", err)
		os.Exit(1)
	}

	if err := createPersistentStorage(path.Join(cephfs.PluginFolder, "node")); err != nil {
		klog.Errorf("failed to create persistent storage for node: %v", err)
		os.Exit(1)
	}

	cp, err := util.NewCachePersister(*metadataStorage, *driverName)
	err := util.ValidateDriverName(*driverName)
	if err != nil {
		klog.Fatalln(err)
	}
	// update plugin name
	cephfs.PluginFolder = cephfs.PluginFolder + *driverName

	cp, err := util.CreatePersistanceStorage(cephfs.PluginFolder, *metadataStorage, *driverName)
	if err != nil {
		klog.Errorf("failed to define cache persistence method: %v", err)
		os.Exit(1)
	}

	driver := cephfs.NewDriver()
	driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, cp)
	driver.Run(*driverName, *nodeID, *endpoint, *volumeMounter, *mountCacheDir, cp)

	os.Exit(0)
}

func createPersistentStorage(persistentStoragePath string) error {
	return os.MkdirAll(persistentStoragePath, os.FileMode(0755))
}
cmd/rbd/main.go
@@ -19,7 +19,6 @@ package main
import (
	"flag"
	"os"
	"path"

	"github.com/ceph/ceph-csi/pkg/rbd"
	"github.com/ceph/ceph-csi/pkg/util"
@@ -28,43 +27,38 @@ import (

var (
	endpoint        = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
	driverName      = flag.String("drivername", "csi-rbdplugin", "name of the driver")
	driverName      = flag.String("drivername", "rbd.csi.ceph.com", "name of the driver")
	nodeID          = flag.String("nodeid", "", "node id")
	containerized   = flag.Bool("containerized", true, "whether run as containerized")
	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
	configRoot      = flag.String("configroot", "/etc/csi-config", "directory in which CSI specific Ceph"+
		" cluster configurations are present, OR the value \"k8s_objects\" if present as kubernetes secrets")
)

func init() {
	klog.InitFlags(nil)
	if err := flag.Set("logtostderr", "true"); err != nil {
		klog.Exitf("failed to set logtostderr flag: %v", err)
	}
	flag.Parse()
}

func main() {
	util.InitLogging()

	if err := createPersistentStorage(path.Join(rbd.PluginFolder, "controller")); err != nil {
		klog.Errorf("failed to create persistent storage for controller %v", err)
		os.Exit(1)
	}
	if err := createPersistentStorage(path.Join(rbd.PluginFolder, "node")); err != nil {
		klog.Errorf("failed to create persistent storage for node %v", err)
		os.Exit(1)
	}

	cp, err := util.NewCachePersister(*metadataStorage, *driverName)
	err := util.ValidateDriverName(*driverName)
	if err != nil {
		klog.Fatalln(err)
	}
	// update plugin name
	rbd.PluginFolder = rbd.PluginFolder + *driverName

	cp, err := util.CreatePersistanceStorage(rbd.PluginFolder, *metadataStorage, *driverName)
	if err != nil {
		klog.Errorf("failed to define cache persistence method: %v", err)
		os.Exit(1)
	}

	driver := rbd.NewDriver()
	driver.Run(*driverName, *nodeID, *endpoint, *containerized, cp)
	driver.Run(*driverName, *nodeID, *endpoint, *configRoot, *containerized, cp)

	os.Exit(0)
}

func createPersistentStorage(persistentStoragePath string) error {
	if _, err := os.Stat(persistentStoragePath); os.IsNotExist(err) {
		if err = os.MkdirAll(persistentStoragePath, os.FileMode(0755)); err != nil {
			return err
		}
	} else {
		return err
	}
	return nil
}
51  deploy.sh
@@ -1,6 +1,30 @@
#!/bin/bash

if [ "${TRAVIS_BRANCH}" == 'master' ]; then
push_helm_charts() {
	PACKAGE=$1
	CHANGED=0
	VERSION=$(grep 'version:' deploy/"$PACKAGE"/helm/Chart.yaml | awk '{print $2}')

	if [ ! -f "tmp/csi-charts/docs/$PACKAGE/ceph-csi-$PACKAGE-$VERSION.tgz" ]; then
		CHANGED=1
		ln -s helm deploy/"$PACKAGE"/ceph-csi-"$PACKAGE"
		mkdir -p tmp/csi-charts/docs/"$PACKAGE"
		pushd tmp/csi-charts/docs/"$PACKAGE" >/dev/null
		helm init --client-only
		helm package ../../../../deploy/"$PACKAGE"/ceph-csi-"$PACKAGE"
		popd >/dev/null
	fi

	if [ $CHANGED -eq 1 ]; then
		pushd tmp/csi-charts/docs >/dev/null
		helm repo index .
		git add --all :/ && git commit -m "Update repo"
		git push https://"$GITHUB_TOKEN"@github.com/ceph/csi-charts
		popd >/dev/null
	fi
}

if [ "${TRAVIS_BRANCH}" == 'csi-v0.3' ]; then
	export RBD_IMAGE_VERSION='v0.3.0'
	export CEPHFS_IMAGE_VERSION='v0.3.0'
elif [ "${TRAVIS_BRANCH}" == 'csi-v1.0' ]; then
@@ -12,7 +36,7 @@ else
fi

if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
	docker login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
	"${CONTAINER_CMD:-docker}" login -u "${QUAY_IO_USERNAME}" -p "${QUAY_IO_PASSWORD}" quay.io
	make push-image-rbdplugin push-image-cephfsplugin

	set -xe
@@ -29,25 +53,6 @@ if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
	mkdir -p csi-charts/docs
	popd >/dev/null

	CHANGED=0
	VERSION=$(grep 'version:' deploy/rbd/helm/Chart.yaml | awk '{print $2}')

	if [ ! -f "tmp/csi-charts/docs/rbd/ceph-csi-rbd-$VERSION.tgz" ]; then
		CHANGED=1
		ln -s helm deploy/rbd/ceph-csi-rbd
		mkdir -p tmp/csi-charts/docs/rbd
		pushd tmp/csi-charts/docs/rbd >/dev/null
		helm init --client-only
		helm package ../../../../deploy/rbd/ceph-csi-rbd
		popd >/dev/null
	fi

	if [ $CHANGED -eq 1 ]; then
		pushd tmp/csi-charts/docs >/dev/null
		helm repo index .
		git add --all :/ && git commit -m "Update repo"
		git push https://"$GITHUB_TOKEN"@github.com/ceph/csi-charts
		popd >/dev/null
	fi

	push_helm_charts rbd
	push_helm_charts cephfs
fi
14  deploy/cephcsi/image/Dockerfile  Normal file
@@ -0,0 +1,14 @@

FROM ceph/ceph:v14.2
LABEL maintainers="Ceph-CSI Authors"
LABEL description="Ceph-CSI Plugin"

ENV CSIBIN=/usr/local/bin/cephcsi

COPY cephcsi $CSIBIN

RUN chmod +x $CSIBIN && \
    ln -sf $CSIBIN /usr/local/bin/cephcsi-rbd && \
    ln -sf $CSIBIN /usr/local/bin/cephcsi-cephfs

ENTRYPOINT ["/usr/local/bin/cephcsi"]
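A quick smoke test of the resulting image — a sketch assuming the Makefile defaults (`quay.io/cephcsi/cephcsi:v1.0.0`); the flag values are placeholders:

```bash
docker run --rm quay.io/cephcsi/cephcsi:v1.0.0 \
    --type=cephfs --nodeid=node-1 --endpoint=unix:///csi/csi.sock \
    --metadatastorage=node
```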
21  deploy/cephfs/helm/.helmignore  Normal file
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
14  deploy/cephfs/helm/Chart.yaml  Normal file
@@ -0,0 +1,14 @@
---
apiVersion: v1
appVersion: "1.0.0"
description: "Container Storage Interface (CSI) driver,
  provisioner, and attacher for Ceph cephfs"
name: ceph-csi-cephfs
version: 0.5.1
keywords:
  - ceph
  - cephfs
  - ceph-csi
home: https://github.com/ceph/ceph-csi
sources:
  - https://github.com/ceph/ceph-csi/tree/csi-v1.0/deploy/cephfs/helm
29  deploy/cephfs/helm/README.md  Normal file
@@ -0,0 +1,29 @@
# ceph-csi-cephfs

The ceph-csi-cephfs chart adds cephfs volume support to your cluster.

## Install Chart

To install the Chart into your Kubernetes cluster

```bash
helm install --namespace "ceph-csi-cephfs" --name "ceph-csi-cephfs" ceph-csi/ceph-csi-cephfs
```

After installation succeeds, you can get the status of the Chart

```bash
helm status "ceph-csi-cephfs"
```

If you want to delete your Chart, use this command

```bash
helm delete --purge "ceph-csi-cephfs"
```

If you want to delete the namespace, use this command

```bash
kubectl delete namespace ceph-csi-cephfs
```
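The `ceph-csi/` chart prefix above assumes the Ceph CSI chart repository has already been added to the local Helm client; a sketch, with the csi-charts GitHub Pages URL given as an assumption (deploy.sh pushes the packaged charts to ceph/csi-charts):

```bash
helm repo add ceph-csi https://ceph.github.io/csi-charts
helm repo update
```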
2  deploy/cephfs/helm/templates/NOTES.txt  Normal file
@@ -0,0 +1,2 @@
Examples on how to configure a storage class and start using the driver are here:
https://github.com/ceph/ceph-csi/tree/csi-v1.0/examples/cephfs
119  deploy/cephfs/helm/templates/_helpers.tpl  Normal file
@@ -0,0 +1,119 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ceph-csi-cephfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.attacher.fullname" -}}
{{- if .Values.attacher.fullnameOverride -}}
{{- .Values.attacher.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.attacher.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.attacher.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.nodeplugin.fullname" -}}
{{- if .Values.nodeplugin.fullnameOverride -}}
{{- .Values.nodeplugin.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeplugin.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ceph-csi-cephfs.provisioner.fullname" -}}
{{- if .Values.provisioner.fullnameOverride -}}
{{- .Values.provisioner.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.provisioner.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ceph-csi-cephfs.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-cephfs.serviceAccountName.attacher" -}}
{{- if .Values.serviceAccounts.attacher.create -}}
{{ default (include "ceph-csi-cephfs.attacher.fullname" .) .Values.serviceAccounts.attacher.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.attacher.name }}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-cephfs.serviceAccountName.nodeplugin" -}}
{{- if .Values.serviceAccounts.nodeplugin.create -}}
{{ default (include "ceph-csi-cephfs.nodeplugin.fullname" .) .Values.serviceAccounts.nodeplugin.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.nodeplugin.name }}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "ceph-csi-cephfs.serviceAccountName.provisioner" -}}
{{- if .Values.serviceAccounts.provisioner.create -}}
{{ default (include "ceph-csi-cephfs.provisioner.fullname" .) .Values.serviceAccounts.provisioner.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.provisioner.name }}
{{- end -}}
{{- end -}}
28  deploy/cephfs/helm/templates/attacher-clusterrole.yaml  Normal file
@@ -0,0 +1,28 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
{{- end -}}
20  deploy/cephfs/helm/templates/attacher-clusterrolebinding.yaml  Normal file
@@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end -}}
18  deploy/cephfs/helm/templates/attacher-service.yaml  Normal file
@@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    app: {{ include "ceph-csi-cephfs.name" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
  ports:
    - name: dummy
      port: 12345
12  deploy/cephfs/helm/templates/attacher-serviceaccount.yaml  Normal file
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.attacher.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end -}}
60  deploy/cephfs/helm/templates/attacher-statefulset.yaml  Normal file
@@ -0,0 +1,60 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
  name: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.attacher.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  serviceName: {{ include "ceph-csi-cephfs.attacher.fullname" . }}
  replicas: {{ .Values.attacher.replicas }}
  selector:
    matchLabels:
      app: {{ include "ceph-csi-cephfs.name" . }}
      component: {{ .Values.attacher.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-cephfs.name" . }}
        chart: {{ include "ceph-csi-cephfs.chart" . }}
        component: {{ .Values.attacher.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.attacher" . }}
      containers:
        - name: csi-cephfsplugin-attacher
          image: "{{ .Values.attacher.image.repository }}:{{ .Values.attacher.image.tag }}"
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.attacher.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
          resources:
{{ toYaml .Values.attacher.resources | indent 12 }}
      volumes:
        - name: socket-dir
          hostPath:
            path: {{ .Values.socketDir }}
            type: DirectoryOrCreate
{{- if .Values.attacher.affinity -}}
      affinity:
{{ toYaml .Values.attacher.affinity | indent 8 }}
{{- end -}}
{{- if .Values.attacher.nodeSelector -}}
      nodeSelector:
{{ toYaml .Values.attacher.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.attacher.tolerations -}}
      tolerations:
{{ toYaml .Values.attacher.tolerations | indent 8 }}
{{- end -}}
31  deploy/cephfs/helm/templates/nodeplugin-clusterrole.yaml  Normal file
@@ -0,0 +1,31 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
{{- end -}}
20  deploy/cephfs/helm/templates/nodeplugin-clusterrolebinding.yaml  Normal file
@@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end -}}
150  deploy/cephfs/helm/templates/nodeplugin-daemonset.yaml  Normal file
@@ -0,0 +1,150 @@
kind: DaemonSet
apiVersion: apps/v1beta2
metadata:
  name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ include "ceph-csi-cephfs.name" . }}
      component: {{ .Values.nodeplugin.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-cephfs.name" . }}
        chart: {{ include "ceph-csi-cephfs.chart" . }}
        component: {{ .Values.nodeplugin.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
      hostNetwork: true
      hostPID: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          image: "{{ .Values.nodeplugin.registrar.image.repository }}:{{ .Values.nodeplugin.registrar.image.tag }}"
          args:
            - "--v=5"
            - "--csi-address=/csi/{{ .Values.socketFile }}"
            - "--kubelet-registration-path={{ .Values.socketDir }}/{{ .Values.socketFile }}"
          lifecycle:
            preStop:
              exec:
                command: [
                  "/bin/sh", "-c",
                  'rm -rf /registration/{{ .Values.driverName }}
                  /registration/{{ .Values.driverName }}-reg.sock'
                ]
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          imagePullPolicy: {{ .Values.nodeplugin.registrar.image.imagePullPolicy }}
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          resources:
{{ toYaml .Values.nodeplugin.registrar.resources | indent 12 }}
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"
            - "--metadatastorage=k8s_configmap"
            - "--mountcachedir=/mount-cache-dir"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
            - name: DRIVER_NAME
              value: {{ .Values.driverName }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
          volumeMounts:
            - name: mount-cache-dir
              mountPath: /mount-cache-dir
            - name: plugin-dir
              mountPath: {{ .Values.socketDir }}
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: plugin-mount-dir
              mountPath: {{ .Values.volumeDevicesDir }}
              mountPropagation: "Bidirectional"
            - mountPath: /dev
              name: host-dev
            - mountPath: /rootfs
              name: host-rootfs
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
          resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
      volumes:
        - name: mount-cache-dir
          emptyDir: {}
        - name: plugin-dir
          hostPath:
            path: {{ .Values.socketDir }}
            type: DirectoryOrCreate
        - name: plugin-mount-dir
          hostPath:
            path: {{ .Values.volumeDevicesDir }}
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: {{ .Values.registrationDir }}
            type: Directory
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-rootfs
          hostPath:
            path: /
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
{{- if .Values.nodeplugin.affinity -}}
      affinity:
{{ toYaml .Values.nodeplugin.affinity | indent 8 }}
{{- end -}}
{{- if .Values.nodeplugin.nodeSelector -}}
      nodeSelector:
{{ toYaml .Values.nodeplugin.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.nodeplugin.tolerations -}}
      tolerations:
{{ toYaml .Values.nodeplugin.tolerations | indent 8 }}
{{- end -}}
12  deploy/cephfs/helm/templates/nodeplugin-serviceaccount.yaml  Normal file
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.nodeplugin.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "ceph-csi-cephfs.serviceAccountName.nodeplugin" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.nodeplugin.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end -}}
37  deploy/cephfs/helm/templates/provisioner-clusterrole.yaml  Normal file
@@ -0,0 +1,37 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
{{- end -}}
20  deploy/cephfs/helm/templates/provisioner-clusterrolebinding.yaml  Normal file
@@ -0,0 +1,20 @@
{{- if .Values.rbac.create -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end -}}
19  deploy/cephfs/helm/templates/provisioner-role.yaml  Normal file
@@ -0,0 +1,19 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "delete"]
{{- end -}}
21  deploy/cephfs/helm/templates/provisioner-rolebinding.yaml  Normal file
@@ -0,0 +1,21 @@
{{- if .Values.rbac.create -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
subjects:
  - kind: ServiceAccount
    name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: Role
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
  namespace: {{ .Release.Namespace }}
{{- end -}}
18  deploy/cephfs/helm/templates/provisioner-service.yaml  Normal file
@@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    app: {{ include "ceph-csi-cephfs.name" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
  ports:
    - name: dummy
      port: 12345
12  deploy/cephfs/helm/templates/provisioner-serviceaccount.yaml  Normal file
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.provisioner.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- end -}}
94  deploy/cephfs/helm/templates/provisioner-statefulset.yaml  Normal file
@@ -0,0 +1,94 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
  name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-cephfs.name" . }}
    chart: {{ include "ceph-csi-cephfs.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  serviceName: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
  replicas: {{ .Values.provisioner.replicas }}
  selector:
    matchLabels:
      app: {{ include "ceph-csi-cephfs.name" . }}
      component: {{ .Values.provisioner.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-cephfs.name" . }}
        chart: {{ include "ceph-csi-cephfs.chart" . }}
        component: {{ .Values.provisioner.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-cephfs.serviceAccountName.provisioner" . }}
      containers:
        - name: csi-provisioner
          image: "{{ .Values.provisioner.image.repository }}:{{ .Values.provisioner.image.tag }}"
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.provisioner.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
          resources:
{{ toYaml .Values.provisioner.resources | indent 12 }}
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"
            - "--metadatastorage=k8s_configmap"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
            - name: DRIVER_NAME
              value: {{ .Values.driverName }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.imagePullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
            - name: host-rootfs
              mountPath: "/rootfs"
          resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
      volumes:
        - name: socket-dir
          emptyDir: {}
        # FIXME this seems way too much. Why is it needed at all for this?
        - name: host-rootfs
          hostPath:
            path: /
{{- if .Values.provisioner.affinity -}}
      affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 }}
{{- end -}}
{{- if .Values.provisioner.nodeSelector -}}
      nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 }}
{{- end -}}
{{- if .Values.provisioner.tolerations -}}
      tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 }}
{{- end -}}
80  deploy/cephfs/helm/values.yaml  Normal file
@@ -0,0 +1,80 @@
---
rbac:
  create: true

serviceAccounts:
  attacher:
    create: true
    name:
  nodeplugin:
    create: true
    name:
  provisioner:
    create: true
    name:

socketDir: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
socketFile: csi.sock
registrationDir: /var/lib/kubelet/plugins_registry
volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
driverName: cephfs.csi.ceph.com
attacher:
  name: attacher

  replicaCount: 1

  image:
    repository: quay.io/k8scsi/csi-attacher
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}

nodeplugin:
  name: nodeplugin

  registrar:
    image:
      repository: quay.io/k8scsi/csi-node-driver-registrar
      tag: v1.0.2
      pullPolicy: IfNotPresent

    resources: {}

  plugin:
    image:
      repository: quay.io/cephcsi/cephfsplugin
      tag: v1.0.0
      pullPolicy: IfNotPresent

    resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}

provisioner:
  name: provisioner

  replicaCount: 1

  image:
    repository: quay.io/k8scsi/csi-provisioner
    tag: v1.0.1
    pullPolicy: IfNotPresent

  resources: {}

  nodeSelector: {}

  tolerations: []

  affinity: {}
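Any of these defaults can be overridden per release without editing the file; a sketch using standard `helm --set` overrides (release name and tag values are placeholders):

```bash
helm install --namespace ceph-csi-cephfs --name ceph-csi-cephfs \
    ceph-csi/ceph-csi-cephfs \
    --set nodeplugin.plugin.image.tag=v1.0.0 \
    --set attacher.image.tag=v1.0.1
```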
@@ -10,9 +10,6 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
@@ -22,6 +19,9 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
@@ -34,13 +34,13 @@ spec:
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
              value: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
              mountPath: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-cephfsplugin
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
            type: DirectoryOrCreate
@@ -34,11 +34,11 @@ spec:
            - "--v=5"
          env:
            - name: ADDRESS
              value: /var/lib/kubelet/plugins/csi-cephfsplugin/csi-provisioner.sock
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
              mountPath: /csi
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
@@ -49,7 +49,7 @@ spec:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=csi-cephfsplugin"
            - "--drivername=cephfs.csi.ceph.com"
            - "--metadatastorage=k8s_configmap"
          env:
            - name: NODE_ID
@@ -61,11 +61,11 @@ spec:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CSI_ENDPOINT
              value: unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi-provisioner.sock
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
              mountPath: /csi
            - name: host-sys
              mountPath: /sys
            - name: lib-modules
@@ -76,7 +76,7 @@ spec:
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-cephfsplugin
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com
            type: DirectoryOrCreate
        - name: host-sys
          hostPath:
@@ -23,7 +23,7 @@ spec:
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
          lifecycle:
            preStop:
              exec:
@@ -53,8 +53,9 @@ spec:
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=csi-cephfsplugin"
            - "--drivername=cephfs.csi.ceph.com"
            - "--metadatastorage=k8s_configmap"
            - "--mountcachedir=/mount-cache-dir"
          env:
            - name: NODE_ID
              valueFrom:
@@ -65,11 +66,13 @@ spec:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CSI_ENDPOINT
              value: unix://var/lib/kubelet/plugins/csi-cephfsplugin/csi.sock
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: mount-cache-dir
              mountPath: /mount-cache-dir
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins/csi-cephfsplugin
              mountPath: /csi
            - name: csi-plugins-dir
              mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
              mountPropagation: "Bidirectional"
@@ -84,9 +87,11 @@ spec:
            - name: host-dev
              mountPath: /dev
      volumes:
        - name: mount-cache-dir
          emptyDir: {}
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-cephfsplugin/
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
            type: DirectoryOrCreate
        - name: csi-plugins-dir
          hostPath:
@@ -10,6 +10,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]
@@ -22,9 +25,6 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]

---
kind: ClusterRoleBinding

@@ -10,9 +10,15 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
@@ -22,12 +28,9 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
@@ -42,3 +45,35 @@ roleRef:
  kind: ClusterRole
  name: cephfs-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: cephfs-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "delete"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: cephfs-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io

@@ -4,7 +4,7 @@ appVersion: "1.0.0"
description: "Container Storage Interface (CSI) driver,
provisioner, snapshotter, and attacher for Ceph RBD"
name: ceph-csi-rbd
version: 0.4.0
version: 0.5.1
keywords:
  - ceph
  - rbd

@@ -4,7 +4,7 @@ The ceph-csi-rbd chart adds rbd volume support to your cluster.

## Install Chart

To install the Chart into your Kubernetes cluster :
To install the Chart into your Kubernetes cluster

```bash
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" ceph-csi/ceph-csi-rbd
@@ -16,9 +16,14 @@ After installation succeeds, you can get a status of Chart
helm status "ceph-csi-rbd"
```

If you want to delete your Chart, use this command:
If you want to delete your Chart, use this command

```bash
helm delete --purge "ceph-csi-rbd"
```

If you want to delete the namespace, use this command

```bash
kubectl delete namespace ceph-csi-rbd
```

@@ -22,4 +22,7 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
{{- end -}}

@@ -39,7 +39,11 @@ spec:
    lifecycle:
      preStop:
        exec:
          command: ["/bin/sh", "-c", "rm -rf /registration/csi-rbdplugin /registration/csi-rbdplugin-reg.sock"]
          command: [
            "/bin/sh", "-c",
            'rm -rf /registration/{{ .Values.driverName }}
            /registration/{{ .Values.driverName }}-reg.sock'
          ]
    env:
      - name: KUBE_NODE_NAME
        valueFrom:
@@ -64,12 +68,14 @@ spec:
      - "--nodeid=$(NODE_ID)"
      - "--endpoint=$(CSI_ENDPOINT)"
      - "--v=5"
      - "--drivername=csi-rbdplugin"
      - "--drivername=$(DRIVER_NAME)"
      - "--containerized=true"
      - "--metadatastorage=k8s_configmap"
    env:
      - name: HOST_ROOTFS
        value: "/rootfs"
      - name: DRIVER_NAME
        value: {{ .Values.driverName }}
      - name: NODE_ID
        valueFrom:
          fieldRef:

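For reference, once the chart substitutes the `driverName` value, the templated preStop hook above collapses to a plain shell command; a sketch of the rendered result for the default `rbd.csi.ceph.com` (illustrative rendering, not chart source):

```bash
# Rendered preStop command with driverName=rbd.csi.ceph.com (illustrative)
/bin/sh -c 'rm -rf /registration/rbd.csi.ceph.com /registration/rbd.csi.ceph.com-reg.sock'
```
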
@@ -10,6 +10,9 @@ metadata:
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
@@ -43,4 +46,7 @@ rules:
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs: ["create"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
{{- end -}}

@@ -69,12 +69,14 @@ spec:
      - "--nodeid=$(NODE_ID)"
      - "--endpoint=$(CSI_ENDPOINT)"
      - "--v=5"
      - "--drivername=csi-rbdplugin"
      - "--drivername=$(DRIVER_NAME)"
      - "--containerized=true"
      - "--metadatastorage=k8s_configmap"
    env:
      - name: HOST_ROOTFS
        value: "/rootfs"
      - name: DRIVER_NAME
        value: {{ .Values.driverName }}
      - name: NODE_ID
        valueFrom:
          fieldRef:

@@ -13,10 +13,11 @@ serviceAccounts:
    create: true
    name:

socketDir: /var/lib/kubelet/plugins/csi-rbdplugin
socketDir: /var/lib/kubelet/plugins/rbd.csi.ceph.com
socketFile: csi.sock
registrationDir: /var/lib/kubelet/plugins_registry
volumeDevicesDir: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices
driverName: rbd.csi.ceph.com

attacher:
  name: attacher

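Because `driverName` and `socketDir` are now ordinary chart values, they can be overridden at install time; a minimal sketch (the values shown are the defaults from above, and the release/namespace names follow the chart README):

```bash
# Install the chart with an explicit driver name and socket dir (defaults shown)
helm install --namespace "ceph-csi-rbd" --name "ceph-csi-rbd" \
  --set driverName=rbd.csi.ceph.com \
  --set socketDir=/var/lib/kubelet/plugins/rbd.csi.ceph.com \
  ceph-csi/ceph-csi-rbd
```
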
@@ -10,9 +10,6 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
@@ -22,6 +19,9 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding

@@ -10,6 +10,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]

@@ -10,9 +10,15 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
@@ -22,18 +28,9 @@ rules:
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "create", "update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
@@ -43,6 +40,9 @@ rules:
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs: ["create"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
@@ -57,3 +57,35 @@ roleRef:
  kind: ClusterRole
  name: rbd-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: rbd-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "delete"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: rbd-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io

@@ -34,13 +34,13 @@ spec:
      - "--csi-address=$(ADDRESS)"
    env:
      - name: ADDRESS
        value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
        value: unix:///csi/csi-attacher.sock
    imagePullPolicy: "IfNotPresent"
    volumeMounts:
      - name: socket-dir
        mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
        mountPath: /csi
  volumes:
    - name: socket-dir
      hostPath:
        path: /var/lib/kubelet/plugins/csi-rbdplugin
        path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
      type: DirectoryOrCreate

@@ -34,11 +34,11 @@ spec:
      - "--v=5"
    env:
      - name: ADDRESS
        value: /var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock
        value: unix:///csi/csi-provisioner.sock
    imagePullPolicy: "IfNotPresent"
    volumeMounts:
      - name: socket-dir
        mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
        mountPath: /csi
  - name: csi-snapshotter
    image: quay.io/k8scsi/csi-snapshotter:v1.0.1
    args:
@@ -47,13 +47,13 @@ spec:
      - "--v=5"
    env:
      - name: ADDRESS
        value: /var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock
        value: unix:///csi/csi-provisioner.sock
    imagePullPolicy: Always
    securityContext:
      privileged: true
    volumeMounts:
      - name: socket-dir
        mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
        mountPath: /csi
  - name: csi-rbdplugin
    securityContext:
      privileged: true
@@ -64,9 +64,10 @@ spec:
      - "--nodeid=$(NODE_ID)"
      - "--endpoint=$(CSI_ENDPOINT)"
      - "--v=5"
      - "--drivername=csi-rbdplugin"
      - "--drivername=rbd.csi.ceph.com"
      - "--containerized=true"
      - "--metadatastorage=k8s_configmap"
      - "--configroot=k8s_objects"
    env:
      - name: HOST_ROOTFS
        value: "/rootfs"
@@ -79,11 +80,11 @@ spec:
        fieldRef:
          fieldPath: metadata.namespace
      - name: CSI_ENDPOINT
        value: unix://var/lib/kubelet/plugins/csi-rbdplugin/csi-provisioner.sock
        value: unix:///csi/csi-provisioner.sock
    imagePullPolicy: "IfNotPresent"
    volumeMounts:
      - name: socket-dir
        mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
        mountPath: /csi
      - mountPath: /dev
        name: host-dev
      - mountPath: /rootfs
@@ -108,5 +109,5 @@ spec:
        path: /lib/modules
    - name: socket-dir
      hostPath:
        path: /var/lib/kubelet/plugins/csi-rbdplugin
        path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
      type: DirectoryOrCreate

@@ -24,14 +24,14 @@ spec:
    args:
      - "--v=5"
      - "--csi-address=/csi/csi.sock"
      - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-rbdplugin/csi.sock"
      - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
    lifecycle:
      preStop:
        exec:
          command: [
            "/bin/sh", "-c",
            "rm -rf /registration/csi-rbdplugin \
            /registration/csi-rbdplugin-reg.sock"
            "rm -rf /registration/rbd.csi.ceph.com \
            /registration/rbd.csi.ceph.com-reg.sock"
          ]
    env:
      - name: KUBE_NODE_NAME
@@ -54,9 +54,10 @@ spec:
      - "--nodeid=$(NODE_ID)"
      - "--endpoint=$(CSI_ENDPOINT)"
      - "--v=5"
      - "--drivername=csi-rbdplugin"
      - "--drivername=rbd.csi.ceph.com"
      - "--containerized=true"
      - "--metadatastorage=k8s_configmap"
      - "--configroot=k8s_objects"
    env:
      - name: HOST_ROOTFS
        value: "/rootfs"
@@ -69,11 +70,11 @@ spec:
        fieldRef:
          fieldPath: metadata.namespace
      - name: CSI_ENDPOINT
        value: unix://var/lib/kubelet/plugins_registry/csi-rbdplugin/csi.sock
        value: unix:///csi/csi.sock
    imagePullPolicy: "IfNotPresent"
    volumeMounts:
      - name: plugin-dir
        mountPath: /var/lib/kubelet/plugins_registry/csi-rbdplugin
        mountPath: /csi
      - name: pods-mount-dir
        mountPath: /var/lib/kubelet/pods
        mountPropagation: "Bidirectional"
@@ -92,7 +93,7 @@ spec:
  volumes:
    - name: plugin-dir
      hostPath:
        path: /var/lib/kubelet/plugins/csi-rbdplugin
        path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
      type: DirectoryOrCreate
    - name: plugin-mount-dir
      hostPath:

@@ -5,8 +5,8 @@ and attach and mount existing ones to workloads.

## Building

CSI CephFS plugin can be compiled in a form of a binary file or in a form of a
Docker image.
CSI CephFS plugin can be compiled in the form of a binary file or in the form
of a Docker image.
When compiled as a binary file, the result is stored in the `_output/`
directory with the name `cephfsplugin`.
When compiled as an image, it's stored in the local Docker image store.
@@ -30,15 +30,17 @@ make image-cephfsplugin
Option | Default value | Description
--------------------|-----------------------|------------------------------------------------------------
`--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket
`--drivername` | `csi-cephfsplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--drivername` | `cephfs.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--nodeid` | _empty_ | This node's ID
`--volumemounter` | _empty_ | default volume mounter. Available options are `kernel` and `fuse`. This is the mount method used if volume parameters don't specify otherwise. If left unspecified, the driver will first probe for `ceph-fuse` in the system's path and will choose the Ceph kernel client if probing fails.
`--metadatastorage` | _empty_ | Whether should metadata be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`)
`--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`)
`--mountcachedir` | _empty_ | volume mount cache info save dir. If left unspecified, the driver will not record mount info; otherwise it saves mount info and, when the driver restarts, remounts the volumes it cached.

**Available environmental variables:** `KUBERNETES_CONFIG_PATH`: if you use
`k8s_configmap` as metadata store, specify the path of your k8s config file (if
not specified, the plugin will assume you're running it inside a k8s cluster and
find the config itself).
**Available environmental variables:**

`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
the path of your k8s config file (if not specified, the plugin will assume
you're running it inside a k8s cluster and find the config itself).

`POD_NAMESPACE`: if you use `k8s_configmap` as metadata store, `POD_NAMESPACE`
is used to define in which namespace you want the configmaps to be stored

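Putting the command-line options and environment variables together, a minimal sketch of invoking the plugin binary directly (the socket path, node ID, and namespace are illustrative; the binary location follows the Building section above):

```bash
# Run the CephFS plugin with configmap-backed metadata (values illustrative)
export KUBERNETES_CONFIG_PATH="$HOME/.kube/config"  # only needed outside a cluster
export POD_NAMESPACE="default"                      # namespace for the configmaps
./_output/cephfsplugin \
  --endpoint=unix:///tmp/csi.sock \
  --drivername=cephfs.csi.ceph.com \
  --nodeid=node-1 \
  --metadatastorage=k8s_configmap \
  --mountcachedir=/mount-cache-dir
```
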
@@ -1,7 +1,7 @@
# CSI RBD Plugin

The RBD CSI plugin is able to provision new RBD images and
attach and mount those to worlkoads.
attach and mount those to workloads.

## Building

@@ -29,12 +29,14 @@ make image-rbdplugin
Option | Default value | Description
------ | ------------- | -----------
`--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket
`--drivername` | `csi-cephfsplugin` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--drivername` | `rbd.csi.ceph.com` | name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value)
`--nodeid` | _empty_ | This node's ID
`--containerized` | true | Whether running in containerized mode
`--metadatastorage` | _empty_ | Whether metadata should be kept on node as file or in a k8s configmap (`node` or `k8s_configmap`)
`--configroot` | `/etc/csi-config` | Directory in which CSI-specific Ceph cluster configurations are present, OR the value `k8s_objects` if present as Kubernetes secrets

**Available environmental variables:**

`HOST_ROOTFS`: rbdplugin searches the `/proc` directory under the directory set by `HOST_ROOTFS`.

`KUBERNETES_CONFIG_PATH`: if you use `k8s_configmap` as metadata store, specify
@@ -49,8 +51,9 @@ the configmaps to be stored

Parameter | Required | Description
--------- | -------- | -----------
`monitors` | one of `monitors` and `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
`monValueFromSecret` | one of `monitors` and `monValueFromSecret` must be set | a string pointing to the key in the credential secret whose value is the mon list. This is used when the monitors' IPs or hostnames change; the secret can be updated to pick up the new monitors.
`monitors` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | Comma separated list of Ceph monitors (e.g. `192.168.100.1:6789,192.168.100.2:6789,192.168.100.3:6789`)
`monValueFromSecret` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | a string pointing to the key in the credential secret whose value is the mon list. This is used when the monitors' IPs or hostnames change; the secret can be updated to pick up the new monitors.
`clusterID` | one of `monitors`, `clusterID` or `monValueFromSecret` must be set | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use
`pool` | yes | Ceph pool into which the RBD image shall be created
`imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format)
`imageFeatures` | no | RBD image features. Available for `imageFormat=2`. CSI RBD currently supports only the `layering` feature. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-feature)
@@ -58,13 +61,22 @@ Parameter | Required | Description
`csi.storage.k8s.io/provisioner-secret-namespace`, `csi.storage.k8s.io/node-publish-secret-namespace` | for Kubernetes | namespaces of the above Secret objects
`mounter`| no | if set to `rbd-nbd`, use `rbd-nbd` on nodes that have `rbd-nbd` and `nbd` kernel modules to map rbd images

NOTE: If the `clusterID` parameter is used, then an accompanying Ceph cluster
configuration secret or config files need to be provided to the running pods.
Refer to [Cluster ID based configuration](../examples/README.md#cluster-id-based-configuration)
for more information. A suggested way to populate the clusterID is to use the
output of `ceph fsid` of the Ceph cluster to be used for provisioning.

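A minimal sketch of deriving the value that way:

```bash
# Use the Ceph cluster's fsid as the clusterID for the StorageClass
CLUSTER_ID="$(ceph fsid)"   # prints the cluster's UUID
echo "${CLUSTER_ID}"
```
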
**Required secrets:**

Admin credentials are required for provisioning new RBD images. `ADMIN_NAME`:
`ADMIN_PASSWORD` - note that the key of the key-value pair is the name of the
client with admin privileges, and the value is its password.

Also note that CSI RBD expects the admin keyring and Ceph config file in `/etc/ceph`.
If clusterID is specified, then a secret with various keys and values as
specified in `examples/rbd/template-ceph-cluster-ID-secret.yaml` needs to be
created, with the secret name matching the string value provided as the
`clusterID`.

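As a rough sketch, such a secret could also be created imperatively; the secret name and the `admin` key below are illustrative and must match what the StorageClass secret references expect:

```bash
# Key name = Ceph client with admin privileges, value = its key (illustrative)
kubectl create secret generic csi-rbd-secret \
  --from-literal=admin="$(ceph auth get-key client.admin)"
```
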
## Deployment with Kubernetes

@@ -110,7 +122,7 @@ Deploys a daemon set with two containers: CSI driver-registrar and the CSI RBD d

## Verifying the deployment in Kubernetes

After successfuly completing the steps above, you should see output similar to this:
After successfully completing the steps above, you should see output similar to this:

```bash
$ kubectl get all

@@ -7,11 +7,16 @@ By default, they look for the YAML manifests in
`../../deploy/{rbd,cephfs}/kubernetes`.
You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`.

Once the plugin is successfuly deployed, you'll need to customize
Once the plugin is successfully deployed, you'll need to customize
`storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster
setup.
Please consult the documentation for info about available parameters.

**NOTE:** See section
[Cluster ID based configuration](#cluster-id-based-configuration) if using
the `clusterID` instead of `monitors` or `monValueFromSecret` option in the
storage class for RBD based provisioning before proceeding.

After configuring the secrets, monitors, etc. you can deploy a
testing Pod mounting a RBD image / CephFS volume:

@@ -108,9 +113,142 @@ one of your Ceph pod.

To restore the snapshot to a new PVC, deploy
[pvc-restore.yaml](./rbd/pvc-restore.yaml) and a testing pod
[pod-restore.yaml](./rbd/pvc-restore.yaml):
[pod-restore.yaml](./rbd/pod-restore.yaml):

```bash
kubectl create -f pvc-restore.yaml
kubectl create -f pod-restore.yaml
```

## How to test RBD MULTI_NODE_MULTI_WRITER BLOCK feature

Requires feature-gates: `BlockVolume=true` `CSIBlockVolume=true`

*NOTE* The MULTI_NODE_MULTI_WRITER capability is only available for
Volumes that are of access_type `block`.

*WARNING* This feature is strictly for workloads that know how to deal
with concurrent access to the Volume (e.g. Active/Passive applications).
Using RWX modes on non-clustered file systems with applications trying
to simultaneously access the Volume will likely result in data corruption!

Following are examples for issuing a request for a `Block`
`ReadWriteMany` Claim, and using the resultant Claim for a POD:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: block-pvc
spec:
  accessModes:
    - ReadWriteMany
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd
```

Create a POD that uses this PVC:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-container
      image: debian
      command: ["/bin/bash", "-c"]
      args: [ "tail -f /dev/null" ]
      volumeDevices:
        - devicePath: /dev/rbdblock
          name: my-volume
      imagePullPolicy: IfNotPresent
  volumes:
    - name: my-volume
      persistentVolumeClaim:
        claimName: block-pvc
```

Now we can create a second POD (ensure it is scheduled on a different node;
multi-writer on a single node works without this feature) that also uses this
PVC at the same time. Again, wait for the pod to enter the Running state, and
verify that the block device is available.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: another-pod
spec:
  containers:
    - name: my-container
      image: debian
      command: ["/bin/bash", "-c"]
      args: [ "tail -f /dev/null" ]
      volumeDevices:
        - devicePath: /dev/rbdblock
          name: my-volume
      imagePullPolicy: IfNotPresent
  volumes:
    - name: my-volume
      persistentVolumeClaim:
        claimName: block-pvc
```

Wait for the PODs to enter the Running state, then check that our block device
is available at `/dev/rbdblock` in both containers:

```bash
$ kubectl exec -it my-pod -- fdisk -l /dev/rbdblock
Disk /dev/rbdblock: 1 GiB, 1073741824 bytes, 2097152 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
```

```bash
$ kubectl exec -it another-pod -- fdisk -l /dev/rbdblock
Disk /dev/rbdblock: 1 GiB, 1073741824 bytes, 2097152 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
```

## Cluster ID based configuration

Before creating a storage class that uses the option `clusterID` to refer to a
Ceph cluster, the following actions need to be completed.

Get the following information from the Ceph cluster (a command sketch follows
this list):

* Admin ID and key, which have privileges to perform CRUD operations on the Ceph
  cluster and pools of choice
  * Key is typically the output of `ceph auth get-key client.admin`, where
    `admin` is the Admin ID
  * Used to substitute admin/user ID and key values in the files below
* Ceph monitor list
  * Typically in the output of `ceph mon dump`
  * Used to prepare a comma separated MON list where required in the files below
* Ceph cluster fsid
  * Used if choosing the Ceph cluster fsid as the unique value of clusterID
  * Output of `ceph fsid`
  * Used to substitute `<cluster-id>` references in the files below

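A sketch of gathering these values (`client.admin` stands in for your admin ID):

```bash
# Collect the values referenced in the list above
ceph fsid                                  # candidate clusterID
ceph mon dump                              # monitor list for the MON entries
echo -n "admin" | base64                   # admin ID, base64-encoded for the secret
ceph auth get-key client.admin | base64    # admin key, base64-encoded for the secret
```
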
Update the template
[template-ceph-cluster-ID-secret.yaml](./rbd/template-ceph-cluster-ID-secret.yaml)
with values from
a Ceph cluster and replace `<cluster-id>` with the chosen clusterID to create
the following secret:

* `kubectl create -f rbd/template-ceph-cluster-ID-secret.yaml`

Storage class and snapshot class, using `<cluster-id>` as the value for the
option `clusterID`, can now be created on the cluster.

The remaining steps to test functionality remain the same as mentioned in the
sections above.

@@ -3,7 +3,7 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs
provisioner: csi-cephfsplugin
provisioner: cephfs.csi.ceph.com
parameters:
  # Comma separated list of Ceph monitors
  # if using FQDN, make sure csi plugin's dns policy is appropriate.

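Since the `provisioner` field must match the driver name exactly, a quick check after applying the manifest can catch a stale name; a small sketch using the storage class from this example:

```bash
# Verify the storage class references the renamed driver
kubectl get storageclass csi-cephfs -o jsonpath='{.provisioner}'
# expected: cephfs.csi.ceph.com
```
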
18
examples/rbd/raw-block-pod.yaml
Normal file
@@ -0,0 +1,18 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-raw-block-volume
spec:
  containers:
    - name: fc-container
      image: fedora:26
      command: ["/bin/sh", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc
13
examples/rbd/raw-block-pvc.yaml
Normal file
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd
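To exercise the two new manifests together (paths as in this repository):

```bash
# Create the raw-block PVC, then the pod that consumes it as /dev/xvda
kubectl create -f examples/rbd/raw-block-pvc.yaml
kubectl create -f examples/rbd/raw-block-pod.yaml
kubectl get pvc raw-block-pvc   # should report Bound once provisioned
```
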
@@ -3,9 +3,21 @@ apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
snapshotter: csi-rbdplugin
snapshotter: rbd.csi.ceph.com
parameters:
  pool: rbd
  # Comma separated list of Ceph monitors
  # if using FQDN, make sure csi plugin's dns policy is appropriate.
  monitors: mon1:port,mon2:port,...
  # OR,
  # String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  # If using clusterID, ensure to create a secret, as in
  # template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to
  # represent the Ceph cluster in clusterID
  # clusterID: <cluster-id>

  csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
  csi.storage.k8s.io/snapshotter-secret-namespace: default

@@ -3,12 +3,21 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd
provisioner: csi-rbdplugin
provisioner: rbd.csi.ceph.com
parameters:
  # Comma separated list of Ceph monitors
  # if using FQDN, make sure csi plugin's dns policy is appropriate.
  monitors: mon1:port,mon2:port,...

  # OR,
  # String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  # If using clusterID, ensure to create a secret, as in
  # template-ceph-cluster-ID-secret.yaml, to accompany the string chosen to
  # represent the Ceph cluster in clusterID
  # clusterID: <cluster-id>
  # OR,
  # if "monitors" parameter is not set, driver to get monitors from same
  # secret as admin/user credentials. "monValueFromSecret" provides the
  # key in the secret whose value is the mons
@@ -25,12 +34,18 @@ parameters:
  imageFeatures: layering

  # The secrets have to contain Ceph admin credentials.
  # NOTE: If using "clusterID" instead of "monitors" above, the following
  # secrets MAY be added to the ceph-cluster-<cluster-id> secret and skipped
  # here
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-publish-secret-namespace: default

  # Ceph users for operating RBD
  # NOTE: If using "clusterID" instead of "monitors" above, the following
  # IDs MAY be added to the ceph-cluster-<cluster-id> secret and skipped
  # here
  adminid: admin
  userid: kubernetes
  # uncomment the following to use rbd-nbd as mounter on supported nodes
36
examples/rbd/template-ceph-cluster-ID-secret.yaml
Normal file
@@ -0,0 +1,36 @@
---
# This is a template secret that helps define a Ceph cluster configuration
# as required by the CSI driver. This is used when a StorageClass has the
# "clusterID" defined as one of the parameters, to provide the CSI instance
# Ceph cluster configuration information.
apiVersion: v1
kind: Secret
metadata:
  # The <cluster-id> is used by the CSI plugin to uniquely identify and use a
  # Ceph cluster, the value MUST match the value provided as `clusterID` in the
  # StorageClass
  name: ceph-cluster-<cluster-id>
  namespace: default
data:
  # Base64 encoded and comma separated Ceph cluster monitor list
  # - Typically output of: `echo -n "mon1:port,mon2:port,..." | base64`
  monitors: <BASE64-ENCODED-MONLIST>
  # Base64 encoded and comma separated list of pool names from which volumes
  # can be provisioned
  pools: <BASE64-ENCODED-POOLIST>
  # Base64 encoded admin ID to use for provisioning
  # - Typically output of: `echo -n "<admin-id>" | base64`
  # Substitute the entire string including angle braces, with the base64 value
  adminid: <BASE64-ENCODED-ID>
  # Base64 encoded key of the provisioner admin ID
  # - Output of: `ceph auth get-key client.<admin-id> | base64`
  # Substitute the entire string including angle braces, with the base64 value
  adminkey: <BASE64-ENCODED-PASSWORD>
  # Base64 encoded user ID to use for publishing
  # - Typically output of: `echo -n "<admin-id>" | base64`
  # Substitute the entire string including angle braces, with the base64 value
  userid: <BASE64-ENCODED-ID>
  # Base64 encoded key of the publisher user ID
  # - Output of: `ceph auth get-key client.<admin-id> | base64`
  # Substitute the entire string including angle braces, with the base64 value
  userkey: <BASE64-ENCODED-PASSWORD>
33
examples/rbd/template-csi-rbdplugin-patch.yaml
Normal file
@@ -0,0 +1,33 @@
---
# This is a patch to the existing daemonset deployment of CSI rbdplugin.
#
# This is to be used when using `clusterID` instead of monitors or
# monValueFromSecret in the StorageClass to specify the Ceph cluster to
# provision storage from, AND when the value of `--configroot` option to the
# CSI pods is NOT "k8s_objects".
#
# This patch file, patches in the specified secret for the 'clusterID' as a
# volume, instead of the Ceph CSI plugin actively fetching and using kubernetes
# secrets.
#
# Post substituting the <cluster-id> in all places execute,
# `kubectl patch daemonset csi-rbdplugin --patch\
#  "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the daemonset deployment.
#
# `kubectl patch statefulset csi-rbdplugin-provisioner --patch\
#  "$(cat template-csi-rbdplugin-patch.yaml)"`
# to patch the statefulset deployment.
spec:
  template:
    spec:
      containers:
        - name: csi-rbdplugin
          volumeMounts:
            - name: ceph-cluster-<cluster-id>
              mountPath: "/etc/csi-config/ceph-cluster-<cluster-id>"
              readOnly: true
      volumes:
        - name: ceph-cluster-<cluster-id>
          secret:
            secretName: ceph-cluster-<cluster-id>
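The substitution and patch steps from the file's comments can be combined into one pass; a sketch, assuming the Ceph cluster fsid was chosen as the clusterID:

```bash
# Substitute <cluster-id> and patch both deployments (illustrative)
CLUSTER_ID="$(ceph fsid)"
sed "s/<cluster-id>/${CLUSTER_ID}/g" template-csi-rbdplugin-patch.yaml > patch.yaml
kubectl patch daemonset csi-rbdplugin --patch "$(cat patch.yaml)"
kubectl patch statefulset csi-rbdplugin-provisioner --patch "$(cat patch.yaml)"
```
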
@@ -17,13 +17,8 @@ limitations under the License.
package cephfs

import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "text/template"

    "k8s.io/klog"
)

var cephConfig = []byte(`[global]
@@ -35,39 +30,11 @@ auth_client_required = cephx
fuse_set_user_groups = false
`)

const cephKeyring = `[client.{{.UserID}}]
key = {{.Key}}
`

const cephSecret = `{{.Key}}` // #nosec

const (
    cephConfigRoot         = "/etc/ceph"
    cephConfigPath         = "/etc/ceph/ceph.conf"
    cephKeyringFileNameFmt = "ceph.share.%s.client.%s.keyring"
    cephSecretFileNameFmt  = "ceph.share.%s.client.%s.secret" // #nosec
)

var (
    cephKeyringTempl *template.Template
    cephSecretTempl  *template.Template
)

func init() {
    fm := map[string]interface{}{
        "perms": func(readOnly bool) string {
            if readOnly {
                return "r"
            }

            return "rw"
        },
    }

    cephKeyringTempl = template.Must(template.New("keyring").Funcs(fm).Parse(cephKeyring))
    cephSecretTempl = template.Must(template.New("secret").Parse(cephSecret))
}

func createCephConfigRoot() error {
    return os.MkdirAll(cephConfigRoot, 0755) // #nosec
}
@@ -79,51 +46,3 @@ func writeCephConfig() error {

    return ioutil.WriteFile(cephConfigPath, cephConfig, 0640)
}

func writeCephTemplate(fileName string, m os.FileMode, t *template.Template, data interface{}) error {
    if err := createCephConfigRoot(); err != nil {
        return err
    }

    f, err := os.OpenFile(path.Join(cephConfigRoot, fileName), os.O_CREATE|os.O_EXCL|os.O_WRONLY, m)
    if err != nil {
        if os.IsExist(err) {
            return nil
        }
        return err
    }

    defer func() {
        if err := f.Close(); err != nil {
            klog.Errorf("failed to close file %s with error %s", f.Name(), err)
        }
    }()

    return t.Execute(f, data)
}

type cephKeyringData struct {
    UserID, Key string
    VolumeID    volumeID
}

func (d *cephKeyringData) writeToFile() error {
    return writeCephTemplate(fmt.Sprintf(cephKeyringFileNameFmt, d.VolumeID, d.UserID), 0600, cephKeyringTempl, d)
}

type cephSecretData struct {
    UserID, Key string
    VolumeID    volumeID
}

func (d *cephSecretData) writeToFile() error {
    return writeCephTemplate(fmt.Sprintf(cephSecretFileNameFmt, d.VolumeID, d.UserID), 0600, cephSecretTempl, d)
}

func getCephSecretPath(volID volumeID, userID string) string {
    return path.Join(cephConfigRoot, fmt.Sprintf(cephSecretFileNameFmt, volID, userID))
}

func getCephKeyringPath(volID volumeID, userID string) string {
    return path.Join(cephConfigRoot, fmt.Sprintf(cephKeyringFileNameFmt, volID, userID))
}

@@ -17,12 +17,7 @@ limitations under the License.
package cephfs

import (
    "bytes"
    "encoding/json"
    "fmt"
    "os"

    "k8s.io/klog"
)

const (
@@ -53,83 +48,61 @@ func getCephUserName(volID volumeID) string {
    return cephUserPrefix + string(volID)
}

func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
    entityName := cephEntityClientPrefix + getCephUserName(volID)

func getSingleCephEntity(args ...string) (*cephEntity, error) {
    var ents []cephEntity
    args := [...]string{
        "-m", volOptions.Monitors,
        "auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
        "get", entityName,
    }

    out, err := execCommand("ceph", args[:]...)
    if err != nil {
        return nil, fmt.Errorf("cephfs: ceph failed with following error: %s\ncephfs: ceph output: %s", err, out)
    }

    // Workaround for output from `ceph auth get`
    // Contains non-json data: "exported keyring for ENTITY\n\n"
    offset := bytes.Index(out, []byte("[{"))

    if err = json.NewDecoder(bytes.NewReader(out[offset:])).Decode(&ents); err != nil {
        return nil, fmt.Errorf("failed to decode json: %v", err)
    if err := execCommandJSON(&ents, "ceph", args...); err != nil {
        return nil, err
    }

    if len(ents) != 1 {
        return nil, fmt.Errorf("got unexpected number of entities for %s: expected 1, got %d", entityName, len(ents))
        return nil, fmt.Errorf("got unexpected number of entities: expected 1, got %d", len(ents))
    }

    return &ents[0], nil
}

func genUserIDs(adminCr *credentials, volID volumeID) (adminID, userID string) {
    return cephEntityClientPrefix + adminCr.id, cephEntityClientPrefix + getCephUserName(volID)
}

func getCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
    adminID, userID := genUserIDs(adminCr, volID)

    return getSingleCephEntity(
        "-m", volOptions.Monitors,
        "-n", adminID,
        "--key="+adminCr.key,
        "-c", cephConfigPath,
        "-f", "json",
        "auth", "get", userID,
    )
}

func createCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) (*cephEntity, error) {
    caps := cephEntityCaps{
        Mds: fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)),
        Mon: "allow r",
        Osd: fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)),
    }
    adminID, userID := genUserIDs(adminCr, volID)

    var ents []cephEntity
    args := [...]string{
    return getSingleCephEntity(
        "-m", volOptions.Monitors,
        "auth", "-f", "json", "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
        "get-or-create", cephEntityClientPrefix + getCephUserName(volID),
        "mds", caps.Mds,
        "mon", caps.Mon,
        "osd", caps.Osd,
    }

    if err := execCommandJSON(&ents, args[:]...); err != nil {
        return nil, fmt.Errorf("error creating ceph user: %v", err)
    }

    return &ents[0], nil
        "-n", adminID,
        "--key="+adminCr.key,
        "-c", cephConfigPath,
        "-f", "json",
        "auth", "get-or-create", userID,
        // User capabilities
        "mds", fmt.Sprintf("allow rw path=%s", getVolumeRootPathCeph(volID)),
        "mon", "allow r",
        "osd", fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)),
    )
}

func deleteCephUser(volOptions *volumeOptions, adminCr *credentials, volID volumeID) error {
    userID := getCephUserName(volID)
    adminID, userID := genUserIDs(adminCr, volID)

    args := [...]string{
    return execCommandErr("ceph",
        "-m", volOptions.Monitors,
        "-c", cephConfigPath, "-n", cephEntityClientPrefix + adminCr.id, "--keyring", getCephKeyringPath(volID, adminCr.id),
        "auth", "rm", cephEntityClientPrefix + userID,
    }

    var err error
    if err = execCommandAndValidate("ceph", args[:]...); err != nil {
        return err
    }

    keyringPath := getCephKeyringPath(volID, adminCr.id)
    if err = os.Remove(keyringPath); err != nil {
        klog.Errorf("failed to remove keyring file %s with error %s", keyringPath, err)
    }

    secretPath := getCephSecretPath(volID, adminCr.id)
    if err = os.Remove(secretPath); err != nil {
        klog.Errorf("failed to remove secret file %s with error %s", secretPath, err)
    }

    return nil
        "-n", adminID,
        "--key="+adminCr.key,
        "-c", cephConfigPath,
        "auth", "rm", userID,
    )
}

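The refactored helpers above all shell out to the `ceph` CLI, passing the admin key with `--key` instead of pointing at a keyring file; roughly, the commands they run look like this (the monitor address, key variable, and `client.csi-user-<id>` entity name are illustrative placeholders):

```bash
# Equivalent CLI calls for getCephUser / createCephUser / deleteCephUser (sketch)
ceph -m mon1:6789 -n client.admin --key="$ADMIN_KEY" -c /etc/ceph/ceph.conf \
  -f json auth get client.csi-user-<id>
ceph -m mon1:6789 -n client.admin --key="$ADMIN_KEY" -c /etc/ceph/ceph.conf \
  -f json auth get-or-create client.csi-user-<id> \
  mds "allow rw path=<volume-root>" mon "allow r" \
  osd "allow rw pool=<pool> namespace=<ns>"
ceph -m mon1:6789 -n client.admin --key="$ADMIN_KEY" -c /etc/ceph/ceph.conf \
  auth rm client.csi-user-<id>
```
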
@@ -17,15 +17,15 @@ limitations under the License.
package cephfs

import (
    csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
    "github.com/ceph/ceph-csi/pkg/util"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "golang.org/x/net/context"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "k8s.io/klog"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "github.com/kubernetes-csi/drivers/pkg/csi-common"

    "github.com/ceph/ceph-csi/pkg/util"
    "k8s.io/kubernetes/pkg/util/keymutex"
)

// ControllerServer struct of CEPH CSI driver with supported methods of CSI
@@ -40,6 +40,10 @@ type controllerCacheEntry struct {
    VolumeID volumeID
}

var (
    mtxControllerVolumeID = keymutex.NewHashed(0)
)

// CreateVolume creates the volume in backend and store the volume metadata
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
    if err := cs.validateCreateVolumeRequest(req); err != nil {
@@ -58,6 +62,9 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol

    volID := makeVolumeID(req.GetName())

    mtxControllerVolumeID.LockKey(string(volID))
    defer mustUnlock(mtxControllerVolumeID, string(volID))

    // Create a volume in case the user didn't provide one

    if volOptions.ProvisionVolume {
@@ -67,11 +74,6 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }

    if err = storeCephCredentials(volID, cr); err != nil {
        klog.Errorf("failed to store admin credentials for '%s': %v", cr.id, err)
        return nil, status.Error(codes.Internal, err.Error())
    }

    if err = createVolume(volOptions, cr, volID, req.GetCapacityRange().GetRequiredBytes()); err != nil {
        klog.Errorf("failed to create volume %s: %v", req.GetName(), err)
        return nil, status.Error(codes.Internal, err.Error())
@@ -102,8 +104,9 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
    }, nil
}

// DeleteVolume deletes the volume in backend and removes the volume metadata
// from store
// DeleteVolume deletes the volume in backend
// and removes the volume metadata from store
// nolint: gocyclo
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
    if err := cs.validateDeleteVolumeRequest(); err != nil {
        klog.Errorf("DeleteVolumeRequest validation failed: %v", err)
@@ -113,11 +116,15 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
    var (
        volID   = volumeID(req.GetVolumeId())
        secrets = req.GetSecrets()
        err     error
    )

    ce := &controllerCacheEntry{}
    if err = cs.MetadataStore.Get(string(volID), ce); err != nil {
    if err := cs.MetadataStore.Get(string(volID), ce); err != nil {
        if err, ok := err.(*util.CacheEntryNotFound); ok {
            klog.Infof("cephfs: metadata for volume %s not found, assuming the volume to be already deleted (%v)", volID, err)
            return &csi.DeleteVolumeResponse{}, nil
        }

        return nil, status.Error(codes.Internal, err.Error())
    }

@@ -143,6 +150,9 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
        return nil, status.Error(codes.InvalidArgument, err.Error())
    }

    mtxControllerVolumeID.LockKey(string(volID))
    defer mustUnlock(mtxControllerVolumeID, string(volID))

    if err = purgeVolume(volID, cr, &ce.VolOptions); err != nil {
        klog.Errorf("failed to delete volume %s: %v", volID, err)
        return nil, status.Error(codes.Internal, err.Error())

@@ -19,19 +19,21 @@ package cephfs
import (
    "k8s.io/klog"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "github.com/kubernetes-csi/drivers/pkg/csi-common"

    csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
    "github.com/ceph/ceph-csi/pkg/util"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

const (
    // PluginFolder defines the location of ceph plugin
    PluginFolder = "/var/lib/kubelet/plugins/csi-cephfsplugin"

    // version of ceph driver
    version = "1.0.0"
)

// PluginFolder defines the location of ceph plugin
var PluginFolder = "/var/lib/kubelet/plugins/"

// Driver contains the default identity,node and controller struct
type Driver struct {
    cd *csicommon.CSIDriver
@@ -75,7 +77,7 @@ func NewNodeServer(d *csicommon.CSIDriver) *NodeServer {

// Run start a non-blocking grpc controller,node and identityserver for
// ceph CSI driver which can serve multiple parallel requests
func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cachePersister util.CachePersister) {
func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir string, cachePersister util.CachePersister) {
    klog.Infof("Driver: %v version: %v", driverName, version)

    // Configuration
@@ -103,6 +105,13 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter string, cacheP
        klog.Fatalf("failed to write ceph configuration file: %v", err)
    }

    initVolumeMountCache(driverName, mountCacheDir, cachePersister)
    if mountCacheDir != "" {
        if err := remountCachedVolumes(); err != nil {
            klog.Warningf("failed to remount cached volumes: %v", err)
            // ignore remount failures and continue starting the driver
        }
    }
    // Initialize default library driver

    fs.cd = csicommon.NewCSIDriver(driverName, version, nodeID)

@@ -19,8 +19,9 @@ package cephfs
import (
    "context"

    csicommon "github.com/ceph/ceph-csi/pkg/csi-common"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "github.com/kubernetes-csi/drivers/pkg/csi-common"
)

// IdentityServer struct of ceph CSI driver with supported methods of CSI

311
pkg/cephfs/mountcache.go
Normal file
@@ -0,0 +1,311 @@
package cephfs

import (
    "encoding/base64"
    "os"
    "sync"
    "syscall"
    "time"

    "github.com/ceph/ceph-csi/pkg/util"
    "github.com/pkg/errors"
    "k8s.io/klog"
)

type volumeMountCacheEntry struct {
    DriverVersion string `json:"driverVersion"`

    VolumeID    string            `json:"volumeID"`
    Secrets     map[string]string `json:"secrets"`
    StagingPath string            `json:"stagingPath"`
    TargetPaths map[string]bool   `json:"targetPaths"`
    CreateTime  time.Time         `json:"createTime"`
}

type volumeMountCacheMap struct {
    volumes        map[string]volumeMountCacheEntry
    nodeCacheStore util.NodeCache
    metadataStore  util.CachePersister
}

var (
    volumeMountCachePrefix = "cephfs-mount-cache-"
    volumeMountCache       volumeMountCacheMap
    volumeMountCacheMtx    sync.Mutex
)

func initVolumeMountCache(driverName string, mountCacheDir string, cachePersister util.CachePersister) {
    volumeMountCache.volumes = make(map[string]volumeMountCacheEntry)

    volumeMountCache.metadataStore = cachePersister
    volumeMountCache.nodeCacheStore.BasePath = mountCacheDir
    volumeMountCache.nodeCacheStore.CacheDir = driverName
    klog.Infof("mount-cache: name: %s, version: %s, mountCacheDir: %s", driverName, version, mountCacheDir)
}

func remountCachedVolumes() error {
    if err := os.MkdirAll(volumeMountCache.nodeCacheStore.BasePath, 0755); err != nil {
        klog.Errorf("mount-cache: failed to create %s: %v", volumeMountCache.nodeCacheStore.BasePath, err)
        return err
    }
    var remountFailCount, remountSuccCount int64
    me := &volumeMountCacheEntry{}
    ce := &controllerCacheEntry{}
    err := volumeMountCache.nodeCacheStore.ForAll(volumeMountCachePrefix, me, func(identifier string) error {
        volID := me.VolumeID
        if err := volumeMountCache.metadataStore.Get(volID, ce); err != nil {
            if err, ok := err.(*util.CacheEntryNotFound); ok {
                klog.Infof("mount-cache: metadata not found, assuming the volume %s to be already deleted (%v)", volID, err)
                if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
                    klog.Infof("mount-cache: metadata not found, delete volume cache entry for volume %s", volID)
                }
            }
        } else {
            if err := mountOneCacheEntry(ce, me); err == nil {
                remountSuccCount++
                volumeMountCache.volumes[me.VolumeID] = *me
                klog.Infof("mount-cache: successfully remounted volume %s", volID)
            } else {
                remountFailCount++
                klog.Errorf("mount-cache: failed to remount volume %s", volID)
            }
        }
        return nil
    })
    if err != nil {
        klog.Infof("mount-cache: metastore list cache fail %v", err)
        return err
    }
    if remountFailCount > 0 {
        klog.Infof("mount-cache: successfully remounted %d volumes, failed to remount %d volumes", remountSuccCount, remountFailCount)
    } else {
        klog.Infof("mount-cache: successfully remounted %d volumes", remountSuccCount)
    }
    return nil
}

func mountOneCacheEntry(ce *controllerCacheEntry, me *volumeMountCacheEntry) error {
|
||||
volumeMountCacheMtx.Lock()
|
||||
defer volumeMountCacheMtx.Unlock()
|
||||
|
||||
var (
|
||||
err error
|
||||
cr *credentials
|
||||
)
|
||||
volID := ce.VolumeID
|
||||
volOptions := ce.VolOptions
|
||||
|
||||
if volOptions.ProvisionVolume {
|
||||
volOptions.RootPath = getVolumeRootPathCeph(volID)
|
||||
cr, err = getAdminCredentials(decodeCredentials(me.Secrets))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var entity *cephEntity
|
||||
entity, err = getCephUser(&volOptions, cr, volID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cr = entity.toCredentials()
|
||||
} else {
|
||||
cr, err = getUserCredentials(decodeCredentials(me.Secrets))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = cleanupMountPoint(me.StagingPath)
|
||||
if err != nil {
|
||||
klog.Infof("mount-cache: failed to cleanup volume mount point %s, remove it: %s %v", volID, me.StagingPath, err)
|
||||
return err
|
||||
}
|
||||
|
||||
isMnt, err := isMountPoint(me.StagingPath)
|
||||
if err != nil {
|
||||
isMnt = false
|
||||
klog.Infof("mount-cache: failed to check volume mounted %s: %s %v", volID, me.StagingPath, err)
|
||||
}
|
||||
|
||||
if !isMnt {
|
||||
m, err := newMounter(&volOptions)
|
||||
if err != nil {
|
||||
klog.Errorf("mount-cache: failed to create mounter for volume %s: %v", volID, err)
|
||||
return err
|
||||
}
|
||||
if err := m.mount(me.StagingPath, cr, &volOptions); err != nil {
|
||||
klog.Errorf("mount-cache: failed to mount volume %s: %v", volID, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
for targetPath, readOnly := range me.TargetPaths {
|
||||
if err := cleanupMountPoint(targetPath); err == nil {
|
||||
if err := bindMount(me.StagingPath, targetPath, readOnly); err != nil {
|
||||
klog.Errorf("mount-cache: failed to bind-mount volume %s: %s %s %v %v",
|
||||
volID, me.StagingPath, targetPath, readOnly, err)
|
||||
} else {
|
||||
klog.Infof("mount-cache: successfully bind-mounted volume %s: %s %s %v",
|
||||
volID, me.StagingPath, targetPath, readOnly)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupMountPoint(mountPoint string) error {
|
||||
if _, err := os.Stat(mountPoint); err != nil {
|
||||
if isCorruptedMnt(err) {
|
||||
klog.Infof("mount-cache: corrupted mount point %s, need unmount", mountPoint)
|
||||
err := execCommandErr("umount", mountPoint)
|
||||
if err != nil {
|
||||
klog.Infof("mount-cache: failed to umount %s %v", mountPoint, err)
|
||||
//ignore error return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, err := os.Stat(mountPoint); err != nil {
|
||||
klog.Errorf("mount-cache: failed to stat mount point %s %v", mountPoint, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isCorruptedMnt(err error) bool {
|
||||
var underlyingError error
|
||||
switch pe := err.(type) {
|
||||
case nil:
|
||||
return false
|
||||
case *os.PathError:
|
||||
underlyingError = pe.Err
|
||||
case *os.LinkError:
|
||||
underlyingError = pe.Err
|
||||
case *os.SyscallError:
|
||||
underlyingError = pe.Err
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
CorruptedErrors := []error{
|
||||
syscall.ENOTCONN, syscall.ESTALE, syscall.EIO, syscall.EACCES}
|
||||
|
||||
for _, v := range CorruptedErrors {
|
||||
if underlyingError == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
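isCorruptedMnt only unwraps the three standard os error wrappers before comparing against that fixed error list. A quick usage sketch (the path is made up; a stat against a mount whose ceph-fuse daemon died typically fails with ENOTCONN):

	err := &os.PathError{Op: "stat", Path: "/plugin/mounts/vol-1", Err: syscall.ENOTCONN}
	fmt.Println(isCorruptedMnt(err))                       // true
	fmt.Println(isCorruptedMnt(errors.New("plain error"))) // false: not one of the known wrappers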
func genVolumeMountCacheFileName(volID string) string {
	cachePath := volumeMountCachePrefix + volID
	return cachePath
}

func (mc *volumeMountCacheMap) isEnable() bool {
	// the cache is disabled when the mount cache dir is unset
	return mc.nodeCacheStore.BasePath != ""
}

func (mc *volumeMountCacheMap) nodeStageVolume(volID string, stagingTargetPath string, secrets map[string]string) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	lastTargetPaths := make(map[string]bool)
	me, ok := volumeMountCache.volumes[volID]
	if ok {
		if me.StagingPath == stagingTargetPath {
			klog.Warningf("mount-cache: unexpected restage of volume %s", volID)
			return nil
		}
		lastTargetPaths = me.TargetPaths
		klog.Warningf("mount-cache: node stage volume: ignoring previous cache entry for volume %s", volID)
	}

	me = volumeMountCacheEntry{DriverVersion: version}

	me.VolumeID = volID
	me.Secrets = encodeCredentials(secrets)
	me.StagingPath = stagingTargetPath
	me.TargetPaths = lastTargetPaths

	me.CreateTime = time.Now()
	volumeMountCache.volumes[volID] = me
	return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
}

func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()
	delete(volumeMountCache.volumes, volID)
	return mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID))
}

func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	_, ok := volumeMountCache.volumes[volID]
	if !ok {
		return errors.New("mount-cache: node publish volume failed to find cache entry for volume")
	}
	volumeMountCache.volumes[volID].TargetPaths[targetPath] = readOnly
	return mc.updateNodeCache(volID)
}

func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath string) error {
	if !mc.isEnable() {
		return nil
	}
	volumeMountCacheMtx.Lock()
	defer volumeMountCacheMtx.Unlock()

	_, ok := volumeMountCache.volumes[volID]
	if !ok {
		return errors.New("mount-cache: node unpublish volume failed to find cache entry for volume")
	}
	delete(volumeMountCache.volumes[volID].TargetPaths, targetPath)
	return mc.updateNodeCache(volID)
}

func (mc *volumeMountCacheMap) updateNodeCache(volID string) error {
	me := volumeMountCache.volumes[volID]
	if err := volumeMountCache.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID)); err == nil {
		klog.Infof("mount-cache: removed previous cache entry for volume %s", volID)
	}
	return mc.nodeCacheStore.Create(genVolumeMountCacheFileName(volID), me)
}

func encodeCredentials(input map[string]string) (output map[string]string) {
	output = make(map[string]string)
	for key, value := range input {
		nKey := base64.StdEncoding.EncodeToString([]byte(key))
		nValue := base64.StdEncoding.EncodeToString([]byte(value))
		output[nKey] = nValue
	}
	return output
}

func decodeCredentials(input map[string]string) (output map[string]string) {
	output = make(map[string]string)
	for key, value := range input {
		nKey, err := base64.StdEncoding.DecodeString(key)
		if err != nil {
			klog.Errorf("mount-cache: failed to decode secret key")
			continue
		}
		nValue, err := base64.StdEncoding.DecodeString(value)
		if err != nil {
			klog.Errorf("mount-cache: failed to decode secret value")
			continue
		}
		output[string(nKey)] = string(nValue)
	}
	return output
}
38
pkg/cephfs/mountcache_test.go
Normal file
@ -0,0 +1,38 @@
package cephfs

import (
	"testing"
)

func init() {
}

func TestMountOneCacheEntry(t *testing.T) {
}

func TestRemountHisMountedPath(t *testing.T) {
}

func TestNodeStageVolume(t *testing.T) {
}

func TestNodeUnStageVolume(t *testing.T) {
}

func TestNodePublishVolume(t *testing.T) {
}

func TestNodeUnpublishVolume(t *testing.T) {
}

func TestEncodeDecodeCredentials(t *testing.T) {
	secrets := make(map[string]string)
	secrets["user_1"] = "value_1"
	enSecrets := encodeCredentials(secrets)
	deSecrets := decodeCredentials(enSecrets)
	for key, value := range secrets {
		if deSecrets[key] != value {
			t.Errorf("credentials value for key %s changed after decode: want %s, got %s", key, value, deSecrets[key])
		}
	}
}
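The round-trip test above covers a single ASCII pair; a slightly broader sketch (not part of the commit) that also exercises empty and non-ASCII entries could look like:

	func TestEncodeDecodeCredentialsRoundTrip(t *testing.T) {
		in := map[string]string{
			"userKey": "AQB0cqVjAAAAABAA", // dummy key material
			"":        "",
			"ünïcode": "välüe",
		}
		out := decodeCredentials(encodeCredentials(in))
		if len(out) != len(in) {
			t.Fatalf("expected %d entries, got %d", len(in), len(out))
		}
		for k, v := range in {
			if out[k] != v {
				t.Errorf("round trip changed %q: want %q, got %q", k, v, out[k])
			}
		}
	}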
@ -21,12 +21,13 @@ import (
	"fmt"
	"os"

	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/kubernetes-csi/drivers/pkg/csi-common"
	"k8s.io/kubernetes/pkg/util/keymutex"
)

// NodeServer struct of ceph CSI driver with supported methods of CSI
@ -35,6 +36,10 @@ type NodeServer struct {
	*csicommon.DefaultNodeServer
}

var (
	mtxNodeVolumeID = keymutex.NewHashed(0)
)

func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
	var (
		cr *credentials
@ -44,17 +49,13 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
	if volOptions.ProvisionVolume {
		// The volume is provisioned dynamically, get the credentials directly from Ceph

		// First, store admin credentials - those are needed for retrieving the user credentials
		// First, get admin credentials - those are needed for retrieving the user credentials

		adminCr, err := getAdminCredentials(secrets)
		if err != nil {
			return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
		}

		if err = storeCephCredentials(volID, adminCr); err != nil {
			return nil, fmt.Errorf("failed to store ceph admin credentials: %v", err)
		}

		// Then get the ceph user

		entity, err := getCephUser(volOptions, adminCr, volID)
@ -74,10 +75,6 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
		cr = userCr
	}

	if err := storeCephCredentials(volID, cr); err != nil {
		return nil, fmt.Errorf("failed to store ceph user credentials: %v", err)
	}

	return cr, nil
}

@ -108,6 +105,9 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
		return nil, status.Error(codes.Internal, err.Error())
	}

	mtxNodeVolumeID.LockKey(string(volID))
	defer mustUnlock(mtxNodeVolumeID, string(volID))

	// Check if the volume is already mounted

	isMnt, err := isMountPoint(stagingTargetPath)
@ -150,10 +150,13 @@ func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequ

	klog.V(4).Infof("cephfs: mounting volume %s with %s", volID, m.name())

	if err = m.mount(stagingTargetPath, cr, volOptions, volID); err != nil {
	if err = m.mount(stagingTargetPath, cr, volOptions); err != nil {
		klog.Errorf("failed to mount volume %s: %v", volID, err)
		return status.Error(codes.Internal, err.Error())
	}
	if err := volumeMountCache.nodeStageVolume(req.GetVolumeId(), stagingTargetPath, req.GetSecrets()); err != nil {
		klog.Warningf("mount-cache: failed to stage volume %s %s: %v", volID, stagingTargetPath, err)
	}
	return nil
}

@ -195,6 +198,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err := volumeMountCache.nodePublishVolume(volID, targetPath, req.GetReadonly()); err != nil {
		klog.Warningf("mount-cache: failed to publish volume %s %s: %v", volID, targetPath, err)
	}

	klog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)

	return &csi.NodePublishVolumeResponse{}, nil
@ -209,6 +216,11 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu

	targetPath := req.GetTargetPath()

	volID := req.GetVolumeId()
	if err = volumeMountCache.nodeUnPublishVolume(volID, targetPath); err != nil {
		klog.Warningf("mount-cache: failed to unpublish volume %s %s: %v", volID, targetPath, err)
	}

	// Unmount the bind-mount
	if err = unmountVolume(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
@ -232,6 +244,11 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag

	stagingTargetPath := req.GetStagingTargetPath()

	volID := req.GetVolumeId()
	if err = volumeMountCache.nodeUnStageVolume(volID); err != nil {
		klog.Warningf("mount-cache: failed to unstage volume %s %s: %v", volID, stagingTargetPath, err)
	}

	// Unmount the volume
	if err = unmountVolume(stagingTargetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
@ -241,7 +258,7 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
		return nil, status.Error(codes.Internal, err.Error())
	}

	klog.Infof("cephfs: successfully umounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
	klog.Infof("cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)

	return &csi.NodeUnstageVolumeResponse{}, nil
}
@ -21,49 +21,70 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"

	"github.com/ceph/ceph-csi/pkg/util"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"k8s.io/kubernetes/pkg/util/keymutex"
	"k8s.io/kubernetes/pkg/util/mount"
)

type volumeID string

func mustUnlock(m keymutex.KeyMutex, key string) {
	if err := m.UnlockKey(key); err != nil {
		klog.Fatalf("failed to unlock mutex for %s: %v", key, err)
	}
}
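mustUnlock pairs with the per-volume keyed lock introduced in NodeStageVolume; the pattern in isolation (volume ID made up):

	mtx := keymutex.NewHashed(0) // 0 selects the default shard count
	volID := "csi-cephfs-example"
	mtx.LockKey(volID)
	defer mustUnlock(mtx, volID)
	// critical section: only one stage/unstage per volume ID runs at a time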
func makeVolumeID(volName string) volumeID {
	return volumeID("csi-cephfs-" + volName)
}

func execCommand(command string, args ...string) ([]byte, error) {
	klog.V(4).Infof("cephfs: EXEC %s %s", command, args)
func execCommand(program string, args ...string) (stdout, stderr []byte, err error) {
	var (
		cmd           = exec.Command(program, args...) // nolint: gosec
		sanitizedArgs = util.StripSecretInArgs(args)
		stdoutBuf     bytes.Buffer
		stderrBuf     bytes.Buffer
	)

	cmd := exec.Command(command, args...) // #nosec
	return cmd.CombinedOutput()
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf

	klog.V(4).Infof("cephfs: EXEC %s %s", program, sanitizedArgs)

	if err := cmd.Run(); err != nil {
		return nil, nil, fmt.Errorf("an error occurred while running (%d) %s %v: %v: %s",
			cmd.Process.Pid, program, sanitizedArgs, err, stderrBuf.Bytes())
	}

func execCommandAndValidate(program string, args ...string) error {
	out, err := execCommand(program, args...)
	return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
}

func execCommandErr(program string, args ...string) error {
	_, _, err := execCommand(program, args...)
	return err
}

func execCommandJSON(v interface{}, program string, args ...string) error {
	stdout, _, err := execCommand(program, args...)
	if err != nil {
		return fmt.Errorf("cephfs: %s failed with following error: %s\ncephfs: %s output: %s", program, err, program, out)
		return err
	}

	if err = json.Unmarshal(stdout, v); err != nil {
		return fmt.Errorf("failed to unmarshal JSON for %s %v: %s: %v", program, util.StripSecretInArgs(args), stdout, err)
	}

	return nil
}

func execCommandJSON(v interface{}, args ...string) error {
	program := "ceph"
	out, err := execCommand(program, args...)

	if err != nil {
		return fmt.Errorf("cephfs: %s failed with following error: %s\ncephfs: %s output: %s", program, err, program, out)
	}

	return json.NewDecoder(bytes.NewReader(out)).Decode(v)
}
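After the refactor every JSON-producing CLI call names its program explicitly instead of hardcoding "ceph". A hedged usage sketch (the subcommand and field are illustrative, not taken from this commit):

	var fsList []struct {
		Name string `json:"name"`
	}
	// any JSON-emitting command works the same way
	if err := execCommandJSON(&fsList, "ceph", "fs", "ls", "--format=json"); err != nil {
		klog.Errorf("listing filesystems failed: %v", err)
	}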
// Used in isMountPoint()
var dummyMount = mount.New("")

@ -76,31 +97,12 @@ func isMountPoint(p string) (bool, error) {
	return !notMnt, nil
}

func storeCephCredentials(volID volumeID, cr *credentials) error {
	keyringData := cephKeyringData{
		UserID:   cr.id,
		Key:      cr.key,
		VolumeID: volID,
func pathExists(p string) bool {
	_, err := os.Stat(p)
	return err == nil
}

	if err := keyringData.writeToFile(); err != nil {
		return err
	}

	secret := cephSecretData{
		UserID:   cr.id,
		Key:      cr.key,
		VolumeID: volID,
	}

	err := secret.writeToFile()
	return err
}

//
// Controller service request validation
//

func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
		return fmt.Errorf("invalid CreateVolumeRequest: %v", err)
@ -132,10 +134,7 @@ func (cs *ControllerServer) validateDeleteVolumeRequest() error {
	return nil
}

//
// Node service request validation
//

func validateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
	if req.GetVolumeCapability() == nil {
		return errors.New("volume capability missing in request")
@ -178,7 +177,7 @@ func validateNodePublishVolumeRequest(req *csi.NodePublishVolumeRequest) error {
	}

	if req.GetTargetPath() == "" {
		return errors.New("varget path missing in request")
		return errors.New("target path missing in request")
	}

	return nil
@ -25,14 +25,13 @@ import (
)

const (
	cephRootPrefix  = PluginFolder + "/controller/volumes/root-"
	cephVolumesRoot = "csi-volumes"

	namespacePrefix = "ns-"
)

func getCephRootPathLocal(volID volumeID) string {
	return cephRootPrefix + string(volID)
	return fmt.Sprintf("%s/controller/volumes/root-%s", PluginFolder, string(volID))
}

func getCephRootVolumePathLocal(volID volumeID) string {
@ -48,83 +47,70 @@ func getVolumeNamespace(volID volumeID) string {
}

func setVolumeAttribute(root, attrName, attrValue string) error {
	return execCommandAndValidate("setfattr", "-n", attrName, "-v", attrValue, root)
	return execCommandErr("setfattr", "-n", attrName, "-v", attrValue, root)
}

func createVolume(volOptions *volumeOptions, adminCr *credentials, volID volumeID, bytesQuota int64) error {
	cephRoot := getCephRootPathLocal(volID)

	if err := createMountPoint(cephRoot); err != nil {
	if err := mountCephRoot(volID, volOptions, adminCr); err != nil {
		return err
	}
	defer unmountCephRoot(volID)

	// RootPath is not set for a dynamically provisioned volume
	// Access to cephfs's / is required
	volOptions.RootPath = "/"
	var (
		volRoot         = getCephRootVolumePathLocal(volID)
		volRootCreating = volRoot + "-creating"
	)

	m, err := newMounter(volOptions)
	if err != nil {
		return fmt.Errorf("failed to create mounter: %v", err)
	if pathExists(volRoot) {
		klog.V(4).Infof("cephfs: volume %s already exists, skipping creation", volID)
		return nil
	}

	if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil {
		return fmt.Errorf("error mounting ceph root: %v", err)
	}

	defer unmountAndRemove(cephRoot)

	volOptions.RootPath = getVolumeRootPathCeph(volID)
	localVolRoot := getCephRootVolumePathLocal(volID)

	if err := createMountPoint(localVolRoot); err != nil {
	if err := createMountPoint(volRootCreating); err != nil {
		return err
	}

	if bytesQuota > 0 {
		if err := setVolumeAttribute(localVolRoot, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
		if err := setVolumeAttribute(volRootCreating, "ceph.quota.max_bytes", fmt.Sprintf("%d", bytesQuota)); err != nil {
			return err
		}
	}

	if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool", volOptions.Pool); err != nil {
	if err := setVolumeAttribute(volRootCreating, "ceph.dir.layout.pool", volOptions.Pool); err != nil {
		return fmt.Errorf("%v\ncephfs: Does pool '%s' exist?", err, volOptions.Pool)
	}

	if err := setVolumeAttribute(localVolRoot, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil {
	if err := setVolumeAttribute(volRootCreating, "ceph.dir.layout.pool_namespace", getVolumeNamespace(volID)); err != nil {
		return err
	}

	if err := os.Rename(volRootCreating, volRoot); err != nil {
		return fmt.Errorf("couldn't mark volume %s as created: %v", volID, err)
	}

	return nil
}
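The "-creating" suffix plus the final os.Rename makes creation restartable: a crash mid-setup leaves only the staging directory behind, and the volume counts as created only once the rename lands. The same pattern in isolation (paths and helper name are illustrative):

	func createCommitted(staging, final string) error {
		if err := os.MkdirAll(staging, 0755); err != nil {
			return err
		}
		// configure quota/layout attributes on staging here...
		// os.Rename is the commit point: a crash before it leaves only the
		// staging directory, which a retry can safely redo from scratch.
		return os.Rename(staging, final)
	}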
func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions) error {
	if err := mountCephRoot(volID, volOptions, adminCr); err != nil {
		return err
	}
	defer unmountCephRoot(volID)

	var (
		cephRoot        = getCephRootPathLocal(volID)
		volRoot         = getCephRootVolumePathLocal(volID)
		volRootDeleting = volRoot + "-deleting"
	)

	if err := createMountPoint(cephRoot); err != nil {
		return err
	}

	// Root path is not set for dynamically provisioned volumes
	// Access to cephfs's / is required
	volOptions.RootPath = "/"

	m, err := newMounter(volOptions)
	if err != nil {
		return fmt.Errorf("failed to create mounter: %v", err)
	}

	if err = m.mount(cephRoot, adminCr, volOptions, volID); err != nil {
		return fmt.Errorf("error mounting ceph root: %v", err)
	}

	defer unmountAndRemove(cephRoot)

	if pathExists(volRoot) {
		if err := os.Rename(volRoot, volRootDeleting); err != nil {
			return fmt.Errorf("coudln't mark volume %s for deletion: %v", volID, err)
			return fmt.Errorf("couldn't mark volume %s for deletion: %v", volID, err)
		}
	} else {
		if !pathExists(volRootDeleting) {
			klog.V(4).Infof("cephfs: volume %s not found, assuming it to be already deleted", volID)
			return nil
		}
	}

	if err := os.RemoveAll(volRootDeleting); err != nil {
@ -134,13 +120,37 @@ func purgeVolume(volID volumeID, adminCr *credentials, volOptions *volumeOptions
	return nil
}

func unmountAndRemove(mountPoint string) {
	var err error
	if err = unmountVolume(mountPoint); err != nil {
		klog.Errorf("failed to unmount %s with error %s", mountPoint, err)
func mountCephRoot(volID volumeID, volOptions *volumeOptions, adminCr *credentials) error {
	cephRoot := getCephRootPathLocal(volID)

	// Root path is not set for dynamically provisioned volumes
	// Access to cephfs's / is required
	volOptions.RootPath = "/"

	if err := createMountPoint(cephRoot); err != nil {
		return err
	}

	if err = os.Remove(mountPoint); err != nil {
		klog.Errorf("failed to remove %s with error %s", mountPoint, err)
	m, err := newMounter(volOptions)
	if err != nil {
		return fmt.Errorf("failed to create mounter: %v", err)
	}

	if err = m.mount(cephRoot, adminCr, volOptions); err != nil {
		return fmt.Errorf("error mounting ceph root: %v", err)
	}

	return nil
}

func unmountCephRoot(volID volumeID) {
	cephRoot := getCephRootPathLocal(volID)

	if err := unmountVolume(cephRoot); err != nil {
		klog.Errorf("failed to unmount %s with error %s", cephRoot, err)
	} else {
		if err := os.Remove(cephRoot); err != nil {
			klog.Errorf("failed to remove %s with error %s", cephRoot, err)
		}
	}
}
@ -17,11 +17,15 @@ limitations under the License.
package cephfs

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"regexp"
	"strconv"
	"sync"

	"k8s.io/klog"
)

const (
@ -31,6 +35,12 @@ const (

var (
	availableMounters []string

	// maps a mountpoint to PID of its FUSE daemon
	fusePidMap    = make(map[string]int)
	fusePidMapMtx sync.Mutex

	fusePidRx = regexp.MustCompile(`(?m)^ceph-fuse\[(.+)\]: starting fuse$`)
)

// Load available ceph mounters installed on system into availableMounters
@ -57,7 +67,7 @@ func loadAvailableMounters() error {
}

type volumeMounter interface {
	mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error
	mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error
	name() string
}

@ -101,72 +111,84 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {

type fuseMounter struct{}

func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
func mountFuse(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
	args := [...]string{
		mountPoint,
		"-m", volOptions.Monitors,
		"-c", cephConfigPath,
		"-n", cephEntityClientPrefix + cr.id,
		"--keyring", getCephKeyringPath(volID, cr.id),
		"-n", cephEntityClientPrefix + cr.id, "--key=" + cr.key,
		"-r", volOptions.RootPath,
		"-o", "nonempty",
	}

	out, err := execCommand("ceph-fuse", args[:]...)
	_, stderr, err := execCommand("ceph-fuse", args[:]...)
	if err != nil {
		return fmt.Errorf("cephfs: ceph-fuse failed with following error: %s\ncephfs: ceph-fuse output: %s", err, out)
		return err
	}

	if !bytes.Contains(out, []byte("starting fuse")) {
		return fmt.Errorf("cephfs: ceph-fuse failed:\ncephfs: ceph-fuse output: %s", out)
	// Parse the output:
	// We need "starting fuse" meaning the mount is ok
	// and PID of the ceph-fuse daemon for unmount

	match := fusePidRx.FindSubmatch(stderr)
	if len(match) != 2 {
		return fmt.Errorf("ceph-fuse failed: %s", stderr)
	}

	pid, err := strconv.Atoi(string(match[1]))
	if err != nil {
		return fmt.Errorf("failed to parse FUSE daemon PID: %v", err)
	}

	fusePidMapMtx.Lock()
	fusePidMap[mountPoint] = pid
	fusePidMapMtx.Unlock()

	return nil
}
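mountFuse now scrapes the daemon PID from ceph-fuse's own startup message on stderr. What the regexp matches, against sample output (not captured from a real run):

	stderr := []byte("ceph-fuse[4321]: starting fuse\n")
	if m := fusePidRx.FindSubmatch(stderr); len(m) == 2 {
		pid, _ := strconv.Atoi(string(m[1]))
		fmt.Println(pid) // 4321
	}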
func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
func (m *fuseMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
	if err := createMountPoint(mountPoint); err != nil {
		return err
	}

	return mountFuse(mountPoint, cr, volOptions, volID)
	return mountFuse(mountPoint, cr, volOptions)
}

func (m *fuseMounter) name() string { return "Ceph FUSE driver" }

type kernelMounter struct{}

func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
	if err := execCommandAndValidate("modprobe", "ceph"); err != nil {
func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
	if err := execCommandErr("modprobe", "ceph"); err != nil {
		return err
	}

	return execCommandAndValidate("mount",
	return execCommandErr("mount",
		"-t", "ceph",
		fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
		mountPoint,
		"-o",
		fmt.Sprintf("name=%s,secretfile=%s", cr.id, getCephSecretPath(volID, cr.id)),
		"-o", fmt.Sprintf("name=%s,secret=%s", cr.id, cr.key),
	)
}

func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions, volID volumeID) error {
func (m *kernelMounter) mount(mountPoint string, cr *credentials, volOptions *volumeOptions) error {
	if err := createMountPoint(mountPoint); err != nil {
		return err
	}

	return mountKernel(mountPoint, cr, volOptions, volID)
	return mountKernel(mountPoint, cr, volOptions)
}

func (m *kernelMounter) name() string { return "Ceph kernel client" }

func bindMount(from, to string, readOnly bool) error {
	if err := execCommandAndValidate("mount", "--bind", from, to); err != nil {
	if err := execCommandErr("mount", "--bind", from, to); err != nil {
		return fmt.Errorf("failed to bind-mount %s to %s: %v", from, to, err)
	}

	if readOnly {
		if err := execCommandAndValidate("mount", "-o", "remount,ro,bind", to); err != nil {
		if err := execCommandErr("mount", "-o", "remount,ro,bind", to); err != nil {
			return fmt.Errorf("failed read-only remount of %s: %v", to, err)
		}
	}
@ -175,7 +197,29 @@ func bindMount(from, to string, readOnly bool) error {
}

func unmountVolume(mountPoint string) error {
	return execCommandAndValidate("umount", mountPoint)
	if err := execCommandErr("umount", mountPoint); err != nil {
		return err
	}

	fusePidMapMtx.Lock()
	pid, ok := fusePidMap[mountPoint]
	if ok {
		delete(fusePidMap, mountPoint)
	}
	fusePidMapMtx.Unlock()

	if ok {
		p, err := os.FindProcess(pid)
		if err != nil {
			klog.Warningf("failed to find process %d: %v", pid, err)
		} else {
			if _, err = p.Wait(); err != nil {
				klog.Warningf("%d is not a child process: %v", pid, err)
			}
		}
	}

	return nil
}

func createMountPoint(root string) error {
@ -18,40 +18,33 @@ package csicommon

import (
	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/glog"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"
)

// DefaultControllerServer points to default driver
type DefaultControllerServer struct {
	Driver *CSIDriver
}

func (cs *DefaultControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

func (cs *DefaultControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// ControllerPublishVolume publishes a volume on a node
func (cs *DefaultControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// ControllerUnpublishVolume unpublishes a volume from a node
func (cs *DefaultControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

func (cs *DefaultControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// ListVolumes lists volumes
func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// GetCapacity gets volume capacity
func (cs *DefaultControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}
@ -59,21 +52,24 @@ func (cs *DefaultControllerServer) GetCapacity(ctx context.Context, req *csi.Get
// ControllerGetCapabilities implements the default GRPC callout.
// Default supports all capabilities
func (cs *DefaultControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
	glog.V(5).Infof("Using default ControllerGetCapabilities")
	klog.V(5).Infof("Using default ControllerGetCapabilities")

	return &csi.ControllerGetCapabilitiesResponse{
		Capabilities: cs.Driver.cap,
	}, nil
}

// CreateSnapshot creates snapshot
func (cs *DefaultControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// DeleteSnapshot deletes snapshot
func (cs *DefaultControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// ListSnapshots lists snapshots
func (cs *DefaultControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}
@ -19,13 +19,13 @@ package csicommon
import (
	"fmt"

	"github.com/golang/glog"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"k8s.io/klog"
)

// CSIDriver stores driver information
type CSIDriver struct {
	name   string
	nodeID string
@ -34,21 +34,22 @@ type CSIDriver struct {
	vc []*csi.VolumeCapability_AccessMode
}

// Creates a NewCSIDriver object. Assumes vendor version is equal to driver version &
// does not support optional driver plugin info manifest field. Refer to CSI spec for more details.
// NewCSIDriver creates a CSIDriver object. Assumes vendor
// version is equal to driver version & does not support optional
// driver plugin info manifest field. Refer to CSI spec for more details.
func NewCSIDriver(name string, v string, nodeID string) *CSIDriver {
	if name == "" {
		glog.Errorf("Driver name missing")
		klog.Errorf("Driver name missing")
		return nil
	}

	if nodeID == "" {
		glog.Errorf("NodeID missing")
		klog.Errorf("NodeID missing")
		return nil
	}
	// TODO version format and validation
	if len(v) == 0 {
		glog.Errorf("Version argument missing")
		klog.Errorf("Version argument missing")
		return nil
	}

@ -61,6 +62,8 @@ func NewCSIDriver(name string, v string, nodeID string) *CSIDriver {
	return &driver
}

// ValidateControllerServiceRequest validates the controller
// plugin capabilities
func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
	if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
		return nil
@ -71,32 +74,35 @@ func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapa
			return nil
		}
	}
	return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c))
	return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c)) //nolint
}

// AddControllerServiceCapabilities stores the controller capabilities
// in driver object
func (d *CSIDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
	var csc []*csi.ControllerServiceCapability

	for _, c := range cl {
		glog.Infof("Enabling controller service capability: %v", c.String())
		klog.Infof("Enabling controller service capability: %v", c.String())
		csc = append(csc, NewControllerServiceCapability(c))
	}

	d.cap = csc

	return
}

// AddVolumeCapabilityAccessModes stores volume access modes
func (d *CSIDriver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
	var vca []*csi.VolumeCapability_AccessMode
	for _, c := range vc {
		glog.Infof("Enabling volume access mode: %v", c.String())
		klog.Infof("Enabling volume access mode: %v", c.String())
		vca = append(vca, NewVolumeCapabilityAccessMode(c))
	}
	d.vc = vca
	return vca
}

// GetVolumeCapabilityAccessModes returns access modes
func (d *CSIDriver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode {
	return d.vc
}
@ -18,18 +18,20 @@ package csicommon

import (
	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/glog"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"
)

// DefaultIdentityServer stores driver object
type DefaultIdentityServer struct {
	Driver *CSIDriver
}

// GetPluginInfo returns plugin information
func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
	glog.V(5).Infof("Using default GetPluginInfo")
	klog.V(5).Infof("Using default GetPluginInfo")

	if ids.Driver.name == "" {
		return nil, status.Error(codes.Unavailable, "Driver name not configured")
@ -45,12 +47,14 @@ func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.Ge
	}, nil
}

// Probe returns empty response
func (ids *DefaultIdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) {
	return &csi.ProbeResponse{}, nil
}

// GetPluginCapabilities returns plugin capabilities
func (ids *DefaultIdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
	glog.V(5).Infof("Using default capabilities")
	klog.V(5).Infof("Using default capabilities")
	return &csi.GetPluginCapabilitiesResponse{
		Capabilities: []*csi.PluginCapability{
			{
@ -18,34 +18,39 @@ package csicommon

import (
	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/glog"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"
)

// DefaultNodeServer stores driver object
type DefaultNodeServer struct {
	Driver *CSIDriver
}

func (ns *DefaultNodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
// NodeStageVolume returns unimplemented response
func (ns *DefaultNodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

func (ns *DefaultNodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
// NodeUnstageVolume returns unimplemented response
func (ns *DefaultNodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}

// NodeGetInfo returns node ID
func (ns *DefaultNodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	glog.V(5).Infof("Using default NodeGetInfo")
	klog.V(5).Infof("Using default NodeGetInfo")

	return &csi.NodeGetInfoResponse{
		NodeId: ns.Driver.nodeID,
	}, nil
}

// NodeGetCapabilities returns RPC unknown capability
func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	glog.V(5).Infof("Using default NodeGetCapabilities")
	klog.V(5).Infof("Using default NodeGetCapabilities")

	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
@ -60,6 +65,7 @@ func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.N
	}, nil
}

// NodeGetVolumeStats returns volume stats
func (ns *DefaultNodeServer) NodeGetVolumeStats(ctx context.Context, in *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}
@ -21,13 +21,12 @@ import (
	"os"
	"sync"

	"github.com/golang/glog"
	"google.golang.org/grpc"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"k8s.io/klog"
)

// Defines Non blocking GRPC server interfaces
// NonBlockingGRPCServer defines Non blocking GRPC server interfaces
type NonBlockingGRPCServer interface {
	// Start services at the endpoint
	Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer)
@ -39,6 +38,7 @@ type NonBlockingGRPCServer interface {
	ForceStop()
}

// NewNonBlockingGRPCServer returns a non-blocking gRPC server
func NewNonBlockingGRPCServer() NonBlockingGRPCServer {
	return &nonBlockingGRPCServer{}
}
@ -49,44 +49,45 @@ type nonBlockingGRPCServer struct {
	server *grpc.Server
}

// Start starts service on the endpoint
func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {

	s.wg.Add(1)

	go s.serve(endpoint, ids, cs, ns)

	return
}

// Wait blocks until the WaitGroup counter reaches zero
func (s *nonBlockingGRPCServer) Wait() {
	s.wg.Wait()
}

// Stop stops the gRPC server gracefully.
func (s *nonBlockingGRPCServer) Stop() {
	s.server.GracefulStop()
}

// ForceStop stops the gRPC server immediately.
func (s *nonBlockingGRPCServer) ForceStop() {
	s.server.Stop()
}

func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {

	proto, addr, err := ParseEndpoint(endpoint)
	proto, addr, err := parseEndpoint(endpoint)
	if err != nil {
		glog.Fatal(err.Error())
		klog.Fatal(err.Error())
	}

	if proto == "unix" {
		addr = "/" + addr
		if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
			glog.Fatalf("Failed to remove %s, error: %s", addr, err.Error())
		if e := os.Remove(addr); e != nil && !os.IsNotExist(e) {
			klog.Fatalf("Failed to remove %s, error: %s", addr, e.Error())
		}
	}

	listener, err := net.Listen(proto, addr)
	if err != nil {
		glog.Fatalf("Failed to listen: %v", err)
		klog.Fatalf("Failed to listen: %v", err)
	}

	opts := []grpc.ServerOption{
@ -105,8 +106,10 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c
		csi.RegisterNodeServer(server, ns)
	}

	glog.Infof("Listening for connections on address: %#v", listener.Addr())

	server.Serve(listener)
	klog.Infof("Listening for connections on address: %#v", listener.Addr())

	err = server.Serve(listener)
	if err != nil {
		klog.Fatalf("Failed to serve: %v", err)
	}
}
@ -21,44 +21,49 @@ import (
	"strings"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/glog"
	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"k8s.io/klog"
)

func ParseEndpoint(ep string) (string, string, error) {
func parseEndpoint(ep string) (string, string, error) {
	if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") {
		s := strings.SplitN(ep, "://", 2)
		if s[1] != "" {
			return s[0], s[1], nil
		}
	}
	return "", "", fmt.Errorf("Invalid endpoint: %v", ep)
	return "", "", fmt.Errorf("invalid endpoint: %v", ep)
}
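parseEndpoint (now unexported) splits the scheme off the address; serve later re-prefixes unix addresses with "/". Illustrative values:

	proto, addr, _ := parseEndpoint("tcp://127.0.0.1:10000") // "tcp", "127.0.0.1:10000"
	proto, addr, _ = parseEndpoint("unix://csi/csi.sock")    // "unix", "csi/csi.sock"
	_, _, err := parseEndpoint("csi.sock")                   // error: invalid endpoint
	fmt.Println(proto, addr, err)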
// NewVolumeCapabilityAccessMode returns volume access mode
func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode {
	return &csi.VolumeCapability_AccessMode{Mode: mode}
}

// NewDefaultNodeServer initializes default node server
func NewDefaultNodeServer(d *CSIDriver) *DefaultNodeServer {
	return &DefaultNodeServer{
		Driver: d,
	}
}

// NewDefaultIdentityServer initializes default identity server
func NewDefaultIdentityServer(d *CSIDriver) *DefaultIdentityServer {
	return &DefaultIdentityServer{
		Driver: d,
	}
}

// NewDefaultControllerServer initializes default controller server
func NewDefaultControllerServer(d *CSIDriver) *DefaultControllerServer {
	return &DefaultControllerServer{
		Driver: d,
	}
}

// NewControllerServiceCapability returns controller capabilities
func NewControllerServiceCapability(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability {
	return &csi.ControllerServiceCapability{
		Type: &csi.ControllerServiceCapability_Rpc{
@ -69,6 +74,7 @@ func NewControllerServiceCapability(cap csi.ControllerServiceCapability_RPC_Type
	}
}

// RunNodePublishServer starts node server
func RunNodePublishServer(endpoint string, d *CSIDriver, ns csi.NodeServer) {
	ids := NewDefaultIdentityServer(d)

@ -77,6 +83,7 @@ func RunNodePublishServer(endpoint string, d *CSIDriver, ns csi.NodeServer) {
	s.Wait()
}

// RunControllerPublishServer starts controller server
func RunControllerPublishServer(endpoint string, d *CSIDriver, cs csi.ControllerServer) {
	ids := NewDefaultIdentityServer(d)

@ -85,6 +92,7 @@ func RunControllerPublishServer(endpoint string, d *CSIDriver, cs csi.Controller
	s.Wait()
}

// RunControllerandNodePublishServer starts both controller and node server
func RunControllerandNodePublishServer(endpoint string, d *CSIDriver, cs csi.ControllerServer, ns csi.NodeServer) {
	ids := NewDefaultIdentityServer(d)

@ -94,13 +102,13 @@ func RunControllerandNodePublishServer(endpoint string, d *CSIDriver, cs csi.Con
}

func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	glog.V(3).Infof("GRPC call: %s", info.FullMethod)
	glog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
	klog.V(3).Infof("GRPC call: %s", info.FullMethod)
	klog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
	resp, err := handler(ctx, req)
	if err != nil {
		glog.Errorf("GRPC error: %v", err)
		klog.Errorf("GRPC error: %v", err)
	} else {
		glog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
		klog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
	}
	return resp, err
}
@ -18,16 +18,18 @@ package rbd

import (
	"fmt"
	"os"
	"os/exec"
	"sort"
	"strconv"
	"syscall"

	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
	"github.com/ceph/ceph-csi/pkg/util"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
	"github.com/kubernetes-csi/drivers/pkg/csi-common"
	"github.com/pborman/uuid"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
@ -91,17 +93,32 @@ func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) erro
func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) {
	// TODO (sbezverk) Last check for not exceeding total storage capacity

	rbdVol, err := getRBDVolumeOptions(req.GetParameters())
	isMultiNode := false
	isBlock := false
	for _, cap := range req.VolumeCapabilities {
		// RO modes need to be handled independently (i.e. right now even if access mode is RO, they'll be RW upon attach)
		if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
			isMultiNode = true
		}
		if cap.GetBlock() != nil {
			isBlock = true
		}
	}

	// We want to fail early if the user is trying to create a RWX on a non-block type device
	if isMultiNode && !isBlock {
		return nil, status.Error(codes.InvalidArgument, "multi node access modes are only supported on rbd `block` type volumes")
	}

	// if it's NOT SINGLE_NODE_WRITER and it's BLOCK we'll set the parameter to ignore the in-use checks
	rbdVol, err := getRBDVolumeOptions(req.GetParameters(), (isMultiNode && isBlock))
	if err != nil {
		return nil, err
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// Generating Volume Name and Volume ID, since according to the CSI spec they MUST be different
	volName := req.GetName()
	uniqueID := uuid.NewUUID().String()
	if len(volName) == 0 {
		volName = rbdVol.Pool + "-dynamic-pvc-" + uniqueID
	}
	rbdVol.VolName = volName
	volumeID := "csi-rbd-vol-" + uniqueID
	rbdVol.VolID = volumeID
@ -110,11 +127,21 @@ func parseVolCreateRequest(req *csi.CreateVolumeRequest) (*rbdVolume, error) {
	if req.GetCapacityRange() != nil {
		volSizeBytes = req.GetCapacityRange().GetRequiredBytes()
	}
	rbdVol.VolSize = volSizeBytes

	rbdVol.VolSize = util.RoundUpToMiB(volSizeBytes)

	return rbdVol, nil
}
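The size handling switches from truncating to GiB to rounding up to MiB. The real helper lives in pkg/util; its assumed semantics, as a standalone sketch (not the actual util source):

	// assumed behavior of util.RoundUpToMiB: bytes in, MiB out, rounded up
	func roundUpToMiB(sizeBytes int64) int64 {
		const miB = int64(1 << 20)
		return (sizeBytes + miB - 1) / miB
	}
	// e.g. 1 byte -> 1 MiB; exactly 4 MiB -> 4; 4 MiB + 1 byte -> 5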
func storeVolumeMetadata(vol *rbdVolume, cp util.CachePersister) error {
	if err := cp.Create(vol.VolID, vol); err != nil {
		klog.Errorf("failed to store metadata for volume %s: %v", vol.VolID, err)
		return err
	}

	return nil
}

// CreateVolume creates the volume in backend and stores the volume metadata
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {

@ -136,6 +163,11 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
	// request
	if exVol.VolSize >= req.GetCapacityRange().GetRequiredBytes() {
		// existing volume is compatible with new request and should be reused.

		if err = storeVolumeMetadata(exVol, cs.MetadataStore); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// TODO (sbezverk) Do I need to make sure that RBD volume still exists?
		return &csi.CreateVolumeResponse{
			Volume: &csi.Volume{
@ -153,23 +185,21 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
		return nil, err
	}

	volSizeGB := int(rbdVol.VolSize / 1024 / 1024 / 1024)

	// Check if there is already RBD image with requested name
	err = cs.checkRBDStatus(rbdVol, req, volSizeGB)
	err = cs.checkRBDStatus(rbdVol, req, int(rbdVol.VolSize))
	if err != nil {
		return nil, err
	}
	if createErr := cs.MetadataStore.Create(rbdVol.VolID, rbdVol); createErr != nil {
		klog.Warningf("failed to store volume metadata with error: %v", err)
		if err = deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil {
			klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, rbdVol.VolName, err)
			return nil, err
		}
		return nil, createErr
	}
	// store volume size in bytes (snapshot and check existing volume needs volume
	// size in bytes)
	rbdVol.VolSize = rbdVol.VolSize * util.MiB

	rbdVolumes[rbdVol.VolID] = rbdVol

	if err = storeVolumeMetadata(rbdVol, cs.MetadataStore); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.CreateVolumeResponse{
		Volume: &csi.Volume{
			VolumeId: rbdVol.VolID,
@ -179,10 +209,11 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
	}, nil
}

func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeGB int) error {
func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVolumeRequest, volSizeMiB int) error {
	var err error
	// Check if there is already RBD image with requested name
	found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets()) // #nosec
	//nolint
	found, _, _ := rbdStatus(rbdVol, rbdVol.UserID, req.GetSecrets())
	if !found {
		// if VolumeContentSource is not nil, this request is for snapshot
		if req.VolumeContentSource != nil {
@ -190,10 +221,10 @@ func (cs *ControllerServer) checkRBDStatus(rbdVol *rbdVolume, req *csi.CreateVol
			return err
		}
	} else {
		err = createRBDImage(rbdVol, volSizeGB, rbdVol.AdminID, req.GetSecrets())
		err = createRBDImage(rbdVol, volSizeMiB, rbdVol.AdminID, req.GetSecrets())
		if err != nil {
			klog.Warningf("failed to create volume: %v", err)
			return err
			return status.Error(codes.Internal, err.Error())
		}

		klog.V(4).Infof("create volume %s", rbdVol.VolName)
@ -214,12 +245,12 @@ func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *

	rbdSnap := &rbdSnapshot{}
	if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil {
		return err
		return status.Error(codes.NotFound, err.Error())
	}

	err := restoreSnapshot(rbdVol, rbdSnap, rbdVol.AdminID, req.GetSecrets())
	if err != nil {
		return err
		return status.Error(codes.Internal, err.Error())
	}
	klog.V(4).Infof("create volume %s from snapshot %s", req.GetName(), rbdSnap.SnapName)
	return nil
@ -244,9 +275,11 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol

	rbdVol := &rbdVolume{}
	if err := cs.MetadataStore.Get(volumeID, rbdVol); err != nil {
		if os.IsNotExist(errors.Cause(err)) {
		if err, ok := err.(*util.CacheEntryNotFound); ok {
			klog.V(3).Infof("metadata for volume %s not found, assuming the volume to be already deleted (%v)", volumeID, err)
			return &csi.DeleteVolumeResponse{}, nil
		}

		return nil, err
	}

@ -256,17 +289,66 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
	if err := deleteRBDImage(rbdVol, rbdVol.AdminID, req.GetSecrets()); err != nil {
		// TODO: can we detect "already deleted" situations here and proceed?
		klog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err)
		return nil, err
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err := cs.MetadataStore.Delete(volumeID); err != nil {
		return nil, err
		return nil, status.Error(codes.Internal, err.Error())
	}

	delete(rbdVolumes, volumeID)
	return &csi.DeleteVolumeResponse{}, nil
}

// ListVolumes returns a list of volumes stored in memory
func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
	var startToken int
	if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_VOLUMES); err != nil {
		klog.Warningf("invalid list volume req: %v", req)
		return nil, err
	}

	// validate starting token if present
	if len(req.GetStartingToken()) > 0 {
		i, parseErr := strconv.ParseUint(req.StartingToken, 10, 32)
		if parseErr != nil {
			return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error())
		}
		// check whether the starting token exceeds the number of rbd volumes
		if len(rbdVolumes) < int(i) {
			return nil, status.Errorf(codes.Aborted, "invalid starting token %s", parseErr.Error())
		}
		startToken = int(i)
	}

	var entries []*csi.ListVolumesResponse_Entry

	keys := make([]string, 0)
	for k := range rbdVolumes {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for index, k := range keys {
		if index < startToken {
			continue
		}
		entries = append(entries, &csi.ListVolumesResponse_Entry{
			Volume: &csi.Volume{
				VolumeId:      rbdVolumes[k].VolID,
				CapacityBytes: rbdVolumes[k].VolSize,
				VolumeContext: extractStoredVolOpt(rbdVolumes[k]),
			},
		})
	}

	resp := &csi.ListVolumesResponse{
		Entries: entries,
	}

	return resp, nil
}
||||
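Editor's note: the ListVolumes implementation above treats the starting token as a numeric offset into the sorted key list and returns all remaining entries in one response (no NextToken is set). A minimal caller sketch, assuming a ControllerServer value cs and a context ctx are in scope (both hypothetical here, not part of this commit):

	// Skip the first two entries of the sorted volume list; an unparsable or
	// out-of-range token is rejected with codes.Aborted by the server above.
	resp, err := cs.ListVolumes(ctx, &csi.ListVolumesRequest{StartingToken: "2"})
	if err != nil {
		return err
	}
	for _, entry := range resp.Entries {
		klog.V(4).Infof("volume %s: %d bytes", entry.Volume.VolumeId, entry.Volume.CapacityBytes)
	}
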
// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
@@ -294,6 +376,7 @@ func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *cs

// CreateSnapshot creates the snapshot in backend and stores metadata
// in store
// nolint: gocyclo
func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {

if err := cs.validateSnapshotReq(req); err != nil {
@@ -311,6 +394,10 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
// check for the requested source volume id and already allocated source volume id
if exSnap, err := getRBDSnapshotByName(req.GetName()); err == nil {
if req.SourceVolumeId == exSnap.SourceVolumeID {
if err = storeSnapshotMetadata(exSnap, cs.MetadataStore); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: exSnap.SizeBytes,
@@ -328,7 +415,7 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS

rbdSnap, err := getRBDSnapshotOptions(req.GetParameters())
if err != nil {
return nil, err
return nil, status.Error(codes.InvalidArgument, err.Error())
}

// Generating Snapshot Name and Snapshot ID, as according to CSI spec they MUST be different
@@ -339,7 +426,7 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
return nil, status.Errorf(codes.NotFound, "Source Volume ID %s cannot found", req.GetSourceVolumeId())
}
if !hasSnapshotFeature(rbdVolume.ImageFeatures) {
return nil, fmt.Errorf("volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId())
return nil, status.Errorf(codes.InvalidArgument, "volume(%s) has not snapshot feature(layering)", req.GetSourceVolumeId())
}

rbdSnap.VolName = rbdVolume.VolName
@@ -352,16 +439,17 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
err = cs.doSnapshot(rbdSnap, req.GetSecrets())
// if we already have the snapshot, return the snapshot
if err != nil {
return nil, err
return nil, status.Error(codes.Internal, err.Error())
}

rbdSnap.CreatedAt = ptypes.TimestampNow().GetSeconds()

if err = cs.storeSnapMetadata(rbdSnap, req.GetSecrets()); err != nil {
return nil, err
rbdSnapshots[snapshotID] = rbdSnap

if err = storeSnapshotMetadata(rbdSnap, cs.MetadataStore); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

rbdSnapshots[snapshotID] = rbdSnap
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: rbdSnap.SizeBytes,
@@ -375,22 +463,13 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
}, nil
}

func (cs *ControllerServer) storeSnapMetadata(rbdSnap *rbdSnapshot, secret map[string]string) error {
errCreate := cs.MetadataStore.Create(rbdSnap.SnapID, rbdSnap)
if errCreate != nil {
klog.Warningf("rbd: failed to store snapInfo with error: %v", errCreate)
// Unprotect snapshot
err := unprotectSnapshot(rbdSnap, rbdSnap.AdminID, secret)
if err != nil {
return status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to unprotect snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err)
func storeSnapshotMetadata(rbdSnap *rbdSnapshot, cp util.CachePersister) error {
if err := cp.Create(rbdSnap.SnapID, rbdSnap); err != nil {
klog.Errorf("failed to store metadata for snapshot %s: %v", rbdSnap.SnapID, err)
return err
}
// Deleting snapshot
klog.V(4).Infof("deleting Snaphot %s", rbdSnap.SnapName)
if err = deleteSnapshot(rbdSnap, rbdSnap.AdminID, secret); err != nil {
return status.Errorf(codes.Unknown, "This Snapshot should be removed but failed to delete snapshot: %s/%s with error: %v", rbdSnap.Pool, rbdSnap.SnapName, err)
}
}
return errCreate

return nil
}

func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest) error {
@@ -438,7 +517,7 @@ func (cs *ControllerServer) doSnapshot(rbdSnap *rbdSnapshot, secret map[string]s
if err != nil {
return fmt.Errorf("snapshot is created but failed to protect and delete snapshot: %v", err)
}
return fmt.Errorf("snapshot is created but failed to protect snapshot")
return errors.New("snapshot is created but failed to protect snapshot")
}
}
return nil
@@ -466,6 +545,11 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS

rbdSnap := &rbdSnapshot{}
if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil {
if err, ok := err.(*util.CacheEntryNotFound); ok {
klog.V(3).Infof("metadata for snapshot %s not found, assuming the snapshot to be already deleted (%v)", snapshotID, err)
return &csi.DeleteSnapshotResponse{}, nil
}

return nil, err
}

@@ -482,7 +566,7 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
}

if err := cs.MetadataStore.Delete(snapshotID); err != nil {
return nil, err
return nil, status.Error(codes.Internal, err.Error())
}

delete(rbdSnapshots, snapshotID)

@@ -19,8 +19,9 @@ package rbd
import (
"context"

csicommon "github.com/ceph/ceph-csi/pkg/csi-common"

"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
)

// IdentityServer struct of rbd CSI driver with supported methods of CSI

@@ -23,16 +23,14 @@ import (
"regexp"
"strings"

"golang.org/x/net/context"
"k8s.io/klog"
"github.com/ceph/ceph-csi/pkg/csi-common"

"github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"

"github.com/kubernetes-csi/drivers/pkg/csi-common"
)

// NodeServer struct of ceph rbd driver with supported methods of CSI
@@ -45,21 +43,12 @@ type NodeServer struct {
//TODO remove both stage and unstage methods
//once https://github.com/kubernetes-csi/drivers/pull/145 is merged

// NodeStageVolume returns unimplemented response
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// NodeUnstageVolume returns unimplemented response
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// NodePublishVolume mounts the volume mounted to the device path to the target
// path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
targetPath := req.GetTargetPath()
targetPathMutex.LockKey(targetPath)
disableInUseChecks := false

defer func() {
if err := targetPathMutex.UnlockKey(targetPath); err != nil {
@@ -82,7 +71,18 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
if !notMnt {
return &csi.NodePublishVolumeResponse{}, nil
}
volOptions, err := getRBDVolumeOptions(req.GetVolumeContext())

// MULTI_NODE_MULTI_WRITER is supported by default for Block access type volumes
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
if isBlock {
disableInUseChecks = true
} else {
klog.Warningf("MULTI_NODE_MULTI_WRITER currently only supported with volumes of access type `block`, invalid AccessMode for volume: %v", req.GetVolumeId())
return nil, status.Error(codes.InvalidArgument, "rbd: RWX access mode request is only valid for volumes with access type `block`")
}
}

volOptions, err := getRBDVolumeOptions(req.GetVolumeContext(), disableInUseChecks)
if err != nil {
return nil, err
}

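Editor's note: NodePublishVolume above only honors multi-node writers when the access type is block. A sketch of the volume capability shape that passes this check (illustrative only, using the standard CSI spec Go types):

	// A capability the check above accepts: multi-node writer combined with
	// Block access type. The same mode with a Mount access type is rejected
	// with codes.InvalidArgument.
	volCap := &csi.VolumeCapability{
		AccessType: &csi.VolumeCapability_Block{
			Block: &csi.VolumeCapability_BlockVolume{},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		},
	}
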
@@ -17,12 +17,11 @@ limitations under the License.
package rbd

import (
"k8s.io/klog"

csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
"github.com/ceph/ceph-csi/pkg/util"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"

"github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/nsenter"
"k8s.io/utils/exec"
@@ -30,11 +29,13 @@ import (

// PluginFolder defines the location of rbdplugin
const (
PluginFolder = "/var/lib/kubelet/plugins/csi-rbdplugin"
rbdDefaultAdminID = "admin"
rbdDefaultUserID = rbdDefaultAdminID
)

// PluginFolder defines the location of ceph plugin
var PluginFolder = "/var/lib/kubelet/plugins/"

// Driver contains the default identity,node and controller struct
type Driver struct {
cd *csicommon.CSIDriver
@@ -46,6 +47,8 @@ type Driver struct {

var (
version = "1.0.0"
// confStore is the global config store
confStore *util.ConfigStore
)

// NewDriver returns new rbd driver
@@ -86,10 +89,16 @@ func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, err

// Run start a non-blocking grpc controller,node and identityserver for
// rbd CSI driver which can serve multiple parallel requests
func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, cachePersister util.CachePersister) {
func (r *Driver) Run(driverName, nodeID, endpoint, configRoot string, containerized bool, cachePersister util.CachePersister) {
var err error
klog.Infof("Driver: %v version: %v", driverName, version)

// Initialize config store
confStore, err = util.NewConfigStore(configRoot)
if err != nil {
klog.Fatalln("Failed to initialize config store.")
}

// Initialize default library driver
r.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
if r.cd == nil {
@@ -98,11 +107,19 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, ca
r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
})
r.cd.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})

// We only support the multi-writer option when using block, but it's a supported capability for the plugin in general
// In addition, we want to add the remaining modes like MULTI_NODE_READER_ONLY,
// MULTI_NODE_SINGLE_WRITER etc, but need to do some verification of RO modes first
// will work those as follow up features
r.cd.AddVolumeCapabilityAccessModes(
[]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})

// Create GRPC servers
r.ids = NewIdentityServer(r.cd)

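Editor's note: Run now takes an extra configRoot argument that is used to initialize the global config store before the CSI driver is built. A hypothetical wiring sketch from a main() (the NewDriver call signature and flag values here are assumptions, not shown in this commit):

	// configRoot selects either a directory of per-cluster config files or
	// the special value "k8s_objects" (see util.NewConfigStore below).
	driver := rbd.NewDriver()
	driver.Run("csi-rbdplugin", nodeID, endpoint, "/etc/ceph-csi-config", false, cachePersister)
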
@@ -258,6 +258,7 @@ func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}

err = waitForrbdImage(backoff, volOptions, userID, credentials)

if err != nil {
@@ -279,7 +280,7 @@ func createPath(volOpt *rbdVolume, userID string, creds map[string]string) (stri
}

klog.V(5).Infof("rbd: map mon %s", mon)
key, err := getRBDKey(userID, creds)
key, err := getRBDKey(volOpt.ClusterID, userID, creds)
if err != nil {
return "", err
}
@@ -313,6 +314,10 @@ func waitForrbdImage(backoff wait.Backoff, volOptions *rbdVolume, userID string,
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
if (volOptions.DisableInUseChecks) && (used) {
klog.V(2).Info("valid multi-node attach requested, ignoring watcher in-use result")
return used, nil
}
return !used, nil
})
// return error if rbd image has not become available for the specified timeout

@@ -51,6 +51,8 @@ type rbdVolume struct {
AdminID string `json:"adminId"`
UserID string `json:"userId"`
Mounter string `json:"mounter"`
DisableInUseChecks bool `json:"disableInUseChecks"`
ClusterID string `json:"clusterId"`
}

type rbdSnapshot struct {
@@ -65,6 +67,7 @@ type rbdSnapshot struct {
SizeBytes int64 `json:"sizeBytes"`
AdminID string `json:"adminId"`
UserID string `json:"userId"`
ClusterID string `json:"clusterId"`
}

var (
@@ -84,13 +87,26 @@ var (
supportedFeatures = sets.NewString("layering")
)

func getRBDKey(id string, credentials map[string]string) (string, error) {
func getRBDKey(clusterid, id string, credentials map[string]string) (string, error) {
var (
ok bool
err error
key string
)

if key, ok := credentials[id]; ok {
return key, nil
if key, ok = credentials[id]; !ok {
if clusterid != "" {
key, err = confStore.KeyForUser(clusterid, id)
if err != nil {
return "", fmt.Errorf("RBD key for ID: %s not found in config store of clusterID (%s)", id, clusterid)
}
} else {
return "", fmt.Errorf("RBD key for ID: %s not found", id)
}
}

return key, nil
}
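Editor's note: the reworked getRBDKey above consults the per-request credentials map first and only falls back to the config store when a clusterID is available. A resolution sketch, assuming the global confStore has been initialized via NewConfigStore (values hypothetical):

	// 1. credentials["admin"] wins when the secret carries the key directly;
	// 2. otherwise confStore.KeyForUser("cluster-1", "admin") is consulted;
	// 3. with neither available, a "not found" error is returned.
	key, err := getRBDKey("cluster-1", "admin", map[string]string{})
	if err != nil {
		klog.Errorf("key lookup failed: %v", err)
	}
	_ = key
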

func getMon(pOpts *rbdVolume, credentials map[string]string) (string, error) {
mon := pOpts.Monitors
@@ -120,18 +136,18 @@ func createRBDImage(pOpts *rbdVolume, volSz int, adminID string, credentials map
}

image := pOpts.VolName
volSzGB := fmt.Sprintf("%dG", volSz)
volSzMiB := fmt.Sprintf("%dM", volSz)

key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
if pOpts.ImageFormat == rbdImageFormat2 {
klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool)
klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s ", image, volSzMiB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool)
} else {
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool)
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s", image, volSzMiB, pOpts.ImageFormat, mon, pOpts.Pool)
}
args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat}
args := []string{"create", image, "--size", volSzMiB, "--pool", pOpts.Pool, "--id", adminID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat}
if pOpts.ImageFormat == rbdImageFormat2 {
args = append(args, "--image-feature", pOpts.ImageFeatures)
}
@@ -153,7 +169,7 @@ func rbdStatus(pOpts *rbdVolume, userID string, credentials map[string]string) (
image := pOpts.VolName
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.

key, err := getRBDKey(userID, credentials)
key, err := getRBDKey(pOpts.ClusterID, userID, credentials)
if err != nil {
return false, "", err
}
@@ -201,7 +217,7 @@ func deleteRBDImage(pOpts *rbdVolume, adminID string, credentials map[string]str
klog.Info("rbd is still being used ", image)
return fmt.Errorf("rbd %s is still being used", image)
}
key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
@@ -226,24 +242,82 @@ func execCommand(command string, args []string) ([]byte, error) {
return cmd.CombinedOutput()
}

func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
func getMonsAndClusterID(options map[string]string) (monitors, clusterID, monInSecret string, err error) {
var ok bool

monitors, ok = options["monitors"]
if !ok {
// if mons are not set in options, check if they are set in secret
if monInSecret, ok = options["monValueFromSecret"]; !ok {
// if mons are not in secret, check if we have a cluster-id
if clusterID, ok = options["clusterID"]; !ok {
err = errors.New("either monitors or monValueFromSecret or clusterID must be set")
return
}

if monitors, err = confStore.Mons(clusterID); err != nil {
klog.Errorf("failed getting mons (%s)", err)
err = fmt.Errorf("failed to fetch monitor list using clusterID (%s)", clusterID)
return
}
}
}

return
}
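Editor's note: the helper above encodes a precedence order: an explicit "monitors" option beats the secret-based "monValueFromSecret" key, which beats a "clusterID" config-store lookup. A sketch, assuming an initialized confStore (the options map values are hypothetical StorageClass parameters):

	opts := map[string]string{"clusterID": "cluster-1"}
	monitors, clusterID, monInSecret, err := getMonsAndClusterID(opts)
	// with only clusterID set, monitors comes from confStore.Mons("cluster-1")
	// and monInSecret stays empty
	_, _, _, _ = monitors, clusterID, monInSecret, err
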

func getIDs(options map[string]string, clusterID string) (adminID, userID string, err error) {
var ok bool

adminID, ok = options["adminid"]
switch {
case ok:
case clusterID != "":
if adminID, err = confStore.AdminID(clusterID); err != nil {
klog.Errorf("failed getting adminID (%s)", err)
return "", "", fmt.Errorf("failed to fetch adminID for clusterID (%s)", clusterID)
}
default:
adminID = rbdDefaultAdminID
}

userID, ok = options["userid"]
switch {
case ok:
case clusterID != "":
if userID, err = confStore.UserID(clusterID); err != nil {
klog.Errorf("failed getting userID (%s)", err)
return "", "", fmt.Errorf("failed to fetch userID using clusterID (%s)", clusterID)
}
default:
userID = rbdDefaultUserID
}

return adminID, userID, err
}

func getRBDVolumeOptions(volOptions map[string]string, disableInUseChecks bool) (*rbdVolume, error) {
var (
ok bool
err error
)

rbdVol := &rbdVolume{}
rbdVol.Pool, ok = volOptions["pool"]
if !ok {
return nil, fmt.Errorf("missing required parameter pool")
}
rbdVol.Monitors, ok = volOptions["monitors"]
if !ok {
// if mons are not set in options, check if they are set in secret
if rbdVol.MonValueFromSecret, ok = volOptions["monValueFromSecret"]; !ok {
return nil, fmt.Errorf("either monitors or monValueFromSecret must be set")
return nil, errors.New("missing required parameter pool")
}

rbdVol.Monitors, rbdVol.ClusterID, rbdVol.MonValueFromSecret, err = getMonsAndClusterID(volOptions)
if err != nil {
return nil, err
}

rbdVol.ImageFormat, ok = volOptions["imageFormat"]
if !ok {
rbdVol.ImageFormat = rbdImageFormat2
}

if rbdVol.ImageFormat == rbdImageFormat2 {
// if no image features is provided, it results in empty string
// which disable all RBD image format 2 features as we expected
@@ -259,48 +333,58 @@ func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
}

}
getCredsFromVol(rbdVol, volOptions)

klog.V(3).Infof("setting disableInUseChecks on rbd volume to: %v", disableInUseChecks)
rbdVol.DisableInUseChecks = disableInUseChecks

err = getCredsFromVol(rbdVol, volOptions)
if err != nil {
return nil, err
}

return rbdVol, nil
}

func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) {
var ok bool
rbdVol.AdminID, ok = volOptions["adminid"]
if !ok {
rbdVol.AdminID = rbdDefaultAdminID
}
rbdVol.UserID, ok = volOptions["userid"]
if !ok {
rbdVol.UserID = rbdDefaultUserID
func getCredsFromVol(rbdVol *rbdVolume, volOptions map[string]string) error {
var (
ok bool
err error
)

rbdVol.AdminID, rbdVol.UserID, err = getIDs(volOptions, rbdVol.ClusterID)
if err != nil {
return err
}

rbdVol.Mounter, ok = volOptions["mounter"]
if !ok {
rbdVol.Mounter = rbdDefaultMounter
}

return err
}

func getRBDSnapshotOptions(snapOptions map[string]string) (*rbdSnapshot, error) {
var ok bool
var (
ok bool
err error
)

rbdSnap := &rbdSnapshot{}
rbdSnap.Pool, ok = snapOptions["pool"]
if !ok {
return nil, fmt.Errorf("missing required parameter pool")
}
rbdSnap.Monitors, ok = snapOptions["monitors"]
if !ok {
// if mons are not set in options, check if they are set in secret
if rbdSnap.MonValueFromSecret, ok = snapOptions["monValueFromSecret"]; !ok {
return nil, fmt.Errorf("either monitors or monValueFromSecret must be set")
}
}
rbdSnap.AdminID, ok = snapOptions["adminid"]
if !ok {
rbdSnap.AdminID = rbdDefaultAdminID
}
rbdSnap.UserID, ok = snapOptions["userid"]
if !ok {
rbdSnap.UserID = rbdDefaultUserID
return nil, errors.New("missing required parameter pool")
}

rbdSnap.Monitors, rbdSnap.ClusterID, rbdSnap.MonValueFromSecret, err = getMonsAndClusterID(snapOptions)
if err != nil {
return nil, err
}

rbdSnap.AdminID, rbdSnap.UserID, err = getIDs(snapOptions, rbdSnap.ClusterID)
if err != nil {
return nil, err
}
return rbdSnap, nil
}

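Editor's note: both option parsers above consume the same style of parameters map. Two minimal maps they accept, as a sketch (values hypothetical; the clusterID variant additionally requires an initialized confStore):

	volOpts := map[string]string{
		"pool":      "rbd",
		"clusterID": "cluster-1", // monitors resolved via the config store
	}
	rbdVol, err := getRBDVolumeOptions(volOpts, false)

	snapOpts := map[string]string{
		"pool":     "rbd",
		"monitors": "mon1:6789,mon2:6789", // explicit monitor list instead
	}
	rbdSnap, err2 := getRBDSnapshotOptions(snapOpts)
	_, _, _, _ = rbdVol, err, rbdSnap, err2
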
@@ -362,7 +446,7 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]
image := pOpts.VolName
snapID := pOpts.SnapID

key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
@@ -383,6 +467,37 @@ func protectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]
return nil
}

func extractStoredVolOpt(r *rbdVolume) map[string]string {
volOptions := make(map[string]string)
volOptions["pool"] = r.Pool

if len(r.Monitors) > 0 {
volOptions["monitors"] = r.Monitors
}

if len(r.MonValueFromSecret) > 0 {
volOptions["monValueFromSecret"] = r.MonValueFromSecret
}

volOptions["imageFormat"] = r.ImageFormat

if len(r.ImageFeatures) > 0 {
volOptions["imageFeatures"] = r.ImageFeatures
}

if len(r.AdminID) > 0 {
volOptions["adminId"] = r.AdminID
}

if len(r.UserID) > 0 {
volOptions["userId"] = r.UserID
}
if len(r.Mounter) > 0 {
volOptions["mounter"] = r.Mounter
}
return volOptions
}

func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]string) error {
var output []byte

@@ -394,7 +509,7 @@ func createSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s
image := pOpts.VolName
snapID := pOpts.SnapID

key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
@@ -421,7 +536,7 @@ func unprotectSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[strin
image := pOpts.VolName
snapID := pOpts.SnapID

key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
@@ -448,7 +563,7 @@ func deleteSnapshot(pOpts *rbdSnapshot, adminID string, credentials map[string]s
image := pOpts.VolName
snapID := pOpts.SnapID

key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}
@@ -475,7 +590,7 @@ func restoreSnapshot(pVolOpts *rbdVolume, pSnapOpts *rbdSnapshot, adminID string
image := pVolOpts.VolName
snapID := pSnapOpts.SnapID

key, err := getRBDKey(adminID, credentials)
key, err := getRBDKey(pVolOpts.ClusterID, adminID, credentials)
if err != nil {
return err
}

@@ -27,9 +27,15 @@ const (
PluginFolder = "/var/lib/kubelet/plugins"
)

// ForAllFunc stores metadata with identifier
// ForAllFunc is a unary predicate for visiting all cache entries
// matching the `pattern' in CachePersister's ForAll function.
type ForAllFunc func(identifier string) error

// CacheEntryNotFound is an error type for "Not Found" cache errors
type CacheEntryNotFound struct {
error
}

// CachePersister interface implemented for store
type CachePersister interface {
Create(identifier string, data interface{}) error
@@ -50,6 +56,7 @@ func NewCachePersister(metadataStore, driverName string) (CachePersister, error)
klog.Infof("cache-persister: using node as metadata cache persister")
nc := &NodeCache{}
nc.BasePath = PluginFolder + "/" + driverName
nc.CacheDir = "controller"
return nc, nil
}
return nil, errors.New("cache-persister: couldn't parse metadatastorage flag")

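Editor's note: the new CacheEntryNotFound error type lets callers distinguish "entry never existed or was already deleted" from genuine store failures. This mirrors the DeleteVolume/DeleteSnapshot changes earlier in this commit; a condensed sketch of the pattern:

	rbdVol := &rbdVolume{}
	if err := cs.MetadataStore.Get(volumeID, rbdVol); err != nil {
		if _, ok := err.(*util.CacheEntryNotFound); ok {
			// treat a missing cache entry as an idempotent no-op
			return &csi.DeleteVolumeResponse{}, nil
		}
		return nil, err // anything else is a genuine store failure
	}
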
137 pkg/util/configstore.go Normal file
@@ -0,0 +1,137 @@
/*
Copyright 2019 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"errors"
"fmt"
"k8s.io/klog"
"path"
"strings"
)

// StoreReader interface enables plugging different stores, that contain the
// keys and data. (e.g k8s secrets or local files)
type StoreReader interface {
DataForKey(clusterID string, key string) (string, error)
}

/* ConfigKeys contents and format,
- csMonitors: MON list, comma separated
- csAdminID: adminID, used for provisioning
- csUserID: userID, used for publishing
- csAdminKey: key, for adminID in csProvisionerUser
- csUserKey: key, for userID in csPublisherUser
- csPools: Pool list, comma separated
*/

// Constants for various ConfigKeys
const (
csMonitors = "monitors"
csAdminID = "adminid"
csUserID = "userid"
csAdminKey = "adminkey"
csUserKey = "userkey"
csPools = "pools"
)

// ConfigStore provides various gettors for ConfigKeys
type ConfigStore struct {
StoreReader
}

// dataForKey returns data from the config store for the provided key
func (dc *ConfigStore) dataForKey(clusterID, key string) (string, error) {
if dc.StoreReader != nil {
return dc.StoreReader.DataForKey(clusterID, key)
}

return "", errors.New("config store location uninitialized")
}

// Mons returns a comma separated MON list from the cluster config represented by clusterID
func (dc *ConfigStore) Mons(clusterID string) (string, error) {
return dc.dataForKey(clusterID, csMonitors)
}

// Pools returns a list of pool names from the cluster config represented by clusterID
func (dc *ConfigStore) Pools(clusterID string) ([]string, error) {
content, err := dc.dataForKey(clusterID, csPools)
if err != nil {
return nil, err
}

return strings.Split(content, ","), nil
}

// AdminID returns the admin ID from the cluster config represented by clusterID
func (dc *ConfigStore) AdminID(clusterID string) (string, error) {
return dc.dataForKey(clusterID, csAdminID)
}

// UserID returns the user ID from the cluster config represented by clusterID
func (dc *ConfigStore) UserID(clusterID string) (string, error) {
return dc.dataForKey(clusterID, csUserID)
}

// KeyForUser returns the key for the requested user ID from the cluster config
// represented by clusterID
func (dc *ConfigStore) KeyForUser(clusterID, userID string) (data string, err error) {
var fetchKey string
user, err := dc.AdminID(clusterID)
if err != nil {
return
}

if user == userID {
fetchKey = csAdminKey
} else {
user, err = dc.UserID(clusterID)
if err != nil {
return
}

if user != userID {
err = fmt.Errorf("requested user (%s) not found in cluster configuration of (%s)", userID, clusterID)
return
}

fetchKey = csUserKey
}

return dc.dataForKey(clusterID, fetchKey)
}

// NewConfigStore returns a config store based on value of configRoot. If
// configRoot is not "k8s_objects" then it is assumed to be a path to a
// directory, under which the configuration files can be found
func NewConfigStore(configRoot string) (*ConfigStore, error) {
if configRoot != "k8s_objects" {
klog.Infof("cache-store: using files in path (%s) as config store", configRoot)
fc := &FileConfig{}
fc.BasePath = path.Clean(configRoot)
dc := &ConfigStore{fc}
return dc, nil
}

klog.Infof("cache-store: using k8s objects as config store")
kc := &K8sConfig{}
kc.Client = NewK8sClient()
kc.Namespace = GetK8sNamespace()
dc := &ConfigStore{kc}
return dc, nil
}
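Editor's note: an end-to-end usage sketch of the new config store, assuming a directory config root laid out as described in fileconfig.go below (paths and values are hypothetical):

	// /etc/ceph-csi-config/ceph-cluster-cluster-1 holds the files
	// "monitors", "adminid", "adminkey", ... described above.
	conf, err := util.NewConfigStore("/etc/ceph-csi-config")
	if err != nil {
		return err
	}
	mons, _ := conf.Mons("cluster-1")     // e.g. "mon1:6789,mon2:6789"
	admin, _ := conf.AdminID("cluster-1") // e.g. "admin"
	key, _ := conf.KeyForUser("cluster-1", admin)
	_, _, _ = mons, admin, key
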
160 pkg/util/configstore_test.go Normal file
@@ -0,0 +1,160 @@
/*
Copyright 2019 ceph-csi authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"io/ioutil"
"os"
"strings"
"testing"
)

var basePath = "./test_artifacts"
var clusterID = "testclusterid"
var cs *ConfigStore

func cleanupTestData() {
os.RemoveAll(basePath)
}

// nolint: gocyclo
func TestConfigStore(t *testing.T) {
var err error
var data string
var content string
var testDir string

defer cleanupTestData()

cs, err = NewConfigStore(basePath)
if err != nil {
t.Errorf("Fatal, failed to get a new config store")
}

err = os.MkdirAll(basePath, 0700)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Should fail as clusterid directory is missing
_, err = cs.Mons(clusterID)
if err == nil {
t.Errorf("Failed: expected error due to missing parent directory")
}

testDir = basePath + "/" + "ceph-cluster-" + clusterID
err = os.MkdirAll(testDir, 0700)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Should fail as mons file is missing
_, err = cs.Mons(clusterID)
if err == nil {
t.Errorf("Failed: expected error due to missing mons file")
}

data = ""
err = ioutil.WriteFile(testDir+"/"+csMonitors, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Should fail as MONs is an empty string
content, err = cs.Mons(clusterID)
if err == nil {
t.Errorf("Failed: want (%s), got (%s)", data, content)
}

data = "mon1,mon2,mon3"
err = ioutil.WriteFile(testDir+"/"+csMonitors, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Fetching MONs should succeed
content, err = cs.Mons(clusterID)
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}

data = "pool1,pool2"
err = ioutil.WriteFile(testDir+"/"+csPools, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Fetching MONs should succeed
listContent, err := cs.Pools(clusterID)
if err != nil || strings.Join(listContent, ",") != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}

data = "provuser"
err = ioutil.WriteFile(testDir+"/"+csAdminID, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Fetching provuser should succeed
content, err = cs.AdminID(clusterID)
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}

data = "pubuser"
err = ioutil.WriteFile(testDir+"/"+csUserID, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Fetching pubuser should succeed
content, err = cs.UserID(clusterID)
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}

data = "provkey"
err = ioutil.WriteFile(testDir+"/"+csAdminKey, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Fetching provkey should succeed
content, err = cs.KeyForUser(clusterID, "provuser")
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}

data = "pubkey"
err = ioutil.WriteFile(testDir+"/"+csUserKey, []byte(data), 0644)
if err != nil {
t.Errorf("Test setup error %s", err)
}

// TEST: Fetching pubkey should succeed
content, err = cs.KeyForUser(clusterID, "pubuser")
if err != nil || content != data {
t.Errorf("Failed: want (%s), got (%s), err (%s)", data, content, err)
}

// TEST: Fetching random user key should fail
_, err = cs.KeyForUser(clusterID, "random")
if err == nil {
t.Errorf("Failed: Expected to fail fetching random user key")
}
}
57 pkg/util/fileconfig.go Normal file
@@ -0,0 +1,57 @@
/*
Copyright 2019 ceph-csi authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"fmt"
"io/ioutil"
"path"
)

/*
FileConfig is a ConfigStore interface implementation that reads configuration
information from files.

BasePath defines the directory under which FileConfig will attempt to open and
read contents of various Ceph cluster configurations.

Each Ceph cluster configuration is stored under a directory named,
BasePath/ceph-cluster-<clusterid>, where <clusterid> uniquely identifies and
separates the each Ceph cluster configuration.

Under each Ceph cluster configuration directory, individual files named as per
the ConfigKeys constants in the ConfigStore interface, store the required
configuration information.
*/
type FileConfig struct {
BasePath string
}

// DataForKey reads the appropriate config file, named using key, and returns
// the contents of the file to the caller
func (fc *FileConfig) DataForKey(clusterid, key string) (data string, err error) {
pathToKey := path.Join(fc.BasePath, "ceph-cluster-"+clusterid, key)
// #nosec
content, err := ioutil.ReadFile(pathToKey)
if err != nil || string(content) == "" {
err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterid, err)
return
}

data = string(content)
return
}

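Editor's note: a sketch of the on-disk layout DataForKey above reads, one file per ConfigKey (the base path and values are hypothetical):

	// /etc/ceph-csi-config/ceph-cluster-cluster-1/monitors -> "mon1:6789,mon2:6789"
	// /etc/ceph-csi-config/ceph-cluster-cluster-1/adminid  -> "admin"
	// /etc/ceph-csi-config/ceph-cluster-cluster-1/adminkey -> "<ceph key>"
	fc := &util.FileConfig{BasePath: "/etc/ceph-csi-config"}
	mons, err := fc.DataForKey("cluster-1", "monitors")
	_, _ = mons, err
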
@@ -25,7 +25,7 @@ import (
"github.com/pkg/errors"
"k8s.io/klog"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8s "k8s.io/client-go/kubernetes"
@@ -110,7 +110,7 @@ func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFun
continue
}
if err = json.Unmarshal([]byte(data), destObj); err != nil {
return errors.Wrap(err, "k8s-cm-cache: unmarshal error")
return errors.Wrapf(err, "k8s-cm-cache: JSON unmarshaling failed for configmap %s", cm.ObjectMeta.Name)
}
if err = f(cm.ObjectMeta.Name); err != nil {
return err
@@ -123,12 +123,12 @@ func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFun
func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error {
cm, err := k8scm.getMetadataCM(identifier)
if cm != nil && err == nil {
klog.V(4).Infof("k8s-cm-cache: configmap already exists, skipping configmap creation")
klog.V(4).Infof("k8s-cm-cache: configmap %s already exists, skipping configmap creation", identifier)
return nil
}
dataJSON, err := json.Marshal(data)
if err != nil {
return errors.Wrap(err, "k8s-cm-cache: marshal error")
return errors.Wrapf(err, "k8s-cm-cache: JSON marshaling failed for configmap %s", identifier)
}
cm = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -145,13 +145,13 @@ func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error {
_, err = k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Create(cm)
if err != nil {
if apierrs.IsAlreadyExists(err) {
klog.V(4).Infof("k8s-cm-cache: configmap already exists")
klog.V(4).Infof("k8s-cm-cache: configmap %s already exists", identifier)
return nil
}
return errors.Wrapf(err, "k8s-cm-cache: couldn't persist %s metadata as configmap", identifier)
}

klog.V(4).Infof("k8s-cm-cache: configmap %s successfully created\n", identifier)
klog.V(4).Infof("k8s-cm-cache: configmap %s successfully created", identifier)
return nil
}

@@ -159,11 +159,15 @@ func (k8scm *K8sCMCache) Create(identifier string, data interface{}) error {
func (k8scm *K8sCMCache) Get(identifier string, data interface{}) error {
cm, err := k8scm.getMetadataCM(identifier)
if err != nil {
if apierrs.IsNotFound(err) {
return &CacheEntryNotFound{err}
}

return err
}
err = json.Unmarshal([]byte(cm.Data[cmDataKey]), data)
if err != nil {
return errors.Wrap(err, "k8s-cm-cache: unmarshal error")
return errors.Wrapf(err, "k8s-cm-cache: JSON unmarshaling failed for configmap %s", identifier)
}
return nil
}
@@ -172,6 +176,11 @@ func (k8scm *K8sCMCache) Get(identifier string, data interface{}) error {
func (k8scm *K8sCMCache) Delete(identifier string) error {
err := k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).Delete(identifier, nil)
if err != nil {
if apierrs.IsNotFound(err) {
klog.V(4).Infof("k8s-cm-cache: cannot delete missing metadata configmap %s, assuming it's already deleted", identifier)
return nil
}

return errors.Wrapf(err, "k8s-cm-cache: couldn't delete metadata configmap %s", identifier)
}
klog.V(4).Infof("k8s-cm-cache: successfully deleted metadata configmap %s", identifier)

58 pkg/util/k8sconfig.go Normal file
@@ -0,0 +1,58 @@
/*
Copyright 2019 ceph-csi authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8s "k8s.io/client-go/kubernetes"
)

/*
K8sConfig is a ConfigStore interface implementation that reads configuration
information from k8s secrets.

Each Ceph cluster configuration secret is expected to be named,
ceph-cluster-<clusterid>, where <clusterid> uniquely identifies and
separates the each Ceph cluster configuration.

The secret is expected to contain keys, as defined by the ConfigKeys constants
in the ConfigStore interface.
*/
type K8sConfig struct {
Client *k8s.Clientset
Namespace string
}

// DataForKey reads the appropriate k8s secret, named using clusterid, and
// returns the contents of key within the secret
func (kc *K8sConfig) DataForKey(clusterid, key string) (data string, err error) {
secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get("ceph-cluster-"+clusterid, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("error fetching configuration for cluster ID (%s). (%s)", clusterid, err)
return
}

content, ok := secret.Data[key]
if !ok {
err = fmt.Errorf("missing data for key (%s) in cluster configuration of (%s)", key, clusterid)
return
}

data = string(content)
return
}

@@ -1,45 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"flag"
"os"

"k8s.io/klog"
)

// InitLogging initializes klog alongside glog
// XXX: This is just a temporary solution till all deps move to klog
func InitLogging() {
if err := flag.Set("logtostderr", "true"); err != nil {
klog.Errorf("failed to set logtostderr flag: %v", err)
os.Exit(1)
}

flag.Parse()

klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)

// Sync klog flags with glog
flag.CommandLine.VisitAll(func(f1 *flag.Flag) {
if f2 := klogFlags.Lookup(f1.Name); f2 != nil {
f2.Value.Set(f1.Value.String()) // nolint: errcheck, gosec
}
})
}

@@ -32,10 +32,9 @@ import (
// NodeCache to store metadata
type NodeCache struct {
BasePath string
CacheDir string
}

var cacheDir = "controller"

var errDec = errors.New("file not found")

// EnsureCacheDirectory creates cache directory if not present
@@ -44,7 +43,7 @@ func (nc *NodeCache) EnsureCacheDirectory(cacheDir string) error {
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
// #nosec
if err := os.Mkdir(fullPath, 0755); err != nil {
return errors.Wrapf(err, "node-cache: failed to create %s folder with error: %v", fullPath, err)
return errors.Wrapf(err, "node-cache: failed to create %s folder", fullPath)
}
}
return nil
@@ -52,15 +51,15 @@ func (nc *NodeCache) EnsureCacheDirectory(cacheDir string) error {

//ForAll list the metadata in Nodecache and filters outs based on the pattern
func (nc *NodeCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) error {
err := nc.EnsureCacheDirectory(cacheDir)
err := nc.EnsureCacheDirectory(nc.CacheDir)
if err != nil {
return errors.Wrap(err, "node-cache: couldn't ensure cache directory exists")
}
files, err := ioutil.ReadDir(path.Join(nc.BasePath, cacheDir))
files, err := ioutil.ReadDir(path.Join(nc.BasePath, nc.CacheDir))
if err != nil {
return errors.Wrapf(err, "node-cache: failed to read %s folder", nc.BasePath)
}
path := path.Join(nc.BasePath, cacheDir)
path := path.Join(nc.BasePath, nc.CacheDir)
for _, file := range files {
err = decodeObj(path, pattern, file, destObj)
if err == errDec {
@@ -104,7 +103,7 @@ func decodeObj(filepath, pattern string, file os.FileInfo, destObj interface{})

// Create creates the metadata file in cache directory with identifier name
func (nc *NodeCache) Create(identifier string, data interface{}) error {
file := path.Join(nc.BasePath, cacheDir, identifier+".json")
file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json")
fp, err := os.Create(file)
if err != nil {
return errors.Wrapf(err, "node-cache: failed to create metadata storage file %s\n", file)
@@ -126,10 +125,14 @@ func (nc *NodeCache) Create(identifier string, data interface{}) error {

// Get retrieves the metadata from cache directory with identifier name
func (nc *NodeCache) Get(identifier string, data interface{}) error {
file := path.Join(nc.BasePath, cacheDir, identifier+".json")
file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json")
// #nosec
fp, err := os.Open(file)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
return &CacheEntryNotFound{err}
}

return errors.Wrapf(err, "node-cache: open error for %s", file)
}

@@ -149,12 +152,16 @@ func (nc *NodeCache) Get(identifier string, data interface{}) error {

// Delete deletes the metadata file from cache directory with identifier name
func (nc *NodeCache) Delete(identifier string) error {
file := path.Join(nc.BasePath, cacheDir, identifier+".json")
file := path.Join(nc.BasePath, nc.CacheDir, identifier+".json")
err := os.Remove(file)
if err != nil {
if err != os.ErrNotExist {
return errors.Wrapf(err, "node-cache: error removing file %s", file)
if err == os.ErrNotExist {
klog.V(4).Infof("node-cache: cannot delete missing metadata storage file %s, assuming it's already deleted", file)
return nil
}

return errors.Wrapf(err, "node-cache: error removing file %s", file)

}
klog.V(4).Infof("node-cache: successfully deleted metadata storage file at: %+v\n", file)
return nil

76 pkg/util/stripsecrets.go Normal file
@@ -0,0 +1,76 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"strings"
)

const (
keyArg = "--key="
secretArg = "secret="
optionsArgSeparator = ','
strippedKey = "--key=***stripped***"
strippedSecret = "secret=***stripped***"
)

// StripSecretInArgs strips values of either "--key" or "secret=".
// `args` is left unchanged.
// Expects only one occurrence of either "--key" or "secret=".
func StripSecretInArgs(args []string) []string {
out := make([]string, len(args))
copy(out, args)

if !stripKey(out) {
stripSecret(out)
}

return out
}

func stripKey(out []string) bool {
for i := range out {
if strings.HasPrefix(out[i], keyArg) {
out[i] = strippedKey
return true
}
}

return false
}

func stripSecret(out []string) bool {
for i := range out {
arg := out[i]
begin := strings.Index(arg, secretArg)

if begin == -1 {
continue
}

end := strings.IndexByte(arg[begin+len(secretArg):], optionsArgSeparator)

out[i] = arg[:begin] + strippedSecret
if end != -1 {
out[i] += arg[end+len(secretArg):]
}

return true
}

return false
}

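Editor's note: a short usage sketch of StripSecretInArgs above, suitable for sanitizing an rbd CLI invocation before logging (argument values hypothetical):

	args := []string{"map", "image1", "-m", "mon1:6789", "--key=AQB4secretkey"}
	safe := util.StripSecretInArgs(args)
	// safe is {"map", "image1", "-m", "mon1:6789", "--key=***stripped***"},
	// while args itself is left untouched and can still be executed.
	klog.V(4).Infof("rbd: %v", safe)
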
93 pkg/util/util.go Normal file
@@ -0,0 +1,93 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"os"
"path"
"strings"

"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/klog"
)

// remove this once kubernetes v1.14.0 release is done
// https://github.com/kubernetes/cloud-provider/blob/master/volume/helpers/rounding.go
const (
// MiB - MebiByte size
MiB = 1024 * 1024
)

// RoundUpToMiB rounds up given quantity upto chunks of MiB
func RoundUpToMiB(size int64) int64 {
requestBytes := size
return roundUpSize(requestBytes, MiB)
}

func roundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
roundedUp := volumeSizeBytes / allocationUnitBytes
if volumeSizeBytes%allocationUnitBytes > 0 {
roundedUp++
}
return roundedUp
}
|
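To make the ceiling-division arithmetic concrete, a small runnable sketch (same assumed import path as above; the byte counts are arbitrary):

package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/pkg/util"
)

func main() {
	fmt.Println(util.RoundUpToMiB(1))             // 1  (a single byte still occupies a full MiB)
	fmt.Println(util.RoundUpToMiB(1024 * 1024))   // 1  (an exact multiple is not rounded)
	fmt.Println(util.RoundUpToMiB(1024*1024 + 1)) // 2  (one byte over rounds up)
}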

// CreatePersistanceStorage creates the storage path and initializes a new cache persister
func CreatePersistanceStorage(sPath, metaDataStore, driverName string) (CachePersister, error) {
	var err error
	if err = createPersistentStorage(path.Join(sPath, "controller")); err != nil {
		klog.Errorf("failed to create persistent storage for controller: %v", err)
		return nil, err
	}

	if err = createPersistentStorage(path.Join(sPath, "node")); err != nil {
		klog.Errorf("failed to create persistent storage for node: %v", err)
		return nil, err
	}

	cp, err := NewCachePersister(metaDataStore, driverName)
	if err != nil {
		klog.Errorf("failed to define cache persistence method: %v", err)
		return nil, err
	}
	return cp, err
}
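A hedged sketch of how a driver's startup code might call this; the storage path, metadata-store name, and driver name below are placeholders rather than values fixed by this commit.

package main

import (
	"k8s.io/klog"

	"github.com/ceph/ceph-csi/pkg/util"
)

func main() {
	// Placeholder arguments: a scratch directory, an assumed metadata-store
	// name ("node"), and an example driver name.
	cp, err := util.CreatePersistanceStorage("/tmp/csi", "node", "example.csi.driver")
	if err != nil {
		klog.Fatalf("failed to set up cache persistence: %v", err)
	}
	_ = cp // the returned CachePersister is used to save/restore volume metadata
}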

func createPersistentStorage(persistentStoragePath string) error {
	return os.MkdirAll(persistentStoragePath, os.FileMode(0755))
}

// ValidateDriverName validates the driver name
func ValidateDriverName(driverName string) error {
	if len(driverName) == 0 {
		return errors.New("driver name is empty")
	}

	if len(driverName) > 63 {
		return errors.New("driver name length should not exceed 63 characters")
	}
	var err error
	for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {
		if err == nil {
			err = errors.New(msg)
			continue
		}
		err = errors.Wrap(err, msg)
	}
	return err
}
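Finally, an illustrative sketch of the validation behavior for a few example names (the names themselves are arbitrary; a DNS-1123-style name such as rbd.csi.ceph.com passes):

package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/pkg/util"
)

func main() {
	// A DNS-1123 subdomain of at most 63 characters passes.
	fmt.Println(util.ValidateDriverName("rbd.csi.ceph.com")) // <nil>

	// Empty and non-conforming names are rejected.
	fmt.Println(util.ValidateDriverName(""))            // driver name is empty
	fmt.Println(util.ValidateDriverName("Bad_Driver!")) // DNS-1123 subdomain error(s)
}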
153  scripts/golangci.yml  Normal file
@@ -0,0 +1,153 @@
---
# https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml
# This file contains all available configuration options
# with their default values.

# options for analysis running
run:
  # default concurrency is the available CPU number
  concurrency: 4

  # timeout for analysis, e.g. 30s, 5m, default is 1m
  deadline: 10m

  # exit code when at least one issue was found, default is 1
  issues-exit-code: 1

  # include test files or not, default is true
  tests: true

  # which dirs to skip: they won't be analyzed;
  # can use regexp here: generated.*, regexp is applied on full path;
  # default value is empty list, but next dirs are always skipped independently
  # from this option's value:
  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs:
    - vendor$

  # which files to skip: they will be analyzed, but issues from them
  # won't be reported. Default value is empty list, but there is
  # no need to include all autogenerated files; we confidently recognize
  # autogenerated files. If it's not the case, please let us know.
  skip-files:

# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle|code-climate,
  # default is "colored-line-number"
  format: colored-line-number

  # print lines of code with issue, default is true
  print-issued-lines: true

  # print linter name at the end of issue text, default is true
  print-linter-name: true

# all available settings of specific linters
linters-settings:
  errcheck:
    # report about not checking errors in type assertions:
    # `a := b.(MyStruct)`;
    # default is false: such cases aren't reported by default.
    check-type-assertions: true

    # report about assignment of errors to blank identifier:
    # `num, _ := strconv.Atoi(numStr)`;
    # default is false: such cases aren't reported by default.
    check-blank: true

    # path to a file containing a list of functions to exclude from checking
    # see https://github.com/kisielk/errcheck#excluding-functions for details
    # exclude: /path/to/file.txt
  govet:
    # report about shadowed variables
    check-shadowing: true
  golint:
    # minimal confidence for issues, default is 0.8
    min-confidence: 0
  gofmt:
    # simplify code: gofmt with `-s` option, true by default
    simplify: true
  goimports:
    # put imports beginning with prefix after 3rd-party packages;
    # it's a comma-separated list of prefixes
    local-prefixes: github.com/ceph/ceph-csi
  gocyclo:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 20
  maligned:
    # print struct with more effective memory layout or not, false by default
    suggest-new: true
  dupl:
    # tokens count to trigger issue, 150 by default
    threshold: 100
  goconst:
    # minimal length of string constant, 3 by default
    min-len: 3
    # minimal occurrences count to trigger, 3 by default
    min-occurrences: 3
  depguard:
    list-type: blacklist
    include-go-root: false
    packages:
      - github.com/davecgh/go-spew/spew
  misspell:
    # Correct spellings using locale preferences for US or UK.
    # Default is to use a neutral variety of English.
    # Setting locale to US will correct the British spelling of 'colour' to
    # 'color'.
    locale: US
    ignore-words:
      - someword
  lll:
    # max line length, lines longer will be reported. Default is 120.
    # '\t' is counted as 1 character by default, and can be changed with the
    # tab-width option
    # TODO: reduce the max line length to 120 chars
    line-length: 180
    # tab width in spaces. Defaults to 1.
    tab-width: 1
  unused:
    # treat code as a program (not a library) and report unused exported
    # identifiers; default is false.
    # XXX: if you enable this setting, unused will report a lot of
    # false positives in text editors:
    # if it's called for a subdir of a project, it can't find funcs usages.
    # All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  unparam:
    # Inspect exported functions, default is false. Set to true if no external
    # program/library imports your code.
    # XXX: if you enable this setting, unparam will report a lot of
    # false positives in text editors:
    # if it's called for a subdir of a project, it can't find external
    # interfaces. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  nakedret:
    # make an issue if func has more lines of code than this setting and
    # it has naked returns; default is 30
    max-func-lines: 30

linters:
  enable:
    - megacheck
    - govet
    - golint
    - stylecheck
    - interfacer
    - unconvert
    - gofmt
    - gocyclo
    - maligned
    - lll
    - nakedret
  enable-all: false
  disable:
    - prealloc
  disable-all: false
  presets:
    - bugs
    - unused
  fast: false
scripts/lint-go.sh
@@ -2,11 +2,8 @@

 set -o pipefail

-if [[ -x "$(command -v gometalinter)" ]]; then
-	gometalinter -j "${GO_METALINTER_THREADS:-1}" \
-		--sort path --sort line --sort column --deadline=10m \
-		--enable=misspell --enable=staticcheck \
-		--vendor "${@-./...}"
+if [[ -x "$(command -v golangci-lint)" ]]; then
+	golangci-lint --config=scripts/golangci.yml run ./... -v
 else
-	echo "WARNING: gometalinter not found, skipping lint tests" >&2
+	echo "WARNING: golangci-lint not found, skipping lint tests" >&2
 fi
scripts/lint-text.sh
@@ -44,6 +44,6 @@ run_check '.*\.(ba)?sh' bash -n

 # Install via: pip install yamllint
 # disable yamllint check for helm charts
-run_check '.*\.ya?ml' yamllint -s -d "{extends: default, rules: {line-length: {allow-non-breakable-inline-mappings: true}},ignore: deploy/rbd/helm/templates/*.yaml}"
+run_check '.*\.ya?ml' yamllint -s -d "{extends: default, rules: {line-length: {allow-non-breakable-inline-mappings: true}},ignore: deploy/*/helm/templates/*.yaml}"

 echo "ALL OK."
191  vendor/github.com/golang/glog/LICENSE  generated  vendored
@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.

"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
1180  vendor/github.com/golang/glog/glog.go  generated  vendored
File diff suppressed because it is too large.
124  vendor/github.com/golang/glog/glog_file.go  generated  vendored
@@ -1,124 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// File I/O for logs.

package glog

import (
	"errors"
	"flag"
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800

// logDirs lists the candidate directories for new log files.
var logDirs []string

// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")

func createLogDirs() {
	if *logDir != "" {
		logDirs = append(logDirs, *logDir)
	}
	logDirs = append(logDirs, os.TempDir())
}

var (
	pid      = os.Getpid()
	program  = filepath.Base(os.Args[0])
	host     = "unknownhost"
	userName = "unknownuser"
)

func init() {
	h, err := os.Hostname()
	if err == nil {
		host = shortHostname(h)
	}

	current, err := user.Current()
	if err == nil {
		userName = current.Username
	}

	// Sanitize userName since it may contain filepath separators on Windows.
	userName = strings.Replace(userName, `\`, "_", -1)
}

// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
	if i := strings.Index(hostname, "."); i >= 0 {
		return hostname[:i]
	}
	return hostname
}

// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program,
		host,
		userName,
		tag,
		t.Year(),
		t.Month(),
		t.Day(),
		t.Hour(),
		t.Minute(),
		t.Second(),
		pid)
	return name, program + "." + tag
}

var onceLogDirs sync.Once

// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
	onceLogDirs.Do(createLogDirs)
	if len(logDirs) == 0 {
		return nil, "", errors.New("log: no log dirs")
	}
	name, link := logName(tag, t)
	var lastErr error
	for _, dir := range logDirs {
		fname := filepath.Join(dir, name)
		f, err := os.Create(fname)
		if err == nil {
			symlink := filepath.Join(dir, link)
			os.Remove(symlink)        // ignore err
			os.Symlink(name, symlink) // ignore err
			return f, fname, nil
		}
		lastErr = err
	}
	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}
Some files were not shown because too many files have changed in this diff.