ceph-csi/pkg/rbd/rbd.go
ShyamsundarR 2064e674a4 Addressed using k8s client APIs to fetch secrets
Based on the review comments, addressed the following:
- Moved away from having to update the pod with volumes
when a new Ceph cluster is added for provisioning via the
CSI driver

- The above now uses k8s APIs to fetch secrets
  - TBD: Need to add a watch mechanism such that these
secrets can be cached and updated when changed

- Folded the Ceph configuration and ID/key config map
and secrets into a single secret

- Provided the ability to read the same config via mapped
or created files within the pod

Tests:
- Ran PV creation/deletion/attach/use using new scheme
StorageClass
- Ran PV creation/deletion/attach/use using older scheme
to ensure nothing is broken
- Did not execute snapshot related tests

Signed-off-by: ShyamsundarR <srangana@redhat.com>
2019-03-26 16:19:24 +00:00
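
For reference, a minimal sketch of the secret-fetching approach described
above, using the client-go API of the Kubernetes release this tree vendors
(pre-context Get signature); the helper name, namespace, and secret name are
illustrative assumptions, not code from this commit:

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/rest"
	)

	// fetchCephSecret reads a Ceph credentials secret through the k8s API
	// instead of relying on volumes mounted into the pod (hypothetical).
	func fetchCephSecret(namespace, name string) (map[string][]byte, error) {
		cfg, err := rest.InClusterConfig() // assumes an in-cluster deployment
		if err != nil {
			return nil, err
		}
		cs, err := kubernetes.NewForConfig(cfg)
		if err != nil {
			return nil, err
		}
		secret, err := cs.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return secret.Data, nil
	}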

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd

import (
	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
	"github.com/ceph/ceph-csi/pkg/util"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/util/nsenter"
	"k8s.io/utils/exec"
)

// rbdDefaultAdminID and rbdDefaultUserID are the default Ceph admin and user
// IDs used when none are supplied in the request
const (
	rbdDefaultAdminID = "admin"
	rbdDefaultUserID  = rbdDefaultAdminID
)

// PluginFolder defines the location of the ceph plugin
var PluginFolder = "/var/lib/kubelet/plugins/"

// Driver contains the default identity, node, and controller servers of the
// rbd CSI driver
type Driver struct {
	cd  *csicommon.CSIDriver
	ids *IdentityServer
	ns  *NodeServer
	cs  *ControllerServer
}

var (
	version = "1.0.0"

	// ConfStore is the global config store
	ConfStore *util.ConfigStore
)

// NewDriver returns a new rbd driver
func NewDriver() *Driver {
	return &Driver{}
}

// NewIdentityServer initializes an identity server for the rbd CSI driver
func NewIdentityServer(d *csicommon.CSIDriver) *IdentityServer {
	return &IdentityServer{
		DefaultIdentityServer: csicommon.NewDefaultIdentityServer(d),
	}
}

// NewControllerServer initializes a controller server for the rbd CSI driver
func NewControllerServer(d *csicommon.CSIDriver, cachePersister util.CachePersister) *ControllerServer {
	return &ControllerServer{
		DefaultControllerServer: csicommon.NewDefaultControllerServer(d),
		MetadataStore:           cachePersister,
	}
}

// NewNodeServer initializes a node server for the rbd CSI driver. When
// containerized is true, mount operations are executed in the host mount
// namespace via nsenter, since the driver itself runs inside a container.
func NewNodeServer(d *csicommon.CSIDriver, containerized bool) (*NodeServer, error) {
	mounter := mount.New("")
	if containerized {
		ne, err := nsenter.NewNsenter(nsenter.DefaultHostRootFsPath, exec.New())
		if err != nil {
			return nil, err
		}
		mounter = mount.NewNsenterMounter("", ne)
	}
	return &NodeServer{
		DefaultNodeServer: csicommon.NewDefaultNodeServer(d),
		mounter:           mounter,
	}, nil
}
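
// A hypothetical usage sketch (not part of this file): a containerized
// deployment would construct the node server with containerized=true so
// that mounts happen in the host mount namespace:
//
//	ns, err := NewNodeServer(d, true)
//	if err != nil {
//		klog.Fatalf("failed to create node server: %v", err)
//	}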

// Run starts non-blocking gRPC identity, controller, and node servers for the
// rbd CSI driver, which can serve multiple requests in parallel
func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, configroot string, cachePersister util.CachePersister) {
	var err error
	klog.Infof("Driver: %v version: %v", driverName, version)

	// Initialize the config store
	ConfStore, err = util.NewConfigStore(configroot)
	if err != nil {
		klog.Fatalf("Failed to initialize config store: %v", err)
	}

	// Initialize the default library driver
	r.cd = csicommon.NewCSIDriver(driverName, version, nodeID)
	if r.cd == nil {
		klog.Fatalln("Failed to initialize CSI Driver.")
	}
	r.cd.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
		csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
		csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
		csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
		csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
	})

	// We only support the multi-writer option when using block, but it is a
	// supported capability for the plugin in general. In addition, we want to
	// add the remaining modes such as MULTI_NODE_READER_ONLY and
	// MULTI_NODE_SINGLE_WRITER, but the RO modes need verification first;
	// those will be addressed as follow-up features.
	r.cd.AddVolumeCapabilityAccessModes(
		[]csi.VolumeCapability_AccessMode_Mode{
			csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
			csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		})

	// Create the gRPC servers
	r.ids = NewIdentityServer(r.cd)
	r.ns, err = NewNodeServer(r.cd, containerized)
	if err != nil {
		klog.Fatalf("failed to start node server, err %v\n", err)
	}
	r.cs = NewControllerServer(r.cd, cachePersister)
	if err = r.cs.LoadExDataFromMetadataStore(); err != nil {
		klog.Fatalf("failed to load metadata from store, err %v\n", err)
	}

	s := csicommon.NewNonBlockingGRPCServer()
	s.Start(endpoint, r.ids, r.cs, r.ns)
	s.Wait()
}
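
// Example (hypothetical) wiring from a main package; the driver name and
// endpoint below are illustrative assumptions, and configroot is either a
// directory of mapped config files or a marker to read config via the k8s
// API, per the commit message:
//
//	d := rbd.NewDriver()
//	d.Run("rbd.csi.ceph.com", nodeID, "unix:///csi/csi.sock",
//		false /* containerized */, configroot, cachePersister)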