Support mounting and deleting version 1.0.0 RBD volumes

This commit adds support for mounting and deleting volumes provisioned by older
plugin versions (1.0.0), in order to remain backward compatible with volumes
created by 1.0.0.

It adds back the ability to specify where the older metadata was stored, using
the metadatastorage option to the plugin, and then uses the metadata found
there to mount and delete the older volumes.
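
For illustration, a minimal sketch of what loading such per-volume metadata
could look like with the "node" persistence method follows; the file layout
and the oldVolMeta fields are assumptions for this sketch, not the actual
ceph-csi on-disk format.

// Sketch only: loading 1.0.0-era per-volume metadata persisted on the node.
// The file layout and the oldVolMeta fields are assumptions for this sketch,
// not the actual ceph-csi on-disk format.
package legacy

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// oldVolMeta stands in for the metadata a 1.0.0 plugin persisted per volume.
type oldVolMeta struct {
	VolName  string `json:"volName"`
	Pool     string `json:"pool"`
	Monitors string `json:"monitors"`
}

// loadOldVolMeta reads the cached JSON metadata for volID from pluginFolder.
func loadOldVolMeta(pluginFolder, volID string) (*oldVolMeta, error) {
	b, err := os.ReadFile(filepath.Join(pluginFolder, volID+".json"))
	if err != nil {
		return nil, fmt.Errorf("no cached metadata for volume %s: %w", volID, err)
	}
	m := &oldVolMeta{}
	if err := json.Unmarshal(b, m); err != nil {
		return nil, fmt.Errorf("corrupt metadata for volume %s: %w", volID, err)
	}
	return m, nil
}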

It also supports the various ways in which monitor information may have been
specified for such volumes (in the StorageClass, or in the secret under a
default or user-specified key), so that the monitor information used remains
current.
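
A hedged sketch of that monitor-resolution fallback follows, in the same
illustrative package as the sketch above; the parameter and key names
("monitors", "monValueFromSecret") are assumptions based on this description,
not confirmed 1.0.0 parameter names.

// Sketch only: resolving monitor information for an older volume. The
// parameter and key names ("monitors", "monValueFromSecret") are assumptions
// for this sketch, not confirmed 1.0.0 parameter names.
package legacy

import (
	"errors"
	"fmt"
)

// getMonitors looks for monitors first in the StorageClass parameters, then
// under a user-specified key in the secret, and finally under the default
// "monitors" key in the secret.
func getMonitors(scParams, secrets map[string]string) (string, error) {
	if mon := scParams["monitors"]; mon != "" {
		return mon, nil
	}
	if key := scParams["monValueFromSecret"]; key != "" {
		if mon := secrets[key]; mon != "" {
			return mon, nil
		}
		return "", fmt.Errorf("monitor key %q missing from secret", key)
	}
	if mon := secrets["monitors"]; mon != "" {
		return mon, nil
	}
	return "", errors.New("missing monitor information for volume")
}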

Testing done:
- Mount/Delete 1.0.0 plugin created volume with monitors in the StorageClass
- Mount/Delete 1.0.0 plugin created volume with monitors in the secret with
  a key "monitors"
- Mount/Delete 1.0.0 plugin created volume with monitors in the secret with
  a user specified key
- PVC creation and deletion with the current version (to ensure, at a minimum,
  that no existing functionality is broken)
- Negative cases, where monitor information is missing from the secret or
  present under a different key name, to verify that the failure scenarios
  behave as expected (a test sketch covering these cases follows this list)
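
For reference, a table-driven sketch exercising the hypothetical getMonitors
helper from the earlier sketch; the case names and key values below are
illustrative only.

// Sketch only: table-driven coverage of the monitor-resolution cases listed
// above, against the hypothetical getMonitors helper (same package).
package legacy

import "testing"

func TestGetMonitors(t *testing.T) {
	cases := []struct {
		name    string
		params  map[string]string
		secrets map[string]string
		wantErr bool
	}{
		{"monitors in StorageClass", map[string]string{"monitors": "mon1:6789"}, nil, false},
		{"monitors in secret, default key", nil, map[string]string{"monitors": "mon1:6789"}, false},
		{"monitors in secret, user key",
			map[string]string{"monValueFromSecret": "myMons"},
			map[string]string{"myMons": "mon1:6789"}, false},
		{"monitor key missing from secret",
			map[string]string{"monValueFromSecret": "myMons"},
			map[string]string{"wrongKey": "mon1:6789"}, true},
	}
	for _, c := range cases {
		if _, err := getMonitors(c.params, c.secrets); (err != nil) != c.wantErr {
			t.Errorf("%s: got err=%v, wantErr=%v", c.name, err, c.wantErr)
		}
	}
}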

Updates #378

Follow-up work:
- Documentation on how to upgrade to the 1.1 plugin while retaining the above
  functionality for older volumes

Signed-off-by: ShyamsundarR <srangana@redhat.com>
Authored by ShyamsundarR on 2019-05-31 14:09:24 -04:00; committed by mergify[bot]
parent 09f126691c
commit fa68c35f3b
7 changed files with 234 additions and 52 deletions

@@ -44,14 +44,14 @@ var (
 	nodeID     = flag.String("nodeid", "", "node id")
 	instanceID = flag.String("instanceid", "", "Unique ID distinguishing this instance of Ceph CSI among other"+
 		" instances, when sharing Ceph clusters across CSI instances for provisioning")
+	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
 
 	// rbd related flags
 	containerized = flag.Bool("containerized", true, "whether run as containerized")
 
 	// cephfs related flags
-	volumeMounter   = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
-	mountCacheDir   = flag.String("mountcachedir", "", "mount info cache save dir")
-	metadataStorage = flag.String("metadatastorage", "", "metadata persistence method [node|k8s_configmap]")
+	volumeMounter = flag.String("volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
+	mountCacheDir = flag.String("mountcachedir", "", "mount info cache save dir")
 )
 
 func init() {
@@ -109,8 +109,15 @@ func main() {
 	switch driverType {
 	case rbdType:
 		rbd.PluginFolder += dname
+		if *metadataStorage != "" {
+			cp, err = util.CreatePersistanceStorage(
+				rbd.PluginFolder, *metadataStorage, dname)
+			if err != nil {
+				os.Exit(1)
+			}
+		}
 		driver := rbd.NewDriver()
-		driver.Run(dname, *nodeID, *endpoint, *instanceID, *containerized)
+		driver.Run(dname, *nodeID, *endpoint, *instanceID, *containerized, cp)
 
 	case cephfsType:
 		cephfs.PluginFolder += dname
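
To illustrate how the cp handle threaded into driver.Run above might be
consumed, here is a hedged sketch of a delete path that falls back to the
persisted 1.0.0 metadata when a volume ID does not parse as a current-version
ID; CachePersister, rbdVolumeV1, and rbdRemove are simplified stand-ins, not
the actual ceph-csi types and helpers.

// Sketch only: a delete path falling back to persisted 1.0.0 metadata.
// CachePersister, rbdVolumeV1, and rbdRemove are simplified stand-ins for the
// actual ceph-csi types and helpers.
package legacy

import "os/exec"

// CachePersister mimics a metadatastorage-backed lookup/delete interface.
type CachePersister interface {
	Get(identifier string, data interface{}) error
	Delete(identifier string) error
}

type rbdVolumeV1 struct {
	VolName  string `json:"volName"`
	Pool     string `json:"pool"`
	Monitors string `json:"monitors"`
}

// deleteLegacyVolume removes an image using the monitors/pool recorded by the
// 1.0.0 plugin, then drops the now-stale metadata entry.
func deleteLegacyVolume(cp CachePersister, volID string) error {
	vol := &rbdVolumeV1{}
	if err := cp.Get(volID, vol); err != nil {
		return err // no 1.0.0 metadata found: not a legacy volume
	}
	if err := rbdRemove(vol.Monitors, vol.Pool, vol.VolName); err != nil {
		return err
	}
	return cp.Delete(volID)
}

// rbdRemove is a hypothetical helper shelling out to `rbd rm`.
func rbdRemove(monitors, pool, image string) error {
	return exec.Command("rbd", "rm", pool+"/"+image, "-m", monitors).Run()
}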