/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
2018-01-16 01:52:28 +00:00
"fmt"
2018-01-09 18:59:50 +00:00
"os"
2018-11-07 02:05:19 +00:00
"os/exec"
"regexp"
2018-01-16 01:52:28 +00:00
"strings"
2018-01-09 18:59:50 +00:00
2019-05-13 04:47:17 +00:00
csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
2019-04-22 21:35:39 +00:00
"github.com/ceph/ceph-csi/pkg/util"
2018-01-09 18:59:50 +00:00
2018-11-24 19:18:24 +00:00
"github.com/container-storage-interface/spec/lib/go/csi"
2019-02-18 11:30:28 +00:00
"golang.org/x/net/context"
2018-01-09 18:59:50 +00:00
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
2019-02-18 11:30:28 +00:00
"k8s.io/klog"
2018-01-09 18:59:50 +00:00
"k8s.io/kubernetes/pkg/util/mount"
)
2019-01-28 11:47:06 +00:00
// NodeServer struct of ceph rbd driver with supported methods of CSI
// node server spec
2019-01-17 07:51:06 +00:00
type NodeServer struct {
2018-01-09 18:59:50 +00:00
* csicommon . DefaultNodeServer
2018-10-15 14:59:41 +00:00
mounter mount . Interface
2018-01-09 18:59:50 +00:00
}
2019-07-03 10:02:36 +00:00
// NodeStageVolume mounts the volume to a staging path on the node.
2019-07-31 16:24:19 +00:00
// Implementation notes:
// - stagingTargetPath is the directory passed in the request where the volume needs to be staged
// - We stage the volume into a directory, named after the VolumeID inside stagingTargetPath if
// it is a file system
// - We stage the volume into a file, named after the VolumeID inside stagingTargetPath if it is
// a block volume
2019-07-03 10:02:36 +00:00
func ( ns * NodeServer ) NodeStageVolume ( ctx context . Context , req * csi . NodeStageVolumeRequest ) ( * csi . NodeStageVolumeResponse , error ) {
if err := util . ValidateNodeStageVolumeRequest ( req ) ; err != nil {
return nil , err
2019-04-22 21:35:39 +00:00
}
2019-01-28 13:59:16 +00:00
2019-07-03 10:02:36 +00:00
isBlock := req . GetVolumeCapability ( ) . GetBlock ( ) != nil
disableInUseChecks := false
// MULTI_NODE_MULTI_WRITER is supported by default for Block access type volumes
if req . VolumeCapability . AccessMode . Mode == csi . VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
if isBlock {
disableInUseChecks = true
} else {
klog . Warningf ( "MULTI_NODE_MULTI_WRITER currently only supported with volumes of access type `block`, invalid AccessMode for volume: %v" , req . GetVolumeId ( ) )
return nil , status . Error ( codes . InvalidArgument , "rbd: RWX access mode request is only valid for volumes with access type `block`" )
}
2019-04-22 21:35:39 +00:00
}
2019-07-03 10:02:36 +00:00
volID := req . GetVolumeId ( )
2019-04-22 21:35:39 +00:00
2019-06-25 19:29:17 +00:00
cr , err := util . NewUserCredentials ( req . GetSecrets ( ) )
2019-06-01 21:26:42 +00:00
if err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
2019-06-25 19:29:17 +00:00
defer cr . DeleteCredentials ( )
2019-06-01 21:26:42 +00:00
2019-05-31 18:09:24 +00:00
isLegacyVolume := false
2019-07-03 10:02:36 +00:00
volName , err := getVolumeName ( req . GetVolumeId ( ) )
2019-05-31 18:09:24 +00:00
if err != nil {
// error ErrInvalidVolID may mean this is an 1.0.0 version volume, check for name
// pattern match in addition to error to ensure this is a likely v1.0.0 volume
if _ , ok := err . ( ErrInvalidVolID ) ; ! ok || ! isLegacyVolumeID ( req . GetVolumeId ( ) ) {
return nil , status . Error ( codes . InvalidArgument , err . Error ( ) )
}
2019-07-03 10:02:36 +00:00
volName , err = getLegacyVolumeName ( req . GetStagingTargetPath ( ) )
2019-05-31 18:09:24 +00:00
if err != nil {
return nil , status . Error ( codes . InvalidArgument , err . Error ( ) )
}
isLegacyVolume = true
}
2019-07-31 16:24:19 +00:00
stagingTargetPath := req . GetStagingTargetPath ( )
stagingTargetPath += "/" + volID
2018-01-09 18:59:50 +00:00
2019-07-03 10:02:36 +00:00
idLk := nodeVolumeIDLocker . Lock ( volID )
defer nodeVolumeIDLocker . Unlock ( idLk , volID )
2019-07-25 09:01:10 +00:00
var isNotMnt bool
// check if stagingPath is already mounted
isNotMnt , err = mount . IsNotMountPoint ( ns . mounter , stagingTargetPath )
if err != nil && ! os . IsNotExist ( err ) {
2019-07-03 10:02:36 +00:00
return nil , status . Error ( codes . Internal , err . Error ( ) )
2018-01-09 18:59:50 +00:00
}
2019-03-14 00:18:04 +00:00
2019-07-03 10:02:36 +00:00
if ! isNotMnt {
klog . Infof ( "rbd: volume %s is already mounted to %s, skipping" , req . GetVolumeId ( ) , stagingTargetPath )
return & csi . NodeStageVolumeResponse { } , nil
2019-03-14 00:18:04 +00:00
}
2019-05-31 18:09:24 +00:00
volOptions , err := genVolFromVolumeOptions ( req . GetVolumeContext ( ) , req . GetSecrets ( ) , disableInUseChecks , isLegacyVolume )
Move locks to more granular locking than CPU count based
As detailed in issue #279, current lock scheme has hash
buckets that are count of CPUs. This causes a lot of contention
when parallel requests are made to the CSI plugin. To reduce
lock contention, this commit introduces granular locks per
identifier.
The commit also changes the timeout for gRPC requests to Create
and Delete volumes, as the current timeout is 10s (kubernetes
documentation says 15s but code defaults are 10s). A virtual
setup takes about 12-15s to complete a request at times, that leads
to unwanted retries of the same request, hence the increased
timeout to enable operation completion with minimal retries.
Tests to create PVCs before and after these changes look like so,
Before:
Default master code + sidecar provisioner --timeout option set
to 30 seconds
20 PVCs
Creation: 3 runs, 396/391/400 seconds
Deletion: 3 runs, 218/271/118 seconds
- Once was stalled for more than 8 minutes and cancelled the run
After:
Current commit + sidecar provisioner --timeout option set to 30 sec
20 PVCs
Creation: 3 runs, 42/59/65 seconds
Deletion: 3 runs, 32/32/31 seconds
Fixes: #279
Signed-off-by: ShyamsundarR <srangana@redhat.com>
2019-06-22 16:43:28 +00:00
if err != nil {
2019-07-03 10:02:36 +00:00
return nil , status . Error ( codes . Internal , err . Error ( ) )
Move locks to more granular locking than CPU count based
As detailed in issue #279, current lock scheme has hash
buckets that are count of CPUs. This causes a lot of contention
when parallel requests are made to the CSI plugin. To reduce
lock contention, this commit introduces granular locks per
identifier.
The commit also changes the timeout for gRPC requests to Create
and Delete volumes, as the current timeout is 10s (kubernetes
documentation says 15s but code defaults are 10s). A virtual
setup takes about 12-15s to complete a request at times, that leads
to unwanted retries of the same request, hence the increased
timeout to enable operation completion with minimal retries.
Tests to create PVCs before and after these changes look like so,
Before:
Default master code + sidecar provisioner --timeout option set
to 30 seconds
20 PVCs
Creation: 3 runs, 396/391/400 seconds
Deletion: 3 runs, 218/271/118 seconds
- Once was stalled for more than 8 minutes and cancelled the run
After:
Current commit + sidecar provisioner --timeout option set to 30 sec
20 PVCs
Creation: 3 runs, 42/59/65 seconds
Deletion: 3 runs, 32/32/31 seconds
Fixes: #279
Signed-off-by: ShyamsundarR <srangana@redhat.com>
2019-06-22 16:43:28 +00:00
}
2019-04-22 21:35:39 +00:00
volOptions . RbdImageName = volName
Move locks to more granular locking than CPU count based
As detailed in issue #279, current lock scheme has hash
buckets that are count of CPUs. This causes a lot of contention
when parallel requests are made to the CSI plugin. To reduce
lock contention, this commit introduces granular locks per
identifier.
The commit also changes the timeout for gRPC requests to Create
and Delete volumes, as the current timeout is 10s (kubernetes
documentation says 15s but code defaults are 10s). A virtual
setup takes about 12-15s to complete a request at times, that leads
to unwanted retries of the same request, hence the increased
timeout to enable operation completion with minimal retries.
Tests to create PVCs before and after these changes look like so,
Before:
Default master code + sidecar provisioner --timeout option set
to 30 seconds
20 PVCs
Creation: 3 runs, 396/391/400 seconds
Deletion: 3 runs, 218/271/118 seconds
- Once was stalled for more than 8 minutes and cancelled the run
After:
Current commit + sidecar provisioner --timeout option set to 30 sec
20 PVCs
Creation: 3 runs, 42/59/65 seconds
Deletion: 3 runs, 32/32/31 seconds
Fixes: #279
Signed-off-by: ShyamsundarR <srangana@redhat.com>
2019-06-22 16:43:28 +00:00
2018-01-09 18:59:50 +00:00
// Mapping RBD image
2019-06-01 21:26:42 +00:00
devicePath , err := attachRBDImage ( volOptions , cr )
2018-01-09 18:59:50 +00:00
if err != nil {
2019-07-03 10:02:36 +00:00
return nil , status . Error ( codes . Internal , err . Error ( ) )
2018-01-09 18:59:50 +00:00
}
2019-02-04 13:05:07 +00:00
klog . V ( 4 ) . Infof ( "rbd image: %s/%s was successfully mapped at %s\n" , req . GetVolumeId ( ) , volOptions . Pool , devicePath )
2018-01-09 18:59:50 +00:00
2019-07-24 13:18:23 +00:00
isMounted := false
2019-07-25 09:01:10 +00:00
isStagePathCreated := false
2019-07-24 13:18:23 +00:00
// if mounting to stagingpath fails unmap the rbd device. this wont leave any
// stale rbd device if unstage is not called
defer func ( ) {
if err != nil {
2019-07-31 16:24:19 +00:00
ns . cleanupStagingPath ( stagingTargetPath , devicePath , volID , isStagePathCreated , isMounted )
2019-07-24 13:18:23 +00:00
}
} ( )
2019-07-25 09:01:10 +00:00
err = ns . createStageMountPoint ( stagingTargetPath , isBlock )
if err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
isStagePathCreated = true
2019-07-03 10:02:36 +00:00
// nodeStage Path
err = ns . mountVolumeToStagePath ( req , stagingTargetPath , devicePath )
2019-01-28 19:55:10 +00:00
if err != nil {
2019-07-03 10:02:36 +00:00
return nil , status . Error ( codes . Internal , err . Error ( ) )
2019-01-28 19:55:10 +00:00
}
2019-07-24 13:18:23 +00:00
isMounted = true
2019-07-25 09:01:10 +00:00
2019-07-03 10:02:36 +00:00
err = os . Chmod ( stagingTargetPath , 0777 )
2019-06-11 12:40:31 +00:00
if err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
2019-07-03 10:02:36 +00:00
klog . Infof ( "rbd: successfully mounted volume %s to stagingTargetPath %s" , req . GetVolumeId ( ) , stagingTargetPath )
return & csi . NodeStageVolumeResponse { } , nil
}
2019-07-31 16:24:19 +00:00
func ( ns * NodeServer ) cleanupStagingPath ( stagingTargetPath , devicePath , volID string , isStagePathCreated , isMounted bool ) {
2019-07-24 13:18:23 +00:00
var err error
if isMounted {
err = ns . mounter . Unmount ( stagingTargetPath )
if err != nil {
klog . Errorf ( "failed to unmount stagingtargetPath: %s with error: %v" , stagingTargetPath , err )
}
}
// remove the block file created on staging path
2019-07-31 16:24:19 +00:00
if isStagePathCreated {
2019-07-24 13:18:23 +00:00
err = os . Remove ( stagingTargetPath )
if err != nil {
klog . Errorf ( "failed to remove stagingtargetPath: %s with error: %v" , stagingTargetPath , err )
}
}
// Unmapping rbd device
if err = detachRBDDevice ( devicePath ) ; err != nil {
klog . Errorf ( "failed to unmap rbd device: %s for volume %s with error: %v" , devicePath , volID , err )
}
}
2019-07-25 09:01:10 +00:00
func ( ns * NodeServer ) createStageMountPoint ( mountPath string , isBlock bool ) error {
if isBlock {
pathFile , err := os . OpenFile ( mountPath , os . O_CREATE | os . O_RDWR , 0750 )
if err != nil {
klog . Errorf ( "failed to create mountPath:%s with error: %v" , mountPath , err )
return status . Error ( codes . Internal , err . Error ( ) )
}
if err = pathFile . Close ( ) ; err != nil {
klog . Errorf ( "failed to close mountPath:%s with error: %v" , mountPath , err )
return status . Error ( codes . Internal , err . Error ( ) )
}
2019-07-31 16:24:19 +00:00
return nil
2019-07-25 09:01:10 +00:00
}
2019-07-31 16:24:19 +00:00
err := os . Mkdir ( mountPath , 0750 )
if err != nil {
klog . Errorf ( "failed to create mountPath:%s with error: %v" , mountPath , err )
return status . Error ( codes . Internal , err . Error ( ) )
}
2019-07-25 09:01:10 +00:00
return nil
}
2019-07-03 10:02:36 +00:00
// NodePublishVolume mounts the volume mounted to the device path to the target
// path
func ( ns * NodeServer ) NodePublishVolume ( ctx context . Context , req * csi . NodePublishVolumeRequest ) ( * csi . NodePublishVolumeResponse , error ) {
err := util . ValidateNodePublishVolumeRequest ( req )
if err != nil {
return nil , err
}
targetPath := req . GetTargetPath ( )
isBlock := req . GetVolumeCapability ( ) . GetBlock ( ) != nil
stagingPath := req . GetStagingTargetPath ( )
2019-07-31 16:24:19 +00:00
stagingPath += "/" + req . GetVolumeId ( )
2019-07-03 10:02:36 +00:00
idLk := targetPathLocker . Lock ( targetPath )
defer targetPathLocker . Unlock ( idLk , targetPath )
// Check if that target path exists properly
2019-07-25 09:01:10 +00:00
notMnt , err := ns . createTargetMountPath ( targetPath , isBlock )
2019-07-03 10:02:36 +00:00
if err != nil {
return nil , err
}
if ! notMnt {
return & csi . NodePublishVolumeResponse { } , nil
}
// Publish Path
err = ns . mountVolume ( stagingPath , req )
if err != nil {
return nil , err
}
klog . Infof ( "rbd: successfully mounted stagingPath %s to targetPath %s" , stagingPath , targetPath )
2019-01-28 19:55:10 +00:00
return & csi . NodePublishVolumeResponse { } , nil
}
2019-07-03 10:02:36 +00:00
func getVolumeName ( volID string ) ( string , error ) {
2019-04-22 21:35:39 +00:00
var vi util . CSIIdentifier
2019-07-03 10:02:36 +00:00
err := vi . DecomposeCSIID ( volID )
2019-04-22 21:35:39 +00:00
if err != nil {
2019-07-03 10:02:36 +00:00
err = fmt . Errorf ( "error decoding volume ID (%s) (%s)" , err , volID )
2019-05-31 18:09:24 +00:00
return "" , ErrInvalidVolID { err }
2019-01-28 19:55:10 +00:00
}
2019-04-22 21:35:39 +00:00
2019-05-14 19:15:01 +00:00
return volJournal . NamingPrefix ( ) + vi . ObjectUUID , nil
2019-01-28 19:55:10 +00:00
}
// getLegacyVolumeName derives the volume name of a pre-1.1.0 volume from its
// staging/mount path. File-system volume paths end in "/globalmount" or
// "/mount" and the volume name is the path element just before that suffix;
// for block volumes the volume name is the last path element itself.
func getLegacyVolumeName(mountPath string) (string, error) {
	lastElem := func(p string) string {
		parts := strings.Split(p, "/")
		return parts[len(parts)-1]
	}

	if strings.HasSuffix(mountPath, "/globalmount") {
		return lastElem(strings.TrimSuffix(mountPath, "/globalmount")), nil
	}
	if strings.HasSuffix(mountPath, "/mount") {
		return lastElem(strings.TrimSuffix(mountPath, "/mount")), nil
	}

	// get volume name for block volume
	s := strings.Split(mountPath, "/")
	if len(s) == 0 {
		return "", fmt.Errorf("rbd: malformed value of stage target path: %s", mountPath)
	}
	return s[len(s)-1], nil
}
2019-07-03 10:02:36 +00:00
func ( ns * NodeServer ) mountVolumeToStagePath ( req * csi . NodeStageVolumeRequest , stagingPath , devicePath string ) error {
// Publish Path
fsType := req . GetVolumeCapability ( ) . GetMount ( ) . GetFsType ( )
diskMounter := & mount . SafeFormatAndMount { Interface : ns . mounter , Exec : mount . NewOsExec ( ) }
opt := [ ] string { }
isBlock := req . GetVolumeCapability ( ) . GetBlock ( ) != nil
var err error
if isBlock {
opt = append ( opt , "bind" )
err = diskMounter . Mount ( devicePath , stagingPath , fsType , opt )
} else {
err = diskMounter . FormatAndMount ( devicePath , stagingPath , fsType , opt )
}
if err != nil {
klog . Errorf ( "failed to mount device path (%s) to staging path (%s) for volume (%s) error %s" , devicePath , stagingPath , req . GetVolumeId ( ) , err )
}
return err
}
func ( ns * NodeServer ) mountVolume ( stagingPath string , req * csi . NodePublishVolumeRequest ) error {
2018-11-01 01:03:03 +00:00
// Publish Path
fsType := req . GetVolumeCapability ( ) . GetMount ( ) . GetFsType ( )
2018-01-09 18:59:50 +00:00
readOnly := req . GetReadonly ( )
mountFlags := req . GetVolumeCapability ( ) . GetMount ( ) . GetMountFlags ( )
2019-01-28 19:55:10 +00:00
isBlock := req . GetVolumeCapability ( ) . GetBlock ( ) != nil
targetPath := req . GetTargetPath ( )
2019-07-03 10:02:36 +00:00
klog . V ( 4 ) . Infof ( "target %v\nisBlock %v\nfstype %v\nstagingPath %v\nreadonly %v\nmountflags %v\n" ,
targetPath , isBlock , fsType , stagingPath , readOnly , mountFlags )
mountFlags = append ( mountFlags , "bind" )
if readOnly {
mountFlags = append ( mountFlags , "ro" )
}
2019-07-31 16:24:19 +00:00
if err := util . Mount ( stagingPath , targetPath , fsType , mountFlags ) ; err != nil {
return status . Error ( codes . Internal , err . Error ( ) )
2018-01-09 18:59:50 +00:00
}
2019-07-31 16:24:19 +00:00
2019-01-28 19:55:10 +00:00
return nil
}
2019-07-25 09:01:10 +00:00
func ( ns * NodeServer ) createTargetMountPath ( mountPath string , isBlock bool ) ( bool , error ) {
2019-07-03 10:02:36 +00:00
// Check if that mount path exists properly
notMnt , err := mount . IsNotMountPoint ( ns . mounter , mountPath )
2019-01-28 19:55:10 +00:00
if err != nil {
if os . IsNotExist ( err ) {
if isBlock {
// #nosec
2019-07-03 10:02:36 +00:00
pathFile , e := os . OpenFile ( mountPath , os . O_CREATE | os . O_RDWR , 0750 )
2019-01-28 19:55:10 +00:00
if e != nil {
2019-07-03 10:02:36 +00:00
klog . V ( 4 ) . Infof ( "Failed to create mountPath:%s with error: %v" , mountPath , err )
2019-01-28 19:55:10 +00:00
return notMnt , status . Error ( codes . Internal , e . Error ( ) )
}
2019-07-03 10:02:36 +00:00
if err = pathFile . Close ( ) ; err != nil {
klog . V ( 4 ) . Infof ( "Failed to close mountPath:%s with error: %v" , mountPath , err )
2019-01-28 19:55:10 +00:00
return notMnt , status . Error ( codes . Internal , err . Error ( ) )
}
} else {
// Create a directory
2019-07-03 10:02:36 +00:00
if err = util . CreateMountPoint ( mountPath ) ; err != nil {
2019-01-28 19:55:10 +00:00
return notMnt , status . Error ( codes . Internal , err . Error ( ) )
}
}
notMnt = true
} else {
return false , status . Error ( codes . Internal , err . Error ( ) )
}
}
return notMnt , err
2018-01-09 18:59:50 +00:00
}
2019-01-28 11:47:06 +00:00
// NodeUnpublishVolume unmounts the volume from the target path
2019-01-17 07:51:06 +00:00
func ( ns * NodeServer ) NodeUnpublishVolume ( ctx context . Context , req * csi . NodeUnpublishVolumeRequest ) ( * csi . NodeUnpublishVolumeResponse , error ) {
2019-07-03 10:02:36 +00:00
err := util . ValidateNodeUnpublishVolumeRequest ( req )
if err != nil {
return nil , err
2019-04-22 21:35:39 +00:00
}
2019-07-03 10:02:36 +00:00
targetPath := req . GetTargetPath ( )
2019-06-24 07:58:39 +00:00
notMnt , err := mount . IsNotMountPoint ( ns . mounter , targetPath )
2018-11-15 02:06:42 +00:00
if err != nil {
if os . IsNotExist ( err ) {
// targetPath has already been deleted
2019-02-04 13:05:07 +00:00
klog . V ( 4 ) . Infof ( "targetPath: %s has already been deleted" , targetPath )
2018-11-15 02:06:42 +00:00
return & csi . NodeUnpublishVolumeResponse { } , nil
}
return nil , status . Error ( codes . NotFound , err . Error ( ) )
}
2018-11-15 20:40:19 +00:00
if notMnt {
2019-07-03 10:02:36 +00:00
if err = os . RemoveAll ( targetPath ) ; err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
return & csi . NodeUnpublishVolumeResponse { } , nil
}
if err = ns . mounter . Unmount ( targetPath ) ; err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
if err = os . RemoveAll ( targetPath ) ; err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
klog . Infof ( "rbd: successfully unbinded volume %s from %s" , req . GetVolumeId ( ) , targetPath )
return & csi . NodeUnpublishVolumeResponse { } , nil
}
// NodeUnstageVolume unstages the volume from the staging path
func ( ns * NodeServer ) NodeUnstageVolume ( ctx context . Context , req * csi . NodeUnstageVolumeRequest ) ( * csi . NodeUnstageVolumeResponse , error ) {
var err error
if err = util . ValidateNodeUnstageVolumeRequest ( req ) ; err != nil {
return nil , err
}
stagingTargetPath := req . GetStagingTargetPath ( )
2019-07-31 16:24:19 +00:00
stagingTargetPath += "/" + req . GetVolumeId ( )
2019-07-03 10:02:36 +00:00
notMnt , err := mount . IsNotMountPoint ( ns . mounter , stagingTargetPath )
if err != nil {
if os . IsNotExist ( err ) {
// staging targetPath has already been deleted
klog . V ( 4 ) . Infof ( "stagingTargetPath: %s has already been deleted" , stagingTargetPath )
return & csi . NodeUnstageVolumeResponse { } , nil
}
return nil , status . Error ( codes . NotFound , err . Error ( ) )
2018-11-15 20:40:19 +00:00
}
2019-07-03 10:02:36 +00:00
if notMnt {
2019-07-31 16:24:19 +00:00
// TODO: IsNotExist error should have been caught above
if err = os . Remove ( stagingTargetPath ) ; err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
2019-07-03 10:02:36 +00:00
}
return & csi . NodeUnstageVolumeResponse { } , nil
}
2019-07-31 16:24:19 +00:00
2019-07-03 10:02:36 +00:00
// Unmount the volume
devicePath , cnt , err := mount . GetDeviceNameFromMount ( ns . mounter , stagingTargetPath )
2018-02-20 16:10:59 +00:00
if err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
}
2019-07-03 10:02:36 +00:00
if err = ns . unmount ( stagingTargetPath , devicePath , cnt ) ; err != nil {
2019-01-29 05:49:16 +00:00
return nil , err
}
2019-07-31 16:24:19 +00:00
if err = os . Remove ( stagingTargetPath ) ; err != nil {
return nil , status . Error ( codes . Internal , err . Error ( ) )
2019-07-03 10:02:36 +00:00
}
klog . Infof ( "rbd: successfully unmounted volume %s from %s" , req . GetVolumeId ( ) , stagingTargetPath )
return & csi . NodeUnstageVolumeResponse { } , nil
2019-01-29 05:49:16 +00:00
}
func ( ns * NodeServer ) unmount ( targetPath , devicePath string , cnt int ) error {
var err error
2018-11-15 20:40:19 +00:00
// Bind mounted device needs to be resolved by using resolveBindMountedBlockDevice
if devicePath == "devtmpfs" {
devicePath , err = resolveBindMountedBlockDevice ( targetPath )
if err != nil {
2019-01-29 05:49:16 +00:00
return status . Error ( codes . Internal , err . Error ( ) )
2018-11-15 20:40:19 +00:00
}
2019-02-04 13:05:07 +00:00
klog . V ( 4 ) . Infof ( "NodeUnpublishVolume: devicePath: %s, (original)cnt: %d\n" , devicePath , cnt )
2018-11-15 20:40:19 +00:00
// cnt for GetDeviceNameFromMount is broken for bind mouted device,
// it counts total number of mounted "devtmpfs", instead of counting this device.
// So, forcibly setting cnt to 1 here.
// TODO : fix this properly
cnt = 1
}
2019-02-04 13:05:07 +00:00
klog . V ( 4 ) . Infof ( "NodeUnpublishVolume: targetPath: %s, devicePath: %s\n" , targetPath , devicePath )
2018-11-15 20:40:19 +00:00
2018-01-09 18:59:50 +00:00
// Unmounting the image
2018-10-15 14:59:41 +00:00
err = ns . mounter . Unmount ( targetPath )
2018-01-09 18:59:50 +00:00
if err != nil {
2019-02-04 13:05:07 +00:00
klog . V ( 3 ) . Infof ( "failed to unmount targetPath: %s with error: %v" , targetPath , err )
2019-01-29 05:49:16 +00:00
return status . Error ( codes . Internal , err . Error ( ) )
2018-01-09 18:59:50 +00:00
}
2018-02-20 16:10:59 +00:00
cnt --
if cnt != 0 {
2018-11-15 20:40:19 +00:00
// TODO should this be fixed not to success, so that driver can retry unmounting?
2019-01-29 05:49:16 +00:00
return nil
2018-01-09 18:59:50 +00:00
}
2018-02-20 16:10:59 +00:00
// Unmapping rbd device
2019-01-28 13:59:16 +00:00
if err = detachRBDDevice ( devicePath ) ; err != nil {
2019-02-04 13:05:07 +00:00
klog . V ( 3 ) . Infof ( "failed to unmap rbd device: %s with error: %v" , devicePath , err )
2019-07-03 10:02:36 +00:00
return status . Error ( codes . Internal , err . Error ( ) )
2018-01-09 18:59:50 +00:00
}
2019-07-03 10:02:36 +00:00
return nil
2018-01-09 18:59:50 +00:00
}
2018-11-07 02:05:19 +00:00
func resolveBindMountedBlockDevice ( mountPath string ) ( string , error ) {
2019-01-28 19:55:10 +00:00
// #nosec
2018-11-07 02:05:19 +00:00
cmd := exec . Command ( "findmnt" , "-n" , "-o" , "SOURCE" , "--first-only" , "--target" , mountPath )
out , err := cmd . CombinedOutput ( )
if err != nil {
2019-02-04 13:05:07 +00:00
klog . V ( 2 ) . Infof ( "Failed findmnt command for path %s: %s %v" , mountPath , out , err )
2018-11-07 02:05:19 +00:00
return "" , err
}
return parseFindMntResolveSource ( string ( out ) )
}
// reFindMntDevice matches a plain mounted device source, e.g. "/dev/rbd0".
// Compiled once at package scope rather than on every call.
var reFindMntDevice = regexp.MustCompile(`^(/[^/]+(?:/[^/]*)*)$`)

// reFindMntBindDevice matches a bind-mounted block device source,
// e.g. "devtmpfs[/rbd0]".
var reFindMntBindDevice = regexp.MustCompile(`^devtmpfs\[(/[^/]+(?:/[^/]*)*)\]$`)

// parseFindMntResolveSource parses the output of
// "findmnt -o SOURCE --first-only --target" and returns just the SOURCE.
// It returns an error when out matches neither expected output shape.
func parseFindMntResolveSource(out string) (string, error) {
	// cut trailing newline
	out = strings.TrimSuffix(out, "\n")
	// Check if out is a mounted device
	if match := reFindMntDevice.FindStringSubmatch(out); match != nil {
		return match[1], nil
	}
	// Check if out is a block device ("devtmpfs[/rbdN]" -> "/dev/rbdN")
	if match := reFindMntBindDevice.FindStringSubmatch(out); match != nil {
		return fmt.Sprintf("/dev%s", match[1]), nil
	}
	return "", fmt.Errorf("parseFindMntResolveSource: %s doesn't match to any expected findMnt output", out)
}
2019-07-03 10:02:36 +00:00
// NodeGetCapabilities returns the supported capabilities of the node server
func ( ns * NodeServer ) NodeGetCapabilities ( ctx context . Context , req * csi . NodeGetCapabilitiesRequest ) ( * csi . NodeGetCapabilitiesResponse , error ) {
return & csi . NodeGetCapabilitiesResponse {
Capabilities : [ ] * csi . NodeServiceCapability {
{
Type : & csi . NodeServiceCapability_Rpc {
Rpc : & csi . NodeServiceCapability_RPC {
Type : csi . NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME ,
} ,
} ,
} ,
2019-07-30 06:20:28 +00:00
{
Type : & csi . NodeServiceCapability_Rpc {
Rpc : & csi . NodeServiceCapability_RPC {
Type : csi . NodeServiceCapability_RPC_GET_VOLUME_STATS ,
} ,
} ,
} ,
2019-07-03 10:02:36 +00:00
} ,
} , nil
}